repo_name: string (length 6 to 130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
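Each record below follows this schema: one repository name plus parallel, index-aligned lists of commit hashes, file paths, raw source strings, the API calls extracted from each file, and per-library version candidates. A minimal sketch of how such records could be consumed, assuming the dump is serialized as JSON Lines; the file name api_dataset.jsonl and the helper names are placeholders, not part of this dump:

```python
import json

def iter_records(path="api_dataset.jsonl"):
    # Assumption: one JSON object per line, with the field names listed in the schema above.
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

def summarize(record):
    """Pair each stored source file with the API calls extracted from it."""
    for file_path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
        yield {
            "repo": record["repo_name"],
            "sha": record["hexsha"][0] if record["hexsha"] else None,
            "file": file_path,
            "loc": code.count("\n") + 1,   # rough line count of the raw source string
            "apis": sorted(set(apis)),     # e.g. ["numpy.array", "torch.load"]
        }

if __name__ == "__main__":
    for record in iter_records():
        for row in summarize(record):
            print(f'{row["repo"]} {row["file"]}: {len(row["apis"])} distinct APIs')
```

Pairing file_path, code, and apis by position relies on the lists being index-aligned, which is how the records below are laid out.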
nbortych/AEVNMT.pt
[ "360a885a3bab9212a67922f3a9bb44f26c9512fc", "360a885a3bab9212a67922f3a9bb44f26c9512fc" ]
[ "aevnmt/generate.py", "aevnmt/dist/priors.py" ]
[ "import torch\nimport numpy as np\nimport sys\nimport time\n\nfrom aevnmt.hparams import Hyperparameters\nfrom aevnmt.data import TextDataset, RawInputTextDataset, remove_subword_tokens, postprocess\nfrom aevnmt.train_monolingual import create_model\nfrom aevnmt.train_utils import load_vocabularies, load_vocabularies_monolingual , compute_bleu\nfrom aevnmt.data.datasets import InputTextDataset\nfrom aevnmt.data.textprocessing import SentenceSplitter\nfrom aevnmt.data.textprocessing import Pipeline\nfrom aevnmt.data.textprocessing import Tokenizer, Detokenizer\nfrom aevnmt.data.textprocessing import Lowercaser, Truecaser, Recaser\nfrom aevnmt.data.textprocessing import WordSegmenter, WordDesegmenter\n\nfrom torch.utils.data import DataLoader\nfrom pathlib import Path\n\n\nclass GenerationEngine:\n\n def __init__(self, hparams):\n\n output_dir = Path(hparams.output_dir)\n verbose = hparams.verbose\n\n if hparams.vocab.prefix is None:\n hparams.vocab.prefix = str(output_dir / \"vocab\")\n hparams.vocab.shared = False\n\n # Select the correct device (GPU or CPU).\n device = torch.device(\"cuda:0\") if hparams.use_gpu else torch.device(\"cpu\")\n\n # Pre/post-processing\n if hparams.tokenize:\n src_tokenizer_lang = hparams.src\n else:\n src_tokenizer_lang = None\n\n if hparams.lowercase and hparams.truecaser_prefix:\n raise ValueError(\"You cannot use lowercasing and truecasing at the same time\")\n\n model_checkpoint = output_dir / f\"model/{hparams.criterion}/{hparams.src}.pt\"\n\n self.hparams = hparams\n self.verbose = verbose\n self.device = device\n self.model_checkpoint = model_checkpoint\n self.src_tokenizer_lang = src_tokenizer_lang\n self.pipeline = Pipeline()\n self.vocab_src = None\n self.model = None\n self.translate_fn = None\n self.n_translated = 0\n\n @staticmethod\n def make_pipeline(hparams):\n # Loading pre/post-processing models\n if hparams.verbose:\n print(\"Loading pre/post-processing models\", file=sys.stderr)\n\n postprocess = []\n\n # Tokenization\n if hparams.detokenize:\n postprocess.append(Detokenizer(hparams.tgt))\n\n # Case\n if hparams.lowercase and hparams.truecaser_prefix:\n raise ValueError(\"You cannot set --lowercase to true and provide a --truecaser_prefix at the same time\")\n\n if hparams.recase:\n postprocess.append(Recaser(hparams.tgt))\n\n # Word segmentation\n if hparams.bpe.merge:\n postprocess.append(WordDesegmenter(separator=hparams.subword_token))\n\n return Pipeline(pre=[], post=list(reversed(postprocess)))\n\n def load_statics(self):\n # Loading vocabulary\n if self.verbose:\n t0 = time.time()\n print(f\"Loading vocabularies src={self.hparams.src} tgt={self.hparams.tgt}\", file=sys.stderr)\n self.vocab_src = load_vocabularies_monolingual(self.hparams)\n\n # Load pre/post processing models and configure a pipeline\n self.pipeline = GenerationEngine.make_pipeline(self.hparams)\n\n if self.verbose:\n print(f\"Restoring model selected wrt {self.hparams.criterion} from {self.model_checkpoint}\", file=sys.stderr)\n\n model, _, _, translate_fn = create_model(self.hparams, self.vocab_src)\n\n if self.hparams.use_gpu:\n model.load_state_dict(torch.load(self.model_checkpoint))\n else:\n model.load_state_dict(torch.load(self.model_checkpoint, map_location='cpu'))\n\n self.model = model.to(self.device)\n self.translate_fn = translate_fn\n self.model.eval()\n if self.verbose:\n print(\"Done loading in %.2f seconds\" % (time.time() - t0), file=sys.stderr)\n\n def generate(self,lines , num_samples: int, stdout=sys.stdout):\n hparams = self.hparams\n 
batch_size=hparams.batch_size\n\n # Translate the data.\n num_translated = 0\n all_hypotheses = []\n\n if lines is not None:\n #Use inference network to obtain latent codes from input sentences\n if hparams.split_sentences: # This is a type of pre-processing we do not a post-processing counterpart for\n if hparams.verbose:\n print(f\"Running sentence splitter for {len(lines)} lines\")\n lines = SentenceSplitter(hparams.src).split(lines)\n if hparams.verbose:\n print(f\"Produced {len(lines)} sentences\")\n input_data = InputTextDataset(\n generator=(self.pipeline.pre(line) for line in lines),\n max_length=hparams.max_sentence_length,\n split=True)\n input_dl = DataLoader(\n input_data, batch_size=hparams.batch_size,\n shuffle=False, num_workers=4)\n input_size = len(input_data)\n\n for input_sentences in input_dl:\n\n # Sort the input sentences from long to short.\n input_sentences = np.array(input_sentences)\n seq_len = np.array([len(s.split()) for s in input_sentences])\n sort_keys = np.argsort(-seq_len)\n input_sentences = input_sentences[sort_keys]\n\n t1 = time.time()\n # Translate the sentences using the trained model.\n hypotheses = self.translate_fn(\n self.model, input_sentences,None,\n self.vocab_src,\n self.device, hparams)\n\n num_translated += len(input_sentences)\n\n # Restore the original ordering.\n inverse_sort_keys = np.argsort(sort_keys)\n all_hypotheses += hypotheses[inverse_sort_keys].tolist()\n\n if self.verbose:\n print(f\"{num_translated}/{input_size} sentences translated in {time.time() - t1:.2f} seconds.\", file=sys.stderr)\n\n else:\n input_size=num_samples\n #Sample from the prior to obtain latent codes\n if self.verbose:\n print(f\"Sampling {num_samples} sentences...\", file=sys.stderr)\n\n num_batches=num_samples//batch_size\n if num_samples % batch_size > 0:\n num_batches+=1\n\n for batch_idx in range(num_batches):\n local_batch_size=batch_size\n if batch_idx == num_batches -1 and num_samples % batch_size > 0:\n local_batch_size=num_samples % batch_size\n\n t1 = time.time()\n # Translate the sentences using the trained model.\n hypotheses = self.translate_fn(\n self.model, None,local_batch_size,\n self.vocab_src,\n self.device, hparams)\n\n num_translated += local_batch_size\n\n # Restore the original ordering.\n all_hypotheses += hypotheses.tolist()\n\n if self.verbose:\n print(f\"{num_translated}/{num_samples} sentences translated in {time.time() - t1:.2f} seconds.\", file=sys.stderr)\n\n if hparams.show_raw_output:\n for i in range(input_size):\n print(i + self.n_translated, '|||' '|||', all_hypotheses[i], file=sys.stderr)\n\n # Post-processing\n all_hypotheses = [self.pipeline.post(h) for h in all_hypotheses]\n\n if stdout is not None:\n for hypothesis in all_hypotheses:\n print(hypothesis, file=stdout)\n\n self.n_translated += input_size\n\n return all_hypotheses\n\n def generate_file(self, input_path=None, output_path=None, num_samples=100, stdout=None):\n if output_path is None:\n stdout = sys.stdout\n\n if input_path is not None:\n with open(input_path) as f:\n translations = self.generate(lines=f.readlines(), num_samples=num_samples, stdout=stdout)\n else:\n translations = self.generate(lines=None,num_samples=num_samples, stdout=stdout)\n\n # If an output file is given write the output to that file.\n if output_path is not None:\n if self.verbose:\n print(f\"\\nWriting translation output to {output_path}\", file=sys.stderr)\n with open(output_path, \"w\") as f:\n for translation in translations:\n f.write(f\"{translation}\\n\")\n\n\n\ndef 
main(hparams=None):\n # Load command line hyperparameters (and if provided from an hparams_file).\n if hparams is None:\n if \"--hparams_file\" not in sys.argv:\n # TODO This is added to prevent incorrect overriding of arguments, see Issue #14\n # When resolved, hparams.update_from_file can be used instead.\n output_dir = Path(sys.argv[sys.argv.index(\"--output_dir\") + 1])\n hparams_file = str(output_dir / \"hparams\")\n sys.argv = [sys.argv[0]] + ['--hparams_file', hparams_file] + sys.argv[1:]\n hparams = Hyperparameters(check_required=False)\n\n engine = GenerationEngine(hparams)\n\n engine.load_statics()\n\n #if hparams.translation.interactive > 0:\n # if hparams.translation.interactive == 1:\n # engine.interactive_translation()\n # else:\n # engine.interactive_translation_n(wait_for=hparams.translation.interactive)\n #elif hparams.translation.input_file == '-':\n # engine.translate_stdin()\n #else:\n # if hparams.translation.ref_file and hparams.split_sentences:\n # raise ValueError(\"If you enable sentence splitting you will compromise line-alignment with the reference\")\n # engine.translate_file(\n # input_path=hparams.translation.input_file,\n # output_path=hparams.translation.output_file,\n # reference_path=hparams.translation.ref_file\n # )\n\n if hparams.translation.input_file is not None and hparams.translation.num_prior_samples is not None:\n raise ValueError(\"If you specify an input file, you cannot sample from the prior\")\n if hparams.translation.input_file is None and hparams.translation.num_prior_samples is None:\n raise ValueError(\"You must specify either an input file or a number of prior samples\")\n engine.generate_file(input_path=hparams.translation.input_file,output_path=hparams.translation.output_file,num_samples=hparams.translation.num_prior_samples)\n\nif __name__ == \"__main__\":\n main()\n", "import torch\nimport torch.nn as nn\nfrom torch.distributions import Distribution, Independent, Normal, Beta, Uniform\n\nfrom probabll.distributions import MixtureOfGaussians, ProductOfDistributions, MixtureD01C01\n\n\nclass PriorLayer(nn.Module):\n \"\"\"\n Use this to instantiate a prior -- that is set it's parameters.\n A prior in AEVNMT is a multivariate distribution, it's batch_shape is [] and event_shape is [latent_size].\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n\nclass GaussianPriorLayer(PriorLayer):\n\n def __init__(self, latent_size, loc=0., scale=1.):\n super().__init__()\n if scale <= 0:\n raise ValueError(\"The scale of a Gaussian distribution is strictly positive: %r\" % params)\n self.register_buffer(\"loc\", torch.full([latent_size], loc))\n self.register_buffer(\"scale\", torch.full([latent_size], scale))\n\n def forward(self) -> Distribution:\n return Independent(torch.distributions.Normal(loc=self.loc, scale=self.scale), 1)\n\n\nclass BetaPriorLayer(PriorLayer):\n\n def __init__(self, latent_size, a=0.5, b=0.5):\n super().__init__()\n if a <= 0. 
or b <= 0.:\n raise ValueError(\"The shape parameters of a Beta distribution are strictly positive: %r\" % params)\n self.register_buffer(\"a\", torch.full([latent_size], a))\n self.register_buffer(\"b\", torch.full([latent_size], b))\n\n def forward(self) -> Distribution:\n return Independent(Beta(self.a, self.b), 1)\n\n\nclass MoGPriorLayer(PriorLayer):\n\n def __init__(self, latent_size, num_components=10, radius=10, scale=0.5):\n super().__init__()\n if scale <= 0:\n raise ValueError(\"The prior variance must be strictly positive.\")\n # uniform prior over components\n self.register_buffer(\"logits\", torch.ones(num_components))\n self.register_buffer(\"locations\", - radius + torch.rand([num_components, latent_size]) * 2 * radius)\n self.register_buffer(\"scales\", torch.full([num_components, latent_size], scale))\n\n def forward(self) -> Distribution:\n p = MixtureOfGaussians(logits=self.logits, locations=self.locations, scales=self.scales)\n return p\n\n\nclass HardUniformPriorLayer(PriorLayer):\n\n def __init__(self, latent_size, p0=0.4, p1=0.4):\n super().__init__()\n if not (0. < p0 < 1. and 0. < p1 < 1. and p0 + p1 < 1.):\n raise ValueError(\"The shape parameters of the HardUniform distribution must be strictly positive and sum to less than 1: %r\" % params)\n self.register_buffer(\"p0\", torch.full([latent_size], p0))\n self.register_buffer(\"p1\", torch.full([latent_size], p1))\n self.register_buffer(\"pc\", torch.full([latent_size], 1. - p0 - p1))\n\n def forward(self) -> Distribution:\n cont = Uniform(torch.zeros_like(self.p0), torch.ones_like(self.p0))\n probs = torch.stack([self.p0, self.p1, self.pc], -1)\n return Independent(MixtureD01C01(cont, probs=probs), 1)\n\n\nclass ProductOfPriorsLayer(PriorLayer):\n \"\"\"\n A product of priors layer instantiates a number of independent priors.\n \"\"\"\n\n def __init__(self, priors: list):\n super().__init__()\n self.priors = nn.ModuleList(priors)\n\n def forward(self) -> Distribution:\n return ProductOfDistributions([prior() for prior in self.priors])\n\n" ]
[ [ "torch.load", "torch.utils.data.DataLoader", "torch.device", "numpy.argsort", "numpy.array" ], [ "torch.ones", "torch.full", "torch.distributions.Beta", "torch.nn.ModuleList", "torch.zeros_like", "torch.rand", "torch.distributions.Normal", "torch.stack", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MuhammadFadhilArkan/gym-pybullet-drones
[ "b0670e4b0289210954632c6919ff9b5dc740a61f", "b0670e4b0289210954632c6919ff9b5dc740a61f" ]
[ "experiments/performance/script.py", "assignments/aer1216_fall2020_hw1_sim.py" ]
[ "\"\"\"Only used to spawn multiple simulations and evaluate performance.\n\nThis script is similar to `examples/fly.py` and used by `parallelism.sh`.\n\n\"\"\"\nimport os\nimport time\nimport argparse\nfrom datetime import datetime\nimport pdb\nimport math\nimport random\nimport numpy as np\nimport pybullet as p\nimport matplotlib.pyplot as plt\n\nfrom gym_pybullet_drones.envs.BaseAviary import DroneModel, Physics\nfrom gym_pybullet_drones.envs.CtrlAviary import CtrlAviary\nfrom gym_pybullet_drones.envs.VisionAviary import VisionAviary\nfrom gym_pybullet_drones.control.DSLPIDControl import DSLPIDControl\nfrom gym_pybullet_drones.utils.Logger import Logger\nfrom gym_pybullet_drones.utils.utils import sync, str2bool\n\nif __name__ == \"__main__\":\n\n #### Define and parse (optional) arguments for the script ##\n parser = argparse.ArgumentParser(description='Helix flight script using CtrlAviary or VisionAviary and DSLPIDControl')\n parser.add_argument('--drone', default=\"cf2x\", type=DroneModel, help='Drone model (default: CF2X)', metavar='', choices=DroneModel)\n parser.add_argument('--num_drones', default=3, type=int, help='Number of drones (default: 3)', metavar='')\n parser.add_argument('--physics', default=\"pyb\", type=Physics, help='Physics updates (default: PYB)', metavar='', choices=Physics)\n parser.add_argument('--vision', default=False, type=str2bool, help='Whether to use VisionAviary (default: False)', metavar='')\n parser.add_argument('--gui', default=True, type=str2bool, help='Whether to use PyBullet GUI (default: True)', metavar='')\n parser.add_argument('--record_video', default=False, type=str2bool, help='Whether to record a video (default: False)', metavar='')\n parser.add_argument('--plot', default=True, type=str2bool, help='Whether to plot the simulation results (default: True)', metavar='')\n parser.add_argument('--user_debug_gui', default=False, type=str2bool, help='Whether to add debug lines and parameters to the GUI (default: False)', metavar='')\n parser.add_argument('--aggregate', default=False, type=str2bool, help='Whether to aggregate physics steps (default: False)', metavar='')\n parser.add_argument('--obstacles', default=True, type=str2bool, help='Whether to add obstacles to the environment (default: True)', metavar='')\n parser.add_argument('--simulation_freq_hz', default=240, type=int, help='Simulation frequency in Hz (default: 240)', metavar='')\n parser.add_argument('--control_freq_hz', default=48, type=int, help='Control frequency in Hz (default: 48)', metavar='')\n parser.add_argument('--duration_sec', default=5, type=int, help='Duration of the simulation in seconds (default: 5)', metavar='')\n ARGS = parser.parse_args()\n\n #### Initialize the simulation #############################\n H = .1\n H_STEP = .05\n R = .3\n INIT_XYZS = np.array([[R*np.cos((i/6)*2*np.pi+np.pi/2), R*np.sin((i/6)*2*np.pi+np.pi/2)-R, H+i*H_STEP] for i in range(ARGS.num_drones)])\n AGGR_PHY_STEPS = int(ARGS.simulation_freq_hz/ARGS.control_freq_hz) if ARGS.aggregate else 1\n\n #### Create the environment with or without video capture ##\n if ARGS.vision:\n env = VisionAviary(drone_model=ARGS.drone,\n num_drones=ARGS.num_drones,\n initial_xyzs=INIT_XYZS,\n physics=ARGS.physics,\n neighbourhood_radius=10,\n freq=ARGS.simulation_freq_hz,\n aggregate_phy_steps=AGGR_PHY_STEPS,\n gui=ARGS.gui,\n record=ARGS.record_video,\n obstacles=ARGS.obstacles\n )\n else: \n env = CtrlAviary(drone_model=ARGS.drone,\n num_drones=ARGS.num_drones,\n initial_xyzs=INIT_XYZS,\n physics=ARGS.physics,\n 
neighbourhood_radius=10,\n freq=ARGS.simulation_freq_hz,\n aggregate_phy_steps=AGGR_PHY_STEPS,\n gui=ARGS.gui,\n record=ARGS.record_video,\n obstacles=ARGS.obstacles,\n user_debug_gui=ARGS.user_debug_gui\n )\n\n #### Initialize a circular trajectory ######################\n PERIOD = 10\n NUM_WP = ARGS.control_freq_hz*PERIOD\n TARGET_POS = np.zeros((NUM_WP, 3))\n for i in range(NUM_WP):\n TARGET_POS[i, :] = R*np.cos((i/NUM_WP)*(2*np.pi)+np.pi/2)+INIT_XYZS[0, 0], R*np.sin((i/NUM_WP)*(2*np.pi)+np.pi/2)-R+INIT_XYZS[0, 1], INIT_XYZS[0, 2]\n wp_counters = np.array([int((i*NUM_WP/6)%NUM_WP) for i in range(ARGS.num_drones)])\n\n #### Initialize the logger #################################\n logger = Logger(logging_freq_hz=int(ARGS.simulation_freq_hz/AGGR_PHY_STEPS),\n num_drones=ARGS.num_drones\n )\n\n #### Initialize the controllers ############################\n ctrl = [DSLPIDControl(drone_model=ARGS.drone) for i in range(ARGS.num_drones)]\n\n #### Run the simulation ####################################\n CTRL_EVERY_N_STEPS= int(np.floor(env.SIM_FREQ/ARGS.control_freq_hz))\n action = {str(i): np.array([0, 0, 0, 0]) for i in range(ARGS.num_drones)}\n START = time.time()\n for i in range(0, int(ARGS.duration_sec*env.SIM_FREQ), AGGR_PHY_STEPS):\n\n #### Step the simulation ###################################\n obs, reward, done, info = env.step(action)\n\n #### Compute control at the desired frequency @@@@@#########\n if i%CTRL_EVERY_N_STEPS == 0:\n\n #### Compute control for the current way point #############\n for j in range(ARGS.num_drones):\n action[str(j)], _, _ = ctrl[j].computeControlFromState(control_timestep=CTRL_EVERY_N_STEPS*env.TIMESTEP,\n state=obs[str(j)][\"state\"],\n target_pos=np.hstack([TARGET_POS[wp_counters[j], 0:2], H+j*H_STEP])\n )\n\n #### Go to the next way point and loop #####################\n for j in range(ARGS.num_drones):\n wp_counters[j] = wp_counters[j] + 1 if wp_counters[j] < (NUM_WP-1) else 0\n\n #### Log the simulation ####################################\n for j in range(ARGS.num_drones):\n logger.log(drone=j,\n timestamp=i/env.SIM_FREQ,\n state= obs[str(j)][\"state\"],\n control=np.hstack([TARGET_POS[wp_counters[j], 0:2], H+j*H_STEP, np.zeros(9)])\n )\n\n #### Printout ##############################################\n if i%env.SIM_FREQ == 0:\n env.render()\n #### Print matrices with the images captured by each drone #\n if ARGS.vision:\n for j in range(ARGS.num_drones): \n print(obs[str(j)][\"rgb\"].shape, np.average(obs[str(j)][\"rgb\"]),\n obs[str(j)][\"dep\"].shape, np.average(obs[str(j)][\"dep\"]),\n obs[str(j)][\"seg\"].shape, np.average(obs[str(j)][\"seg\"])\n )\n\n #### Sync the simulation ###################################\n if ARGS.gui:\n sync(i, START, env.TIMESTEP)\n\n #### Close the environment #################################\n env.close()\n\n #### Save the simulation results ###########################\n logger.save()\n\n #### Plot the simulation results ###########################\n if ARGS.plot:\n logger.plot()\n", "\"\"\"Simulation script for assignment 1.\n\nThe script uses the control defined in file `aer1216_fall2020_hw1_ctrl.py`.\n\nExample\n-------\nTo run the simulation, type in a terminal:\n\n $ python aer1216_fall2020_hw1_sim.py\n\n\"\"\"\nimport time\nimport random\nimport numpy as np\nimport pybullet as p\n\n#### Uncomment the following 2 lines if \"module gym_pybullet_drones cannot be found\"\n# import sys\n# sys.path.append('../')\n\nfrom gym_pybullet_drones.envs.CtrlAviary import CtrlAviary\nfrom gym_pybullet_drones.utils.Logger 
import Logger\nfrom gym_pybullet_drones.utils.utils import sync\nfrom aer1216_fall2020_hw1_ctrl import HW1Control\n\nDURATION = 10\n\"\"\"int: The duration of the simulation in seconds.\"\"\"\nGUI = True\n\"\"\"bool: Whether to use PyBullet graphical interface.\"\"\"\nRECORD = False\n\"\"\"bool: Whether to save a video under /files/videos. Requires ffmpeg\"\"\"\n\nif __name__ == \"__main__\":\n\n #### Create the ENVironment ################################\n ENV = CtrlAviary(gui=GUI, record=RECORD)\n PYB_CLIENT = ENV.getPyBulletClient()\n\n #### Initialize the LOGGER #################################\n LOGGER = Logger(logging_freq_hz=ENV.SIM_FREQ)\n\n #### Initialize the controller #############################\n CTRL = HW1Control(ENV)\n\n #### Initialize the ACTION #################################\n ACTION = {}\n OBS = ENV.reset()\n STATE = OBS[\"0\"][\"state\"]\n ACTION[\"0\"] = CTRL.compute_control(current_position=STATE[0:3],\n current_velocity=STATE[10:13],\n target_position=STATE[0:3],\n target_velocity=np.zeros(3),\n target_acceleration=np.zeros(3)\n )\n\n #### Initialize target trajectory ##########################\n TARGET_POSITION = np.array([[0, 0, 1.0] for i in range(DURATION*ENV.SIM_FREQ)])\n TARGET_VELOCITY = np.zeros([DURATION * ENV.SIM_FREQ, 3])\n TARGET_ACCELERATION = np.zeros([DURATION * ENV.SIM_FREQ, 3])\n\n #### Derive the target trajectory to obtain target velocities and accelerations\n TARGET_VELOCITY[1:, :] = (TARGET_POSITION[1:, :] - TARGET_POSITION[0:-1, :]) / ENV.SIM_FREQ\n TARGET_ACCELERATION[1:, :] = (TARGET_VELOCITY[1:, :] - TARGET_VELOCITY[0:-1, :]) / ENV.SIM_FREQ\n\n #### Run the simulation ####################################\n START = time.time()\n for i in range(0, DURATION*ENV.SIM_FREQ):\n\n ### Secret control performance booster #####################\n # if i/ENV.SIM_FREQ>3 and i%30==0 and i/ENV.SIM_FREQ<10: p.loadURDF(\"duck_vhacd.urdf\", [random.gauss(0, 0.3), random.gauss(0, 0.3), 3], p.getQuaternionFromEuler([random.randint(0, 360),random.randint(0, 360),random.randint(0, 360)]), physicsClientId=PYB_CLIENT)\n\n #### Step the simulation ###################################\n OBS, _, _, _ = ENV.step(ACTION)\n\n #### Compute control #######################################\n STATE = OBS[\"0\"][\"state\"]\n ACTION[\"0\"] = CTRL.compute_control(current_position=STATE[0:3],\n current_velocity=STATE[10:13],\n target_position=TARGET_POSITION[i, :],\n target_velocity=TARGET_VELOCITY[i, :],\n target_acceleration=TARGET_ACCELERATION[i, :]\n )\n\n #### Log the simulation ####################################\n LOGGER.log(drone=0, timestamp=i/ENV.SIM_FREQ, state=STATE)\n\n #### Printout ##############################################\n if i%ENV.SIM_FREQ == 0:\n ENV.render()\n\n #### Sync the simulation ###################################\n if GUI:\n sync(i, START, ENV.TIMESTEP)\n\n #### Close the ENVironment #################################\n ENV.close()\n\n #### Save the simulation results ###########################\n LOGGER.save()\n\n #### Plot the simulation results ###########################\n LOGGER.plot()\n" ]
[ [ "numpy.hstack", "numpy.cos", "numpy.sin", "numpy.floor", "numpy.array", "numpy.zeros" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AbhilashReddyM/curvpack
[ "74351624ec9ec50ec4445c7be85a48a4eabb029a" ]
[ "curvpack/CurvatureISF.py" ]
[ "\r\n\"\"\"\r\nAbhilash Reddy Malipeddi. January 2017\r\nCalculate the mean and gaussian curvature at a vertex in a tri mesh using\r\nusing an iterative fitting method similar to what is given in [Garimella and Swartz],\r\n[Yazdani and Bagchi], etc.\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom numpy.linalg import lstsq\r\nfrom .utils import triangle_neighbors,GetVertexNormals,get_surf_neighbors,fastcross,normr\r\n\r\n\r\ndef CurvatureISF1(vertices,faces):\r\n '''\r\n This uses a two-ring neighborhood around a point. \r\n '''\r\n tol=1e-10\r\n \r\n npt=vertices.shape[0]\r\n neighbor_tri=triangle_neighbors(faces,npt)\r\n\r\n neighbor_verts= np.array([get_surf_neighbors(faces,neighbor_tri, k)\r\n for k in range(npt)])\r\n\r\n e0=vertices[faces[:,2]]-vertices[faces[:,1]]\r\n e1=vertices[faces[:,0]]-vertices[faces[:,2]]\r\n e2=vertices[faces[:,1]]-vertices[faces[:,0]]\r\n\r\n e0_norm=normr(e0)\r\n e1_norm=normr(e1)\r\n e2_norm=normr(e2)\r\n\r\n FaceNormals=0.5*fastcross(e0,e1)\r\n VN=GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2)\r\n up = np.zeros(vertices.shape)\r\n #Calculate initial coordinate system\r\n up[faces[:,0]]=e2_norm\r\n up[faces[:,1]]=e0_norm\r\n up[faces[:,2]]=e1_norm\r\n\r\n #Calculate initial vertex coordinate system\r\n up=fastcross(VN,up)\r\n up=normr(up)\r\n vp=fastcross(up,VN)\r\n vp=normr(vp)\r\n\r\n qj=np.zeros([30,3])\r\n A =np.zeros([36,5])\r\n B =np.zeros([36,1])\r\n\r\n H=np.zeros(npt)\r\n K=np.zeros(npt)\r\n\r\n\r\n for i in range(npt):\r\n n1=up[i]\r\n n2=vp[i]\r\n n3=VN[i]\r\n\r\n nbrs=np.unique(np.hstack(neighbor_verts[neighbor_verts[i]].flat))\r\n nbrs=np.setdiff1d(nbrs,i)\r\n\r\n for _ in range(30):\r\n for j,pj in enumerate(vertices[nbrs]):\r\n qj[j]=np.array([np.dot(pj-vertices[i],n1),\r\n np.dot(pj-vertices[i],n2),\r\n np.dot(pj-vertices[i],n3)])\r\n j=0\r\n k=0\r\n for (x,y,z) in qj:\r\n k+=1\r\n if k==len(nbrs):\r\n break\r\n scale = 2/(x**2+y**2)\r\n A[j] = scale*np.array([ x**2, x*y, y**2, x, y])\r\n B[j] = scale*z\r\n j+=1\r\n\r\n X=lstsq(A[:len(nbrs),:],B[:len(nbrs)],rcond=None)\r\n\r\n a,b,c,d,e=X[0]\r\n\r\n factor=1.0/np.sqrt(1.0+d[0]**2+e[0]**2)\r\n oldn3=n3.copy()\r\n n3=factor*np.array([-d[0],-e[0],1.0])\r\n\r\n n3=np.c_[n1,n2,oldn3].dot(n3)#new normal in local coordinates\r\n VN[i]=n3 #new normal in global coordinates. up,vp,VN system is not orthogonal anymore, but that is okay as it is not used again\r\n n2=np.cross(n1,n3)\r\n n2=n2/np.linalg.norm(n2)\r\n n1=np.cross(n3,n2)\r\n n1=n1/np.linalg.norm(n1)\r\n\r\n H[i]=factor**3*(a+c+a*e**2+c*d**2-b*d*e)\r\n K[i]=factor**4*(4*a*c-b**2)\r\n if np.linalg.norm(n3-oldn3) <tol:\r\n break\r\n return K,-H,VN\r\n\r\ndef CurvatureISF2(vertices,faces):\r\n '''\r\n This is a slight modification of the previous. Here we only use the one ring\r\n but we include the vertex normals in the fitting procedure. This indirectly has\r\n two ring support because the vertex normals themselves are calculated\r\n as a weighted average of the face normals. 
Sidenote: I wonder what happens if we include both\r\n vertex and face normals in the fitting procedure....\r\n '''\r\n tol=1e-10\r\n npt=vertices.shape[0]\r\n neighbor_tri=triangle_neighbors(faces,npt)\r\n\r\n neighbor_verts= np.array([get_surf_neighbors(faces,neighbor_tri, k)\r\n for k in range(npt)])\r\n\r\n e0=vertices[faces[:,2]]-vertices[faces[:,1]]\r\n e1=vertices[faces[:,0]]-vertices[faces[:,2]]\r\n e2=vertices[faces[:,1]]-vertices[faces[:,0]]\r\n\r\n e0_norm=normr(e0)\r\n e1_norm=normr(e1)\r\n e2_norm=normr(e2)\r\n\r\n FaceNormals=0.5*fastcross(e0,e1)\r\n VN=GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2)\r\n up = np.zeros(vertices.shape)\r\n #Calculate initial coordinate system\r\n up[faces[:,0]]=e2_norm\r\n up[faces[:,1]]=e0_norm\r\n up[faces[:,2]]=e1_norm\r\n\r\n #Calculate initial vertex coordinate system\r\n up=fastcross(VN,up)\r\n up=normr(up)\r\n vp=fastcross(up,VN)\r\n vp=normr(vp)\r\n\r\n qj=np.zeros([12,5])\r\n A =np.zeros([36,5])\r\n B =np.zeros([36,1])\r\n\r\n H=np.zeros(npt)\r\n K=np.zeros(npt)\r\n VNnew=np.zeros_like(VN)\r\n\r\n for i in range(npt):\r\n n1=up[i]\r\n n2=vp[i]\r\n n3=VN[i]\r\n for iter in range(30):\r\n for j,(pj,nj) in enumerate(zip(vertices[neighbor_verts[i]],VN[neighbor_verts[i]])):\r\n qj[j]=np.array([np.dot(pj-vertices[i],n1),\r\n np.dot(pj-vertices[i],n2),\r\n np.dot(pj-vertices[i],n3),\r\n -np.dot(nj,n1)/np.dot(nj,n3),\r\n -np.dot(nj,n2)/np.dot(nj,n3)])\r\n j=0\r\n k=0\r\n for (x,y,z,nx,ny) in qj:\r\n k+=1\r\n if k==len(neighbor_verts[i]):\r\n break\r\n scale=2/(x**2+y**2)\r\n A[j] = scale*np.array([ x**2, x*y, y**2, x, y])\r\n A[j+1] = scale*np.array([ 2*x, y, 0, 1, 0])\r\n A[j+2] = scale*np.array([ 0, x, 2*y, 0, 1])\r\n B[j] = scale*z\r\n B[j+1] = scale*nx\r\n B[j+2] = scale*ny\r\n j+=3\r\n\r\n X=lstsq(A[:3*len(neighbor_verts[i]),:],B[:3*len(neighbor_verts[i])],rcond=None)\r\n a,b,c,d,e=X[0]\r\n factor=1.0/np.sqrt(1.0+d[0]**2+e[0]**2)\r\n H[i]=factor**3*(a+c+a*e**2+c*d**2-b*d*e)\r\n K[i]=factor**4*(4*a*c-b**2)\r\n\r\n oldn3=n3.copy()\r\n n3=factor*np.array([-d[0],-e[0],1.0])#new normal in local coordinates\r\n n3=np.c_[n1,n2,oldn3].dot(n3) #new normal in global coordinates\r\n n2=np.cross(n1,n3)\r\n n2=n2/np.linalg.norm(n2)\r\n n1=np.cross(n3,n2)\r\n n1=n1/np.linalg.norm(n1)\r\n\r\n\r\n if np.linalg.norm(n3-oldn3) <tol:\r\n up[i]=n1\r\n vp[i]=n2\r\n VN[i]=n3\r\n break\r\n return K,-H,VN\r\n\r\ndef CurvatureISF3(vertices,faces):\r\n '''\r\n Uses two ring vertices and normals.\r\n '''\r\n tol=1e-10\r\n npt=vertices.shape[0]\r\n neighbor_tri=triangle_neighbors(faces,npt)\r\n\r\n neighbor_verts= np.array([get_surf_neighbors(faces,neighbor_tri, k)\r\n for k in range(npt)])\r\n\r\n e0=vertices[faces[:,2]]-vertices[faces[:,1]]\r\n e1=vertices[faces[:,0]]-vertices[faces[:,2]]\r\n e2=vertices[faces[:,1]]-vertices[faces[:,0]]\r\n\r\n e0_norm=normr(e0)\r\n e1_norm=normr(e1)\r\n e2_norm=normr(e2)\r\n\r\n FaceNormals=0.5*fastcross(e0,e1)\r\n VN=GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2)\r\n up = np.zeros(vertices.shape)\r\n #Calculate initial coordinate system\r\n up[faces[:,0]]=e2_norm\r\n up[faces[:,1]]=e0_norm\r\n up[faces[:,2]]=e1_norm\r\n\r\n #Calculate initial vertex coordinate system\r\n up=fastcross(VN,up)\r\n up=normr(up)\r\n vp=fastcross(up,VN)\r\n vp=normr(vp)\r\n\r\n qj=np.zeros([100,5])\r\n A =np.zeros([200,5])\r\n B =np.zeros([200,1])\r\n\r\n H=np.zeros(npt)\r\n K=np.zeros(npt)\r\n VNnew=np.zeros_like(VN)\r\n\r\n for i in range(npt):\r\n n1=up[i]\r\n n2=vp[i]\r\n n3=VN[i]\r\n 
nbrs=np.unique(np.hstack(neighbor_verts[neighbor_verts[i]].flat))\r\n nbrs=np.setdiff1d(nbrs,i)\r\n\r\n for iter in range(30):\r\n for j,(pj,nj) in enumerate(zip(vertices[nbrs],VN[nbrs])):\r\n qj[j]=np.array([np.dot(pj-vertices[i],n1),\r\n np.dot(pj-vertices[i],n2),\r\n np.dot(pj-vertices[i],n3),\r\n -np.dot(nj,n1)/np.dot(nj,n3),\r\n -np.dot(nj,n2)/np.dot(nj,n3)])\r\n j=0\r\n k=0\r\n for (x,y,z,nx,ny) in qj:\r\n k+=1\r\n if k==len(nbrs):\r\n break\r\n scale=2/(x**2+y**2)\r\n A[j] = scale*np.array([ x**2, x*y, y**2, x, y])\r\n A[j+1] = scale*np.array([ 2*x, y, 0, 1, 0])\r\n A[j+2] = scale*np.array([ 0, x, 2*y, 0, 1])\r\n B[j] = scale*z\r\n B[j+1] = scale*nx\r\n B[j+2] = scale*ny\r\n j+=3\r\n\r\n X=lstsq(A[:3*len(nbrs),:],B[:3*len(nbrs)],rcond=None)\r\n a,b,c,d,e=X[0]\r\n factor=1.0/np.sqrt(1.0+d[0]**2+e[0]**2)\r\n H[i]=factor**3*(a+c+a*e**2+c*d**2-b*d*e)\r\n K[i]=factor**4*(4*a*c-b**2)\r\n\r\n oldn3=n3.copy()\r\n n3=factor*np.array([-d[0],-e[0],1.0])#new normal in local coordinates\r\n n3=np.c_[n1,n2,oldn3].dot(n3) #new normal in global coordinates\r\n n2=np.cross(n1,n3)\r\n n2=n2/np.linalg.norm(n2)\r\n n1=np.cross(n3,n2)\r\n n1=n1/np.linalg.norm(n1)\r\n\r\n if np.linalg.norm(n3-oldn3) <tol:\r\n up[i]=n1\r\n vp[i]=n2\r\n VN[i]=n3\r\n break\r\n return K,-H,VN\r\n" ]
[ [ "numpy.hstack", "numpy.dot", "numpy.sqrt", "numpy.linalg.norm", "numpy.setdiff1d", "numpy.zeros_like", "numpy.cross", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Khushbu0610/allennlp
[ "60deece9fca2da6b66bfcde44484384bdefa3fe7" ]
[ "allennlp/modules/token_embedders/pretrained_transformer_embedder.py" ]
[ "import math\nfrom typing import Optional, Tuple\n\nfrom overrides import overrides\n\nimport torch\nimport torch.nn.functional as F\nfrom transformers import XLNetConfig\n\nfrom allennlp.data.tokenizers import PretrainedTransformerTokenizer\nfrom allennlp.modules.token_embedders.token_embedder import TokenEmbedder\nfrom allennlp.nn.util import batched_index_select\n\n\[email protected](\"pretrained_transformer\")\nclass PretrainedTransformerEmbedder(TokenEmbedder):\n \"\"\"\n Uses a pretrained model from `transformers` as a `TokenEmbedder`.\n\n Registered as a `TokenEmbedder` with name \"pretrained_transformer\".\n\n # Parameters\n\n model_name : `str`\n The name of the `transformers` model to use. Should be the same as the corresponding\n `PretrainedTransformerIndexer`.\n max_length : `int`, optional (default = `None`)\n If positive, folds input token IDs into multiple segments of this length, pass them\n through the transformer model independently, and concatenate the final representations.\n Should be set to the same value as the `max_length` option on the\n `PretrainedTransformerIndexer`.\n sub_module: `str`, optional (default = `None`)\n The name of a submodule of the transformer to be used as the embedder. Some transformers naturally act\n as embedders such as BERT. However, other models consist of encoder and decoder, in which case we just\n want to use the encoder.\n train_parameters: `bool`, optional (default = `True`)\n If this is `True`, the transformer weights get updated during training.\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n *,\n max_length: int = None,\n sub_module: str = None,\n train_parameters: bool = True,\n override_weights_file: Optional[str] = None,\n override_weights_strip_prefix: Optional[str] = None\n ) -> None:\n super().__init__()\n from allennlp.common import cached_transformers\n\n self.transformer_model = cached_transformers.get(\n model_name, True, override_weights_file, override_weights_strip_prefix\n )\n self.config = self.transformer_model.config\n if sub_module:\n assert hasattr(self.transformer_model, sub_module)\n self.transformer_model = getattr(self.transformer_model, sub_module)\n self._max_length = max_length\n # I'm not sure if this works for all models; open an issue on github if you find a case\n # where it doesn't work.\n self.output_dim = self.config.hidden_size\n\n tokenizer = PretrainedTransformerTokenizer(model_name)\n self._num_added_start_tokens = len(tokenizer.single_sequence_start_tokens)\n self._num_added_end_tokens = len(tokenizer.single_sequence_end_tokens)\n self._num_added_tokens = self._num_added_start_tokens + self._num_added_end_tokens\n\n if not train_parameters:\n for param in self.transformer_model.parameters():\n param.requires_grad = False\n\n @overrides\n def get_output_dim(self):\n return self.output_dim\n\n def _number_of_token_type_embeddings(self):\n if isinstance(self.config, XLNetConfig):\n return 3 # XLNet has 3 type ids\n elif hasattr(self.config, \"type_vocab_size\"):\n return self.config.type_vocab_size\n else:\n return 0\n\n @overrides\n def forward(\n self,\n token_ids: torch.LongTensor,\n mask: torch.BoolTensor,\n type_ids: Optional[torch.LongTensor] = None,\n segment_concat_mask: Optional[torch.BoolTensor] = None,\n ) -> torch.Tensor: # type: ignore\n \"\"\"\n # Parameters\n\n token_ids: `torch.LongTensor`\n Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.\n num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the\n 
middle, e.g. the length of: \"[CLS] A B C [SEP] [CLS] D E F [SEP]\" (see indexer logic).\n mask: `torch.BoolTensor`\n Shape: [batch_size, num_wordpieces].\n type_ids: `Optional[torch.LongTensor]`\n Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.\n segment_concat_mask: `Optional[torch.BoolTensor]`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n\n # Returns\n\n `torch.Tensor`\n Shape: `[batch_size, num_wordpieces, embedding_size]`.\n\n \"\"\"\n # Some of the huggingface transformers don't support type ids at all and crash when you supply\n # them. For others, you can supply a tensor of zeros, and if you don't, they act as if you did.\n # There is no practical difference to the caller, so here we pretend that one case is the same\n # as another case.\n if type_ids is not None:\n max_type_id = type_ids.max()\n if max_type_id == 0:\n type_ids = None\n else:\n if max_type_id >= self._number_of_token_type_embeddings():\n raise ValueError(\"Found type ids too large for the chosen transformer model.\")\n assert token_ids.shape == type_ids.shape\n\n fold_long_sequences = self._max_length is not None and token_ids.size(1) > self._max_length\n if fold_long_sequences:\n batch_size, num_segment_concat_wordpieces = token_ids.size()\n token_ids, segment_concat_mask, type_ids = self._fold_long_sequences(\n token_ids, segment_concat_mask, type_ids\n )\n\n transformer_mask = segment_concat_mask if self._max_length is not None else mask\n # Shape: [batch_size, num_wordpieces, embedding_size],\n # or if self._max_length is not None:\n # [batch_size * num_segments, self._max_length, embedding_size]\n\n # We call this with kwargs because some of the huggingface models don't have the\n # token_type_ids parameter and fail even when it's given as None.\n # Also, as of transformers v2.5.1, they are taking FloatTensor masks.\n parameters = {\"input_ids\": token_ids, \"attention_mask\": transformer_mask.float()}\n if type_ids is not None:\n parameters[\"token_type_ids\"] = type_ids\n embeddings = self.transformer_model(**parameters)[0]\n\n if fold_long_sequences:\n embeddings = self._unfold_long_sequences(\n embeddings, segment_concat_mask, batch_size, num_segment_concat_wordpieces\n )\n\n return embeddings\n\n def _fold_long_sequences(\n self,\n token_ids: torch.LongTensor,\n mask: torch.BoolTensor,\n type_ids: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.LongTensor, torch.LongTensor, Optional[torch.LongTensor]]:\n \"\"\"\n We fold 1D sequences (for each element in batch), returned by `PretrainedTransformerIndexer`\n that are in reality multiple segments concatenated together, to 2D tensors, e.g.\n\n [ [CLS] A B C [SEP] [CLS] D E [SEP] ]\n -> [ [ [CLS] A B C [SEP] ], [ [CLS] D E [SEP] [PAD] ] ]\n The [PAD] positions can be found in the returned `mask`.\n\n # Parameters\n\n token_ids: `torch.LongTensor`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the\n middle, i.e. the length of: \"[CLS] A B C [SEP] [CLS] D E F [SEP]\" (see indexer logic).\n mask: `torch.BoolTensor`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n The mask for the concatenated segments of wordpieces. 
The same as `segment_concat_mask`\n in `forward()`.\n type_ids: `Optional[torch.LongTensor]`\n Shape: [batch_size, num_segment_concat_wordpieces].\n\n # Returns:\n\n token_ids: `torch.LongTensor`\n Shape: [batch_size * num_segments, self._max_length].\n mask: `torch.BoolTensor`\n Shape: [batch_size * num_segments, self._max_length].\n \"\"\"\n num_segment_concat_wordpieces = token_ids.size(1)\n num_segments = math.ceil(num_segment_concat_wordpieces / self._max_length)\n padded_length = num_segments * self._max_length\n length_to_pad = padded_length - num_segment_concat_wordpieces\n\n def fold(tensor): # Shape: [batch_size, num_segment_concat_wordpieces]\n # Shape: [batch_size, num_segments * self._max_length]\n tensor = F.pad(tensor, [0, length_to_pad], value=0)\n # Shape: [batch_size * num_segments, self._max_length]\n return tensor.reshape(-1, self._max_length)\n\n return fold(token_ids), fold(mask), fold(type_ids) if type_ids is not None else None\n\n def _unfold_long_sequences(\n self,\n embeddings: torch.FloatTensor,\n mask: torch.BoolTensor,\n batch_size: int,\n num_segment_concat_wordpieces: int,\n ) -> torch.FloatTensor:\n \"\"\"\n We take 2D segments of a long sequence and flatten them out to get the whole sequence\n representation while remove unnecessary special tokens.\n\n [ [ [CLS]_emb A_emb B_emb C_emb [SEP]_emb ], [ [CLS]_emb D_emb E_emb [SEP]_emb [PAD]_emb ] ]\n -> [ [CLS]_emb A_emb B_emb C_emb D_emb E_emb [SEP]_emb ]\n\n We truncate the start and end tokens for all segments, recombine the segments,\n and manually add back the start and end tokens.\n\n # Parameters\n\n embeddings: `torch.FloatTensor`\n Shape: [batch_size * num_segments, self._max_length, embedding_size].\n mask: `torch.BoolTensor`\n Shape: [batch_size * num_segments, self._max_length].\n The mask for the concatenated segments of wordpieces. 
The same as `segment_concat_mask`\n in `forward()`.\n batch_size: `int`\n num_segment_concat_wordpieces: `int`\n The length of the original \"[ [CLS] A B C [SEP] [CLS] D E F [SEP] ]\", i.e.\n the original `token_ids.size(1)`.\n\n # Returns:\n\n embeddings: `torch.FloatTensor`\n Shape: [batch_size, self._num_wordpieces, embedding_size].\n \"\"\"\n\n def lengths_to_mask(lengths, max_len, device):\n return torch.arange(max_len, device=device).expand(\n lengths.size(0), max_len\n ) < lengths.unsqueeze(1)\n\n device = embeddings.device\n num_segments = int(embeddings.size(0) / batch_size)\n embedding_size = embeddings.size(2)\n\n # We want to remove all segment-level special tokens but maintain sequence-level ones\n num_wordpieces = num_segment_concat_wordpieces - (num_segments - 1) * self._num_added_tokens\n\n embeddings = embeddings.reshape(batch_size, num_segments * self._max_length, embedding_size)\n mask = mask.reshape(batch_size, num_segments * self._max_length)\n # We assume that all 1s in the mask precede all 0s, and add an assert for that.\n # Open an issue on GitHub if this breaks for you.\n # Shape: (batch_size,)\n seq_lengths = mask.sum(-1)\n if not (lengths_to_mask(seq_lengths, mask.size(1), device) == mask).all():\n raise ValueError(\n \"Long sequence splitting only supports masks with all 1s preceding all 0s.\"\n )\n # Shape: (batch_size, self._num_added_end_tokens); this is a broadcast op\n end_token_indices = (\n seq_lengths.unsqueeze(-1) - torch.arange(self._num_added_end_tokens, device=device) - 1\n )\n\n # Shape: (batch_size, self._num_added_start_tokens, embedding_size)\n start_token_embeddings = embeddings[:, : self._num_added_start_tokens, :]\n # Shape: (batch_size, self._num_added_end_tokens, embedding_size)\n end_token_embeddings = batched_index_select(embeddings, end_token_indices)\n\n embeddings = embeddings.reshape(batch_size, num_segments, self._max_length, embedding_size)\n embeddings = embeddings[\n :, :, self._num_added_start_tokens : -self._num_added_end_tokens, :\n ] # truncate segment-level start/end tokens\n embeddings = embeddings.reshape(batch_size, -1, embedding_size) # flatten\n\n # Now try to put end token embeddings back which is a little tricky.\n\n # The number of segment each sequence spans, excluding padding. Mimicking ceiling operation.\n # Shape: (batch_size,)\n num_effective_segments = (seq_lengths + self._max_length - 1) / self._max_length\n # The number of indices that end tokens should shift back.\n num_removed_non_end_tokens = (\n num_effective_segments * self._num_added_tokens - self._num_added_end_tokens\n )\n # Shape: (batch_size, self._num_added_end_tokens)\n end_token_indices -= num_removed_non_end_tokens.unsqueeze(-1)\n assert (end_token_indices >= self._num_added_start_tokens).all()\n # Add space for end embeddings\n embeddings = torch.cat([embeddings, torch.zeros_like(end_token_embeddings)], 1)\n # Add end token embeddings back\n embeddings.scatter_(\n 1, end_token_indices.unsqueeze(-1).expand_as(end_token_embeddings), end_token_embeddings\n )\n\n # Now put back start tokens. We can do this before putting back end tokens, but then\n # we need to change `num_removed_non_end_tokens` a little.\n embeddings = torch.cat([start_token_embeddings, embeddings], 1)\n\n # Truncate to original length\n embeddings = embeddings[:, :num_wordpieces, :]\n return embeddings\n" ]
[ [ "torch.zeros_like", "torch.arange", "torch.nn.functional.pad", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Frandium/nni
[ "a8c12fb75af3c695b61f48a2525fd2d520860a99" ]
[ "examples/model_compress/pruning/v2/activation_pruning_torch.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n'''\nNNI example for supported ActivationAPoZRank and ActivationMeanRank pruning algorithms.\nIn this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.\nNote that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.\n\n'''\nimport argparse\nimport sys\n\nimport torch\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom nni.compression.pytorch import ModelSpeedup\nfrom nni.compression.pytorch.utils.counter import count_flops_params\nfrom nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ActivationAPoZRankPruner, ActivationMeanRankPruner\nfrom nni.algorithms.compression.v2.pytorch.utils import trace_parameters\n\nfrom pathlib import Path\nsys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))\nfrom cifar10.vgg import VGG\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nnormalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\ng_epoch = 0\n\ntrain_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ]), download=True),\n batch_size=128, shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=128, shuffle=False)\n\ndef trainer(model, optimizer, criterion):\n global g_epoch\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx and batch_idx % 100 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n g_epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.item()))\n g_epoch += 1\n\ndef evaluator(model):\n model.eval()\n correct = 0.0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n acc = 100 * correct / len(test_loader.dataset)\n print('Accuracy: {}%\\n'.format(acc))\n return acc\n\ndef optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160):\n optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay)\n scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1)\n return optimizer, scheduler\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch Example for model comporession')\n parser.add_argument('--pruner', type=str, default='apoz',\n choices=['apoz', 'mean'],\n help='pruner to use')\n parser.add_argument('--pretrain-epochs', type=int, default=20,\n help='number of epochs to pretrain the model')\n parser.add_argument('--fine-tune-epochs', type=int, default=20,\n help='number of epochs to fine tune the model')\n args = parser.parse_args()\n\n print('\\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50)\n model = VGG().to(device)\n optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs)\n criterion = torch.nn.CrossEntropyLoss()\n pre_best_acc = 0.0\n best_state_dict = None\n\n for i in range(args.pretrain_epochs):\n trainer(model, optimizer, criterion)\n scheduler.step()\n acc = evaluator(model)\n if acc > pre_best_acc:\n pre_best_acc = acc\n best_state_dict = model.state_dict()\n print(\"Best accuracy: {}\".format(pre_best_acc))\n model.load_state_dict(best_state_dict)\n pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))\n g_epoch = 0\n\n # Start to prune and speedup\n print('\\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)\n config_list = [{\n 'total_sparsity': 0.5,\n 'op_types': ['Conv2d'],\n }]\n\n # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize\n traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)\n if 'apoz' in args.pruner:\n pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)\n else:\n pruner = ActivationMeanRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)\n _, masks = pruner.compress()\n pruner.show_pruned_weights()\n pruner._unwrap_model()\n ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model()\n print('\\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50)\n evaluator(model)\n\n # Optimizer used in the pruner might be patched, so recommend to new an optimizer for fine-tuning stage.\n print('\\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50)\n optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs)\n\n best_acc = 0.0\n g_epoch = 0\n for i in range(args.fine_tune_epochs):\n trainer(model, optimizer, criterion)\n scheduler.step()\n best_acc = max(evaluator(model), best_acc)\n flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 
32]).to(device))\n print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%')\n print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%')\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.randn", "torch.no_grad", "torch.rand", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
taconite/MetaAvatar-release
[ "c9403a478ee82232633d25f65f108befd21d04e9", "c9403a478ee82232633d25f65f108befd21d04e9", "c9403a478ee82232633d25f65f108befd21d04e9" ]
[ "evaluation/lib/mesh_distance.py", "torchmeta/datasets/cub.py", "depth2mesh/training.py" ]
[ "\"\"\"\nOriginal Author: Garvita\nEdited by: Bharat\n\"\"\"\n\nimport torch\nimport kaolin as kal\nfrom kaolin.rep import Mesh\nimport kaolin.cuda.tri_distance as td\nimport numpy as np\nfrom kaolin.metrics.mesh import _compute_edge_dist, _compute_planar_dist, TriangleDistance, point_to_surface\nfrom kaolin.rep import SDF as sdf\nfrom kaolin.rep import TriangleMesh as tm\nfrom evaluation.lib.torch_functions import batch_gather\n\ndef point_to_surface_vec(points,mesh):\n \"\"\"Computes the minimum distances from a set of points to a mesh\n Args:\n points (torch.Tensor): set of points\n mesh (Mesh): mesh to calculate distance\n\n Returns:\n distance: distance between points and surface (not averaged like Kaolin point_to_surface)\n \"\"\"\n\n # extract triangle defs from mesh\n v1 = torch.index_select(mesh.vertices.clone(), 0, mesh.faces[:, 0])\n v2 = torch.index_select(mesh.vertices.clone(), 0, mesh.faces[:, 1])\n v3 = torch.index_select(mesh.vertices.clone(), 0, mesh.faces[:, 2])\n\n # if quad mesh the separate the triangles\n if mesh.faces.shape[-1] == 4:\n v4 = torch.index_select(mesh.vertices.clone(), 0, mesh.faces[:, 3])\n temp1 = v1.clone()\n temp2 = v2.clone()\n temp3 = v3.clone()\n v1 = torch.cat((v1, v1), dim=0)\n v2 = torch.cat((v2, v4), dim=0)\n v3 = torch.cat((v3, v3), dim=0)\n\n if points.is_cuda:\n\n tri_minimum_dist = TriangleDistance()\n # pass to cuda\n distance, indx, dist_type = tri_minimum_dist(points, v1, v2, v3)\n indx = indx.data.cpu().numpy()\n dist_type = torch.LongTensor(dist_type.data.cpu().numpy())\n # reconpute distances to define gradient\n grad_dist = _recompute_point_to_surface_vec(\n [v1, v2, v3], points, indx, dist_type)\n # sanity check\n # print(distance.mean(), grad_dist)\n else:\n raise NotImplementedError\n\n return grad_dist\n\n\ndef _recompute_point_to_surface_vec(verts, p, indecies, dist_type):\n # recompute surface based the calcualted correct assignments of points and triangles\n # and the type of distacne, type 1 to 3 idicates which edge to calcualte to,\n # type 4 indicates the distance is from a point on the triangle not an edge\n v1, v2, v3 = verts\n v1 = v1[indecies]\n v2 = v2[indecies]\n v3 = v3[indecies]\n\n type_1 = (dist_type == 0)\n type_2 = (dist_type == 1)\n type_3 = (dist_type == 2)\n type_4 = (dist_type == 3)\n\n v21 = v2 - v1\n v32 = v3 - v2\n v13 = v1 - v3\n\n p1 = p - v1\n p2 = p - v2\n p3 = p - v3\n\n dists = []\n dists.append(_compute_edge_dist(v21[type_1], p1[type_1]).view(-1))\n dists.append(_compute_edge_dist(v32[type_2], p2[type_2]).view(-1))\n dists.append(_compute_edge_dist(v13[type_3], p3[type_3]).view(-1))\n\n if len(np.where(type_4)[0]) > 0:\n nor = torch.cross(v21[type_4], v13[type_4])\n dists.append(_compute_planar_dist(nor, p1[type_4]))\n\n distances = torch.cat(dists)\n return distances\n\ndef normal_consistency_face(pred_trimesh, gt_Mesh, gt_trimesh):\n \"\"\"\n :param pred: predicted trimesh\n :param gt_Mesh: GT mesh in psbody.mesh.Mesh\n :param gt trimesh: GT mesh trimesh\n \"\"\"\n pred_vertices = np.array(pred_trimesh.vertices)\n pred_normals = np.array(pred_trimesh.vertex_normals)\n closest_face, _ = gt_Mesh.closest_faces_and_points(pred_vertices)\n gt_normals = np.array(gt_trimesh.face_normals[closest_face.ravel()])\n consistency = np.linalg.norm(pred_normals - gt_normals, axis=-1).mean()\n\n return consistency\n\ndef normal_consistency_vertex(pred_trimesh, gt_trimesh, part_mask):\n \"\"\"\n :param pred: predicted trimesh\n :param gt trimesh: GT mesh trimesh\n \"\"\"\n pred_vertices = 
np.array(pred_trimesh.vertices)\n pred_normals = np.array(pred_trimesh.vertex_normals)\n\n gt_vertices = np.array(gt_trimesh.vertices)[part_mask, ...].copy()\n gt_normals = np.array(gt_trimesh.vertex_normals)[part_mask, ...].copy()\n\n from scipy.spatial import cKDTree as KDTree\n # kdtree = KDTree(gt_vertices)\n # _, ind = kdtree.query(pred_vertices)\n\n # gt_normals = gt_normals[ind, :]\n\n kdtree = KDTree(pred_vertices)\n _, ind = kdtree.query(gt_vertices)\n\n pred_normals = pred_normals[ind, :]\n\n consistency = 1 - np.linalg.norm(pred_normals - gt_normals, axis=-1).mean()\n\n return consistency\n\ndef chamfer_l1_distance(s1, s2, w1=1., w2=1.):\n \"\"\"\n :param s1: B x N x 3\n :param s2: B x M x 3\n :param w1: weight for distance from s1 to s2\n :param w2: weight for distance from s2 to s1\n \"\"\"\n from kaolin.metrics.point import SidedDistance\n\n assert s1.is_cuda and s2.is_cuda\n sided_minimum_dist = SidedDistance()\n closest_index_in_s2 = sided_minimum_dist(s1, s2)\n closest_index_in_s1 = sided_minimum_dist(s2, s1)\n closest_s2 = batch_gather(s2, closest_index_in_s2)\n closest_s1 = batch_gather(s1, closest_index_in_s1)\n\n dist_to_s2 = torch.abs(s1 - closest_s2).sum(-1).mean() * w1\n dist_to_s1 = torch.abs(s2 - closest_s1).sum(-1).mean() * w2\n\n return dist_to_s2 + dist_to_s1\n\ndef chamfer_l2_distance(s1, s2, w1=1., w2=1.):\n \"\"\"\n :param s1: B x N x 3\n :param s2: B x M x 3\n :param w1: weight for distance from s1 to s2\n :param w2: weight for distance from s2 to s1\n \"\"\"\n from kaolin.metrics.point import SidedDistance\n\n assert s1.is_cuda and s2.is_cuda\n sided_minimum_dist = SidedDistance()\n closest_index_in_s2 = sided_minimum_dist(s1, s2)\n closest_index_in_s1 = sided_minimum_dist(s2, s1)\n closest_s2 = batch_gather(s2, closest_index_in_s2)\n closest_s1 = batch_gather(s1, closest_index_in_s1)\n\n dist_to_s2 = torch.norm(s1 - closest_s2, dim=-1)\n dist_to_s1 = torch.norm(s2 - closest_s1, dim=-1)\n\n return dist_to_s2.mean() * w1 + dist_to_s1.mean() * w2, dist_to_s2, dist_to_s1, closest_index_in_s2, closest_index_in_s1\n\ndef chamfer_distance(s1, s2, w1=1., w2=1.):\n \"\"\"\n :param s1: B x N x 3\n :param s2: B x M x 3\n :param w1: weight for distance from s1 to s2\n :param w2: weight for distance from s2 to s1\n \"\"\"\n from kaolin.metrics.point import SidedDistance\n\n assert s1.is_cuda and s2.is_cuda\n sided_minimum_dist = SidedDistance()\n closest_index_in_s2 = sided_minimum_dist(s1, s2)\n closest_index_in_s1 = sided_minimum_dist(s2, s1)\n closest_s2 = batch_gather(s2, closest_index_in_s2)\n closest_s1 = batch_gather(s1, closest_index_in_s1)\n\n dist_to_s2 = (((s1 - closest_s2) ** 2).sum(dim=-1)).mean() * w1\n dist_to_s1 = (((s2 - closest_s1) ** 2).sum(dim=-1)).mean() * w2\n\n return dist_to_s2 + dist_to_s1\n\ndef batch_point_to_surface(points, meshes):\n \"\"\"\n Naive implementation. Just loops over the set of points and meshes.\n This is a bit tricky to batch-ify because number of points and\n mesh structure could be different for each entry in the batch.\n \"\"\"\n distance = [point_to_surface(p, m) for p, m in zip(points, meshes)]\n return torch.stack(distance)\n\ndef batch_point_to_surface_vec(points, meshes):\n distance = [point_to_surface_vec(p, m) for p, m in zip(points, meshes)]\n return torch.stack(distance)\n\ndef batch_point_to_surface_vec_signed(meshes, points):\n prelu = torch.nn.PReLU(init=25. 
*25.).cuda()\n dist = []\n for m, p in zip(meshes, points):\n dist_val = point_to_surface_vec(p, m)\n sign_val = torch.ones_like(dist_val)\n sign_bool = sdf.check_sign(m,p)[0] == 0\n sign_val[sign_bool] = -1.\n signed_dist = prelu(sign_val*dist_val)\n dist.append(torch.mean(signed_dist*signed_dist))\n\n return torch.stack(dist)\n", "import numpy as np\nfrom PIL import Image\nimport os\nimport io\nimport json\nimport glob\n\nfrom torchmeta.utils.data import Dataset, ClassDataset, CombinationMetaDataset\nfrom torchvision.datasets.utils import download_url\nfrom torchmeta.datasets.utils import get_asset\n\n\nclass CUB(CombinationMetaDataset):\n \"\"\"\n The Caltech-UCSD Birds dataset, introduced in [1]. This dataset is based on\n images from 200 species of birds from the Caltech-UCSD Birds dataset [2].\n\n Parameters\n ----------\n root : string\n Root directory where the dataset folder `cub` exists.\n\n num_classes_per_task : int\n Number of classes per tasks. This corresponds to \"N\" in \"N-way\"\n classification.\n\n meta_train : bool (default: `False`)\n Use the meta-train split of the dataset. If set to `True`, then the\n arguments `meta_val` and `meta_test` must be set to `False`. Exactly one\n of these three arguments must be set to `True`.\n\n meta_val : bool (default: `False`)\n Use the meta-validation split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_test` must be set to `False`. Exactly one\n of these three arguments must be set to `True`.\n\n meta_test : bool (default: `False`)\n Use the meta-test split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_val` must be set to `False`. Exactly one\n of these three arguments must be set to `True`.\n\n meta_split : string in {'train', 'val', 'test'}, optional\n Name of the split to use. This overrides the arguments `meta_train`,\n `meta_val` and `meta_test` if all three are set to `False`.\n\n transform : callable, optional\n A function/transform that takes a `PIL` image, and returns a transformed\n version. See also `torchvision.transforms`.\n\n target_transform : callable, optional\n A function/transform that takes a target, and returns a transformed\n version. See also `torchvision.transforms`.\n\n dataset_transform : callable, optional\n A function/transform that takes a dataset (ie. a task), and returns a\n transformed version of it. E.g. `torchmeta.transforms.ClassSplitter()`.\n\n class_augmentations : list of callable, optional\n A list of functions that augment the dataset with new classes. These classes\n are transformations of existing classes. E.g.\n `torchmeta.transforms.HorizontalFlip()`.\n\n download : bool (default: `False`)\n If `True`, downloads the pickle files and processes the dataset in the root\n directory (under the `cub` folder). If the dataset is already\n available, this does not download/process the dataset again.\n\n Notes\n -----\n The dataset is downloaded from [2]. The dataset contains images from 200\n classes. The meta train/validation/test splits are over 100/50/50 classes.\n The splits are taken from [3] ([code](https://github.com/wyharveychen/CloserLookFewShot)\n for reproducibility).\n\n References\n ----------\n .. [1] Hilliard, N., Phillips, L., Howland, S., Yankov, A., Corley, C. D.,\n Hodas, N. O. (2018). Few-Shot Learning with Metric-Agnostic Conditional\n Embeddings. (https://arxiv.org/abs/1802.04376)\n .. [2] Wah, C., Branson, S., Welinder, P., Perona, P., Belongie, S. 
(2011).\n The Caltech-UCSD Birds-200-2011 Dataset\n (http://www.vision.caltech.edu/visipedia/CUB-200-2011.html)\n .. [3] Chen, W., Liu, Y. and Kira, Z. and Wang, Y. and Huang, J. (2019).\n A Closer Look at Few-shot Classification. International Conference on\n Learning Representations (https://openreview.net/forum?id=HkxLXnAcFQ)\n\n \"\"\"\n def __init__(self, root, num_classes_per_task=None, meta_train=False,\n meta_val=False, meta_test=False, meta_split=None,\n transform=None, target_transform=None, dataset_transform=None,\n class_augmentations=None, download=False):\n dataset = CUBClassDataset(root, meta_train=meta_train, meta_val=meta_val,\n meta_test=meta_test, meta_split=meta_split, transform=transform,\n class_augmentations=class_augmentations, download=download)\n super(CUB, self).__init__(dataset, num_classes_per_task,\n target_transform=target_transform, dataset_transform=dataset_transform)\n\n\nclass CUBClassDataset(ClassDataset):\n folder = 'cub'\n download_url = 'http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz'\n tgz_md5 = '97eceeb196236b17998738112f37df78'\n image_folder = 'CUB_200_2011/images'\n\n filename = '{0}_data.hdf5'\n filename_labels = '{0}_labels.json'\n\n def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,\n meta_split=None, transform=None, class_augmentations=None,\n download=False):\n super(CUBClassDataset, self).__init__(meta_train=meta_train,\n meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,\n class_augmentations=class_augmentations)\n\n self.root = os.path.join(os.path.expanduser(root), self.folder)\n self.transform = transform\n\n self.split_filename = os.path.join(self.root,\n self.filename.format(self.meta_split))\n self.split_filename_labels = os.path.join(self.root,\n self.filename_labels.format(self.meta_split))\n\n self._data_file = None\n self._data = None\n self._labels = None\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError('CUB integrity check failed')\n self._num_classes = len(self.labels)\n\n def __getitem__(self, index):\n label = self.labels[index % self.num_classes]\n data = self.data[label]\n transform = self.get_transform(index, self.transform)\n target_transform = self.get_target_transform(index)\n\n return CUBDataset(index, data, label, transform=transform,\n target_transform=target_transform)\n\n @property\n def num_classes(self):\n return self._num_classes\n\n @property\n def data(self):\n if self._data is None:\n self._data_file = h5py.File(self.split_filename, 'r')\n self._data = self._data_file['datasets']\n return self._data\n\n @property\n def labels(self):\n if self._labels is None:\n with open(self.split_filename_labels, 'r') as f:\n self._labels = json.load(f)\n return self._labels\n\n def _check_integrity(self):\n return (os.path.isfile(self.split_filename)\n and os.path.isfile(self.split_filename_labels))\n\n def close(self):\n if self._data_file is not None:\n self._data_file.close()\n self._data_file = None\n self._data = None\n\n def download(self):\n import tarfile\n import shutil\n import glob\n from tqdm import tqdm\n\n if self._check_integrity():\n return\n\n filename = os.path.basename(self.download_url)\n download_url(self.download_url, self.root, filename, self.tgz_md5)\n\n tgz_filename = os.path.join(self.root, filename)\n with tarfile.open(tgz_filename, 'r') as f:\n f.extractall(self.root)\n image_folder = os.path.join(self.root, self.image_folder)\n\n for split in ['train', 'val', 'test']:\n filename = 
os.path.join(self.root, self.filename.format(split))\n if os.path.isfile(filename):\n continue\n\n labels = get_asset(self.folder, '{0}.json'.format(split))\n labels_filename = os.path.join(self.root, self.filename_labels.format(split))\n with open(labels_filename, 'w') as f:\n json.dump(labels, f)\n\n with h5py.File(filename, 'w') as f:\n group = f.create_group('datasets')\n dtype = h5py.special_dtype(vlen=np.uint8)\n for i, label in enumerate(tqdm(labels, desc=filename)):\n images = glob.glob(os.path.join(image_folder, label, '*.jpg'))\n images.sort()\n dataset = group.create_dataset(label, (len(images),), dtype=dtype)\n for i, image in enumerate(images):\n with open(image, 'rb') as f:\n array = bytearray(f.read())\n dataset[i] = np.asarray(array, dtype=np.uint8)\n\n tar_folder, _ = os.path.splitext(tgz_filename)\n if os.path.isdir(tar_folder):\n shutil.rmtree(tar_folder)\n\n attributes_filename = os.path.join(self.root, 'attributes.txt')\n if os.path.isfile(attributes_filename):\n os.remove(attributes_filename)\n\n\nclass CUBDataset(Dataset):\n def __init__(self, index, data, label,\n transform=None, target_transform=None):\n super(CUBDataset, self).__init__(index, transform=transform,\n target_transform=target_transform)\n self.data = data\n self.label = label\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n image = Image.open(io.BytesIO(self.data[index])).convert('RGB')\n target = self.label\n\n if self.transform is not None:\n image = self.transform(image)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return (image, target)\n", "import numpy as np\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\n\nclass BaseTrainer(object):\n ''' Base trainer class.\n '''\n\n def evaluate(self, val_loader, val_dataset=None):\n ''' Performs an evaluation.\n Args:\n val_loader (dataloader): pytorch dataloader\n '''\n eval_list = defaultdict(list)\n\n for data in tqdm(val_loader):\n if val_dataset is not None:\n idx = data['idx'].numpy().reshape([-1]).astype(np.int64)\n if idx.size == 1:\n model_dict = val_dataset.get_model_dict(idx[0])\n else:\n model_dict = None\n else:\n model_dict = None\n\n eval_step_dict = self.eval_step(data, model_dict=model_dict)\n\n for k, v in eval_step_dict.items():\n eval_list[k].append(v)\n\n eval_dict = {k: np.mean(v) for k, v in eval_list.items()}\n return eval_dict\n\n def train_step(self, *args, **kwargs):\n ''' Performs a training step.\n '''\n raise NotImplementedError\n\n def eval_step(self, *args, **kwargs):\n ''' Performs an evaluation step.\n '''\n raise NotImplementedError\n\n def visualize(self, *args, **kwargs):\n ''' Performs visualization.\n '''\n raise NotImplementedError\n" ]
[ [ "torch.mean", "torch.abs", "torch.norm", "torch.ones_like", "torch.cat", "torch.nn.PReLU", "numpy.linalg.norm", "torch.stack", "numpy.array", "numpy.where", "scipy.spatial.cKDTree", "torch.cross" ], [ "numpy.asarray" ], [ "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ddrake/convex_m
[ "6e506133c03bb1e0cf38143a907ac595082d524c" ]
[ "admm/check_output.py" ]
[ "from matplotlib import pyplot as plt\nfrom mat_util import *\n\ndatadir = 'from_coeus/data'\nnxs = load('nxs', datadir)\nfs = load('fs', datadir)\n\nplt.plot(fs)\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Objective Function Values\")\nplt.title(\"Convergence of Objective Function\")\nplt.show()\n\ninput(\"press a key\")\n\nplt.plot(nxs)\nplt.xlabel(\"Steps\")\nplt.ylabel(\"1-Norm of x Values\")\nplt.title(\"Convergence of x in 1-Norm\")\nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jgori-ouistiti/CoopIHC-zoo
[ "5efb1e1f9e773d793b4634b4671124d43aae3e7d" ]
[ "coopihczoo/eye/tests/test_PlIE.py" ]
[ "from coopihc.space.Space import Space\nfrom coopihc.space.State import State\nfrom coopihc.space.StateElement import StateElement\nfrom coopihc.space.utils import autospace\n\nfrom coopihczoo.eye.utils import ProvideLikelihoodInferenceEngine\n\nimport numpy\n\ngame_state = State(\n game_info=State(\n turn_index=StateElement(\n numpy.array([0]), autospace([0, 1, 2, 3]), out_of_bounds_mode=\"raw\"\n ),\n round_index=StateElement(\n numpy.array([0]), autospace([0, 1]), out_of_bounds_mode=\"raw\"\n ),\n ),\n task_state=State(\n target=StateElement(\n numpy.array([[-0.30282614]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"clip\",\n ),\n fixation=StateElement(\n numpy.array([[0.0]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"clip\",\n ),\n ),\n user_state=State(\n **{\n \"belief-mu\": StateElement(\n numpy.array([[0.0]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"warning\",\n ),\n \"belief-sigma\": StateElement(\n numpy.array([[1000.0]]),\n autospace([[[-numpy.inf]], [[numpy.inf]]]),\n out_of_bounds_mode=\"warning\",\n ),\n \"y\": StateElement(\n numpy.array([[0.0]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"warning\",\n ),\n \"Sigma_0\": StateElement(\n numpy.array([[1000.0]]),\n autospace([[[-numpy.inf]], [[numpy.inf]]]),\n out_of_bounds_mode=\"warning\",\n ),\n }\n ),\n assistant_state=State(),\n user_action=State(\n action=StateElement(\n numpy.array([[0.15020657]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"warning\",\n )\n ),\n assistant_action=State(\n action=StateElement(\n numpy.array([1]), autospace([0, 1]), out_of_bounds_mode=\"warning\"\n )\n ),\n)\n\nprint(game_state)\n\n\nclass Test(ProvideLikelihoodInferenceEngine):\n def __init__(self, noise_level, observation, *args, **kwargs):\n class Host:\n pass\n\n super().__init__(noise_level, *args, **kwargs)\n self.host = Host()\n self.host.role = \"user\"\n self.buffer = [observation]\n\n\ninference_engine = Test(0.5, game_state)\nstate, reward = inference_engine.infer()\nprint(state)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RPGroup-PBoC/mwc_activation
[ "6ef3f02a53ecd80877082006ecc4b8fe4204c1d6", "6ef3f02a53ecd80877082006ecc4b8fe4204c1d6" ]
[ "act/_fit_bivariate_normal_AstroML.py", "code/processing/20190206_r1_28yfp_xan_titration/viz.py" ]
[ "\"\"\"\nNotes:\n These functions were used with modification from the astroML python\n function `fit_bivariate_normal` which can be found [here]\n (http://www.astroml.org/book_figures/chapter3/fig_robust_pca.html). In\n this modified version, the percentiles are computed using the numpy\n function `nanpercentile`.\nLicense:\n These functions were borrowed from the AstroML library in agreement with\n their 3-clause BSD license. These functions retain this 3-clause BSD\n licensing and are the copyright of the owners.\n\"\"\"\nimport numpy as np\nfrom scipy import stats\n\n#from scipy.special import erfinv\n#sigmaG_factor = 1. / (2 * np.sqrt(2) * erfinv(0.5))\nsigmaG_factor = 0.74130110925280102\n\n\ndef mean_sigma(a, axis=None, dtype=None, ddof=0, keepdims=False):\n \"\"\"\n Compute mean and standard deviation for an array\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose mean is desired. If `a` is not an\n array, a conversion is attempted.\n axis : int, optional\n Axis along which the means are computed. The default is to compute\n the mean of the flattened array.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it is\n the same as the array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n mu : ndarray, see dtype parameter above\n array containing the mean values\n sigma : ndarray, see dtype parameter above.\n array containing the standard deviation\n\n See Also\n --------\n median_sigmaG : robust rank-based version of this calculation.\n Notes\n\n -----\n This routine simply calls ``np.mean`` and ``np.std``, passing the\n keyword arguments to them. It is provided for ease of comparison\n with the function median_sigmaG()\n \"\"\"\n mu = np.mean(a, axis=axis, dtype=dtype)\n sigma = np.std(a, axis=axis, dtype=dtype, ddof=ddof)\n\n if keepdims:\n if axis is None:\n newshape = a.ndim * (1,)\n else:\n newshape = np.asarray(a.shape)\n newshape[axis] = 1\n\n mu = mu.reshape(newshape)\n sigma = sigma.reshape(newshape)\n\n return mu, sigma\n\n\ndef median_sigmaG(a, axis=None, overwrite_input=False, keepdims=False):\n \"\"\"\n Compute median and rank-based estimate of the standard deviation\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose mean is desired. If `a` is not an\n array, a conversion is attempted.\n axis : int, optional\n Axis along which the means are computed. The default is to compute\n the mean of the flattened array.\n overwrite_input : bool, optional\n If True, then allow use of memory of input array `a` for\n calculations. The input array will be modified by the call to\n median. This will save memory when you do not need to preserve\n the contents of the input array. Treat the input as undefined,\n but it will probably be fully or partially sorted.\n Default is False. Note that, if `overwrite_input` is True and the\n input is not already an array, an error will be raised.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. 
With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n median : ndarray, see dtype parameter above\n array containing the median values\n sigmaG : ndarray, see dtype parameter above.\n array containing the robust estimator of the standard deviation\n\n See Also\n --------\n mean_sigma : non-robust version of this calculation\n sigmaG : robust rank-based estimate of standard deviation\n\n Notes\n -----\n This routine uses a single call to ``np.nanpercentile`` to find the\n quartiles along the given axis, and uses these to compute the\n median and sigmaG:\n median = q50\n sigmaG = (q75 - q25) * 0.7413\n where 0.7413 ~ 1 / (2 sqrt(2) erf^-1(0.5))\n \"\"\"\n q25, median, q75 = np.nanpercentile(a, [25, 50, 75],\n axis=axis,\n overwrite_input=overwrite_input)\n sigmaG = sigmaG_factor * (q75 - q25)\n\n if keepdims:\n if axis is None:\n newshape = a.ndim * (1,)\n else:\n newshape = np.asarray(a.shape)\n newshape[axis] = 1\n\n median = median.reshape(newshape)\n sigmaG = sigmaG.reshape(newshape)\n\n return median, sigmaG\n\n\ndef sigmaG(a, axis=None, overwrite_input=False, keepdims=False):\n \"\"\"\n Compute the rank-based estimate of the standard deviation\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose mean is desired. If `a` is not an\n array, a conversion is attempted.\n axis : int, optional\n Axis along which the means are computed. The default is to compute\n the mean of the flattened array.\n overwrite_input : bool, optional\n If True, then allow use of memory of input array `a` for\n calculations. The input array will be modified by the call to\n median. This will save memory when you do not need to preserve\n the contents of the input array. Treat the input as undefined,\n but it will probably be fully or partially sorted.\n Default is False. Note that, if `overwrite_input` is True and the\n input is not already an array, an error will be raised.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. 
With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n median : ndarray, see dtype parameter above\n array containing the median values\n sigmaG : ndarray, see dtype parameter above.\n array containing the robust estimator of the standard deviation\n\n See Also\n --------\n median_sigmaG : robust rank-based estimate of mean and standard deviation\n\n Notes\n -----\n This routine uses a single call to ``np.nanpercentile`` to find the\n quartiles along the given axis, and uses these to compute the\n sigmaG, a robust estimate of the standard deviation sigma:\n sigmaG = 0.7413 * (q75 - q25)\n where 0.7413 ~ 1 / (2 sqrt(2) erf^-1(0.5))\n \"\"\"\n q25, q75 = np.nanpercentile(a, [25, 75],\n axis=axis,\n overwrite_input=overwrite_input)\n sigmaG = sigmaG_factor * (q75 - q25)\n\n if keepdims:\n if axis is None:\n newshape = a.ndim * (1,)\n else:\n newshape = np.asarray(a.shape)\n newshape[axis] = 1\n\n sigmaG = sigmaG.reshape(newshape)\n\n return sigmaG\n\n\ndef fit_bivariate_normal(x, y, robust=False):\n \"\"\"\n Fit bivariate normal parameters to a 2D distribution of points\n\n Parameters\n ----------\n x, y : array_like\n The x, y coordinates of the points\n robust : boolean (optional, default=False)\n If True, then use rank-based statistics which are robust to outliers\n Otherwise, use mean/std statistics which are not robust\n\n Returns\n -------\n mu : tuple\n (x, y) location of the best-fit bivariate normal\n sigma_1, sigma_2 : float\n The best-fit gaussian widths in the uncorrelated frame\n alpha : float\n The rotation angle in radians of the uncorrelated frame\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n\n assert x.shape == y.shape\n\n if robust:\n # use quartiles to compute center and spread\n med_x, sigmaG_x = median_sigmaG(x)\n med_y, sigmaG_y = median_sigmaG(y)\n\n # define the principal variables from Shevlyakov & Smirnov (2011)\n sx = 2 * sigmaG_x\n sy = 2 * sigmaG_y\n\n u = (x / sx + y / sy) / np.sqrt(2)\n v = (x / sx - y / sy) / np.sqrt(2)\n\n med_u, sigmaG_u = median_sigmaG(u)\n med_v, sigmaG_v = median_sigmaG(v)\n\n r_xy = ((sigmaG_u ** 2 - sigmaG_v ** 2) /\n (sigmaG_u ** 2 + sigmaG_v ** 2))\n\n # rename estimators\n mu_x, mu_y = med_x, med_y\n sigma_x, sigma_y = sigmaG_x, sigmaG_y\n else:\n mu_x = np.mean(x)\n sigma_x = np.std(x)\n\n mu_y = np.mean(y)\n sigma_y = np.std(y)\n\n r_xy = stats.pearsonr(x, y)[0]\n\n # We need to use the full (-180, 180) version of arctan: this is\n # np.arctan2(x, y) = np.arctan(x / y), modulo 180 degrees\n sigma_xy = r_xy * sigma_x * sigma_y\n alpha = 0.5 * np.arctan2(2 * sigma_xy, sigma_x ** 2 - sigma_y ** 2)\n\n sigma1 = np.sqrt((0.5 * (sigma_x ** 2 + sigma_y ** 2) +\n np.sqrt(0.25 * (sigma_x ** 2 - sigma_y ** 2) ** 2 +\n sigma_xy ** 2)))\n sigma2 = np.sqrt((0.5 * (sigma_x ** 2 + sigma_y ** 2) -\n np.sqrt(0.25 * (sigma_x ** 2 - sigma_y ** 2) ** 2 +\n sigma_xy ** 2)))\n\n return [mu_x, mu_y], sigma1, sigma2, alpha\n", "# -*- coding: utf-8 -*-\nimport sys\nsys.path.insert(0, '../../../')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport act.viz\nimport joypy\nimport glob\ncolors = act.viz.personal_style()\n\n# Define the experimental parameters. \nDATE = '20190206'\nRUN_NO = 1\npromoter = '28yfp'\n\n# Load the fold-change data\nfc_data = pd.read_csv(f'output/{DATE}_r{RUN_NO}_{promoter}_fold_change.csv')\n\n# Define the list of all flow files.\ngated = glob.glob(f'../../../data/flow/csv/{DATE}_r{RUN_NO}_{promoter}_dilution*.csv')\n\n# Plot the mean fold-change. 
\nfig, ax = plt.subplots(1, 1)\n_fc = fc_data[fc_data['strain']=='dilution']\n_fc.sort_values(by=['xan_mgml'], inplace=True)\nax.plot(_fc['xan_mgml'], _fc['fold_change'], '--o')\nax.set_xlabel('xanthosine [mg/mL]')\nax.set_ylabel('fold-change')\nplt.tight_layout()\nplt.savefig('output/foldchange.png')\n\ndfs = []\nfor f in gated:\n _, _, _, _, _, xan = f.split('/')[-1].split('_')\n xan = float(xan.split('mgml')[0])\n data = pd.read_csv(f)\n data = data[data['gate']==1].copy()\n data['xan_mgml'] = xan\n dfs.append(data)\ndists = pd.concat(dfs)\n\n\n\n\n# Write my own ridgeline plot generator\nn_conc = len(dists['xan_mgml'].unique())\n\n\n# Set the bins \nbins = np.linspace(np.round(dists['FITC-H'].min()), np.round(dists['FITC-H'].max()), 100) \n\nfig, ax = plt.subplots(n_conc, 1, figsize=(3, 6), sharex=True)\naxes = {n:ax[i] for i, n in enumerate(np.sort(dists['xan_mgml'].unique()))}\naxes \nfor g, d in dists.groupby(['xan_mgml']):\n _ = axes[g].hist(d['FITC-H'], bins=bins, density=True)\n _ = axes[g].set_yticks([])\n _ = axes[g].set_ylabel(f'{g}')\n \nax[-1].set_xlabel('fluorescence [a.u.]')\nfor a in ax:\n a.set_xlim([0, 1E5])\n\nplt.tight_layout()\nfig.text(-0.05, 0.55, 'xanthosine [mg/mL]', fontsize=9, rotation='vertical',\n backgroundcolor='#f1f2f6')\nplt.savefig('output/distributions.png', bbox_inches='tight')\n" ]
[ [ "numpy.nanpercentile", "numpy.sqrt", "numpy.asarray", "scipy.stats.pearsonr", "numpy.arctan2", "numpy.std", "numpy.mean" ], [ "pandas.concat", "matplotlib.pyplot.tight_layout", "pandas.read_csv", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
syuqings/Fashion-MMT
[ "809c88cade3328176b202db543e686bce99ef76a" ]
[ "readers/data.py" ]
[ "from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport json\nimport numpy as np\nimport random\nimport pdb\nimport math\n\nfrom cytoolz import partition_all\nimport torch.utils.data\nfrom torch.utils.data import Sampler\n\nUNK, PAD, BOS, EOS, MASK = 0, 1, 2, 3, 4\n\n\nclass MMTDataset(torch.utils.data.Dataset):\n def __init__(self, config, split, img_max=10, src_max=36, tgt_max=72, task='mmt', _logger=None):\n super(MMTDataset, self).__init__()\n\n if _logger is None:\n self.print_fn = print\n else:\n self.print_fn = _logger.info\n\n self.names = np.load(config.name_file[split])\n self.anno = json.load(open(config.anno_file))\n self.src = open(config.src_txt[split], 'r', encoding='utf-8').readlines()\n self.trg = open(config.tgt_txt[split], 'r', encoding='utf-8').readlines()\n self.num_text = len(self.src)\n self.print_fn('text size %d' % self.num_text)\n\n self.lens = []\n if task == 'xmlm':\n for i in range(len(self.trg)):\n self.lens.append((len(self.trg[i].strip().split())+len(self.src[i].strip().split())+2)/2)\n elif task == 'mmt':\n for i in range(len(self.trg)):\n self.lens.append(len(self.trg[i].strip().split())+2)\n elif task in ['attp', 'itm']:\n for i in range(len(self.src)):\n self.lens.append(len(self.src[i].strip().split())+2)\n \n self.sim_img = json.load(open(config.sim_img[split]))\n self.stoi = json.load(open(config.word2int_file))\n self.itos = json.load(open(config.int2word_file))\n self.atoi = json.load(open(config.attr2int_file))\n self.ft_root = config.ft_root\n self.img_max = img_max\n self.src_max = src_max\n self.tgt_max = tgt_max\n self.is_train = True if split == 'trn' else False\n self.task = task\n\n def mask_and_pad_sent(self, x, id=None, lang='src'):\n max_len = self.src_max if lang == 'src' else self.tgt_max\n\n # masking input sequence\n if self.task == 'xmlm' or (self.task == 'mmt' and lang == 'trg'): # cross-lingual masking or adapt to MMT\n x, output_label = self.mask_sent(x[:max_len-1])\n elif self.task == 'attp':\n x, output_label = self.get_attr(x[:max_len-1], id)\n else:\n output_label = [PAD] * (max_len-1)\n\n # padding input sequence\n prob = random.random()\n if self.task == 'mmt' and lang == 'trg' and prob < 0.12:\n padded = [BOS] + x[:max_len-1] + [MASK] + [PAD] * max(0, max_len - len(x) - 2)\n output_label = [PAD] + output_label + [EOS] + [PAD] * max(0, max_len - len(x) - 2)\n elif self.task == 'attp':\n padded = [BOS] + x[:max_len-1] + [EOS] + [PAD] * max(0, max_len - len(x) - 2)\n else:\n padded = [BOS] + x[:max_len-1] + [EOS] + [PAD] * max(0, max_len - len(x) - 2)\n output_label = [PAD] + output_label + [PAD] + [PAD] * max(0, max_len - len(x) - 2)\n\n # truncate with the max length\n length = min(len(x)+2, max_len)\n padded = padded[:max_len]\n if self.task != 'attp':\n output_label = output_label[:max_len]\n return np.array(padded), np.array(output_label), length\n\n def random_mask(self, x, i, prob):\n # 80% randomly change token to mask token\n if prob < 0.8:\n x[i] = MASK\n # 10% randomly change token to random token\n elif prob < 0.9:\n x[i] = random.choice(list(range(len(self.stoi))))\n # -> rest 10% randomly keep current token\n return x\n\n def mask_sent(self, x):\n output_label = []\n for i, token in enumerate(x):\n prob = random.random()\n # mask normal token with 15% probability\n if prob < 0.15:\n prob /= 0.15\n x = self.random_mask(x, i, prob)\n output_label.append(token)\n else:\n # no masking token (will be ignored by loss function later)\n output_label.append(PAD)\n return x, 
output_label\n\n def get_attr(self, x, id):\n attrs = []\n output_label = [0.] * len(self.atoi)\n for attr in self.anno[id]['attr']:\n try:\n output_label[self.atoi[attr]] = 1.\n prob = random.random()\n if self.stoi[attr] in x:\n x = self.random_mask(x, x.index(self.stoi[attr]), prob)\n elif self.stoi[attr+'s'] in x:\n x = self.random_mask(x, x.index(self.stoi[attr+'s']), prob)\n elif self.stoi[attr+'es'] in x:\n x = self.random_mask(x, x.index(self.stoi[attr+'es']), prob)\n except:\n pass\n return x, output_label\n\n def sent2int(self, str_sent):\n int_sent = [self.stoi.get(w, UNK) for w in str_sent.split()]\n return int_sent\n\n def int2sent(self, batch):\n with torch.cuda.device_of(batch):\n batch = batch.tolist()\n batch = [[self.itos.get(str(ind), '<unk>') for ind in ex] for ex in batch] # denumericalize\n \n def trim(s, t):\n sentence = []\n for w in s:\n if w == t:\n break\n sentence.append(w)\n return sentence\n batch = [trim(ex, '<eos>') for ex in batch] # trim past frst eos\n\n def filter_special(tok):\n return tok not in ('<sos>', '<pad>', '<mask>')\n batch = [\" \".join(filter(filter_special, ex)).replace(\"@@ \", \"\") for ex in batch]\n return batch\n\n def __len__(self):\n return self.num_text\n\n def __getitem__(self, idx):\n outs = {}\n name = self.names[idx]\n img_ft = np.zeros(shape=[self.img_max, 2048], dtype=np.float32)\n for i, img in enumerate(self.anno[name]['images']):\n if i >= self.img_max:\n break\n img_ft[i] = np.load(os.path.join(self.ft_root, img+\".npy\"))[0]\n ft_len = min(self.img_max, len(self.anno[name]['images']))\n\n if self.task in ['xmlm', 'mmt']:\n src_id, src_label, src_len = self.mask_and_pad_sent(self.sent2int(self.src[idx].strip()), id=name, lang='src')\n trg_id, trg_label, trg_len = self.mask_and_pad_sent(self.sent2int(self.trg[idx].strip()), id=name, lang='trg')\n elif self.task == 'attp':\n src_id, src_label, src_len = self.mask_and_pad_sent(self.sent2int(self.src[idx].strip()), id=name, lang='src')\n trg_id = np.array([BOS])\n trg_len = 1\n elif self.task == 'itm':\n rep_prob = random.random()\n if rep_prob < 0.5:\n old_idx = idx\n idx = random.choice(self.sim_img[self.anno[name]['category']])\n if old_idx == idx:\n align_label = 1\n else:\n align_label = 0\n else:\n align_label = 1\n src_id, src_label, src_len = self.mask_and_pad_sent(self.sent2int(self.src[idx].strip()), id=name, lang='src')\n trg_id = np.array([BOS])\n trg_len = 1\n\n outs['ft_len'] = ft_len\n outs['img_ft'] = img_ft\n outs['src_ids'] = src_id\n outs['src_lens'] = src_len\n outs['trg_ids'] = trg_id\n outs['trg_lens'] = trg_len\n outs['ref_sents'] = self.trg[idx].strip()\n if self.task == 'itm':\n outs['align_label'] = align_label\n elif self.task == 'attp':\n outs['attr_label'] = src_label\n else:\n outs['output_label'] = np.concatenate([src_label, trg_label], axis=0)\n return outs\n\n\nclass TokenBucketSampler(Sampler):\n def __init__(self, lens, bucket_size, batch_size, droplast=False, size_multiple=8):\n self._lens = lens\n self._max_tok = batch_size\n self._bucket_size = bucket_size\n self._droplast = droplast\n self._size_mul = size_multiple\n\n def _create_ids(self):\n return list(range(len(self._lens)))\n\n def _sort_fn(self, i):\n return self._lens[i]\n\n def __iter__(self):\n ids = self._create_ids()\n random.shuffle(ids)\n buckets = [sorted(ids[i:i+self._bucket_size], key=self._sort_fn, reverse=True)\n for i in range(0, len(ids), self._bucket_size)]\n # fill batches until max_token (include padding)\n batches = []\n for bucket in buckets:\n max_len = 0\n 
batch_indices = []\n for indices in partition_all(self._size_mul, bucket):\n max_len = max(max_len, max(self._lens[i] for i in indices))\n if (max_len * (len(batch_indices) + self._size_mul)\n > self._max_tok):\n if not batch_indices:\n raise ValueError(\"max_tokens too small / max_seq_len too long\")\n assert len(batch_indices) % self._size_mul == 0\n batches.append(batch_indices)\n batch_indices = list(indices)\n max_len = max(self._lens[i] for i in indices)\n else:\n batch_indices.extend(indices)\n if not self._droplast and batch_indices:\n batches.append(batch_indices)\n random.shuffle(batches)\n return iter(batches)\n\n def __len__(self):\n raise ValueError(\"NOT supported. \")\n\n\nclass MetaLoader(object):\n \"\"\" wraps multiple data loaders \"\"\"\n def __init__(self, loaders, accum_steps=1):\n assert isinstance(loaders, dict)\n self.name2loader = {}\n self.name2iter = {}\n self.sampling_pools = []\n for n, l in loaders.items():\n if isinstance(l, tuple):\n l, r = l\n elif isinstance(l, torch.utils.data.DataLoader):\n r = 1\n else:\n raise ValueError()\n self.name2loader[n] = l\n self.name2iter[n] = iter(l)\n self.sampling_pools.extend([n]*r)\n self.accum_steps = accum_steps\n self.step = 0\n\n def __iter__(self):\n \"\"\" this iterator will run indefinitely \"\"\"\n task = self.sampling_pools[0]\n while True:\n if self.step % self.accum_steps == 0:\n task = random.choice(self.sampling_pools)\n self.step += 1\n iter_ = self.name2iter[task]\n try:\n batch = next(iter_)\n except StopIteration:\n iter_ = iter(self.name2loader[task])\n batch = next(iter_)\n self.name2iter[task] = iter_\n\n yield task, batch\n \n\n\n" ]
[ [ "numpy.concatenate", "numpy.load", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
workingyifei/display-pattern-generator
[ "9444dce96954c546333d5aecc92a06c3bfd19aa5", "9444dce96954c546333d5aecc92a06c3bfd19aa5", "b27be84c6221fa93833f283109870737b05bfbf6", "b27be84c6221fa93833f283109870737b05bfbf6", "9444dce96954c546333d5aecc92a06c3bfd19aa5", "9444dce96954c546333d5aecc92a06c3bfd19aa5" ]
[ "VENV/lib/python3.6/site-packages/pandas/tests/indexes/common.py", "VENV/lib/python3.6/site-packages/pandas/tests/series/test_api.py", "VENV/lib/python3.6/site-packages/numpy/testing/_private/utils.py", "VENV/lib/python3.6/site-packages/numpy/lib/histograms.py", "VENV/lib/python3.6/site-packages/pandas/tests/reshape/merge/test_merge.py", "VENV/lib/python3.6/site-packages/pandas/core/sparse/array.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom pandas import compat\nfrom pandas.compat import PY3\n\nimport numpy as np\n\nfrom pandas import (Series, Index, Float64Index, Int64Index, UInt64Index,\n RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex,\n TimedeltaIndex, PeriodIndex, IntervalIndex, isna)\nfrom pandas.core.indexes.base import InvalidIndexError\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\nfrom pandas.core.dtypes.common import needs_i8_conversion\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas._libs.tslib import iNaT\n\nimport pandas.util.testing as tm\n\nimport pandas as pd\n\n\nclass Base(object):\n \"\"\" base class for index sub-class tests \"\"\"\n _holder = None\n _compat_props = ['shape', 'ndim', 'size', 'nbytes']\n\n def setup_indices(self):\n for name, idx in self.indices.items():\n setattr(self, name, idx)\n\n def verify_pickle(self, indices):\n unpickled = tm.round_trip_pickle(indices)\n assert indices.equals(unpickled)\n\n def test_pickle_compat_construction(self):\n # this is testing for pickle compat\n if self._holder is None:\n return\n\n # need an object to create with\n pytest.raises(TypeError, self._holder)\n\n def test_to_series(self):\n # assert that we are creating a copy of the index\n\n idx = self.create_index()\n s = idx.to_series()\n assert s.values is not idx.values\n assert s.index is not idx\n assert s.name == idx.name\n\n def test_to_series_with_arguments(self):\n # GH18699\n\n # index kwarg\n idx = self.create_index()\n s = idx.to_series(index=idx)\n\n assert s.values is not idx.values\n assert s.index is idx\n assert s.name == idx.name\n\n # name kwarg\n idx = self.create_index()\n s = idx.to_series(name='__test')\n\n assert s.values is not idx.values\n assert s.index is not idx\n assert s.name != idx.name\n\n def test_to_frame(self):\n # see gh-15230\n idx = self.create_index()\n name = idx.name or 0\n\n df = idx.to_frame()\n\n assert df.index is idx\n assert len(df.columns) == 1\n assert df.columns[0] == name\n assert df[name].values is not idx.values\n\n df = idx.to_frame(index=False)\n assert df.index is not idx\n\n def test_shift(self):\n\n # GH8083 test the base class for shift\n idx = self.create_index()\n pytest.raises(NotImplementedError, idx.shift, 1)\n pytest.raises(NotImplementedError, idx.shift, 1, 2)\n\n def test_create_index_existing_name(self):\n\n # GH11193, when an existing index is passed, and a new name is not\n # specified, the new index should inherit the previous object name\n expected = self.create_index()\n if not isinstance(expected, MultiIndex):\n expected.name = 'foo'\n result = pd.Index(expected)\n tm.assert_index_equal(result, expected)\n\n result = pd.Index(expected, name='bar')\n expected.name = 'bar'\n tm.assert_index_equal(result, expected)\n else:\n expected.names = ['foo', 'bar']\n result = pd.Index(expected)\n tm.assert_index_equal(\n result, Index(Index([('foo', 'one'), ('foo', 'two'),\n ('bar', 'one'), ('baz', 'two'),\n ('qux', 'one'), ('qux', 'two')],\n dtype='object'),\n names=['foo', 'bar']))\n\n result = pd.Index(expected, names=['A', 'B'])\n tm.assert_index_equal(\n result,\n Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),\n ('baz', 'two'), ('qux', 'one'), ('qux', 'two')],\n dtype='object'), names=['A', 'B']))\n\n def test_numeric_compat(self):\n\n idx = self.create_index()\n tm.assert_raises_regex(TypeError, \"cannot perform __mul__\",\n lambda: idx * 1)\n tm.assert_raises_regex(TypeError, \"cannot perform __rmul__\",\n lambda: 1 * 
idx)\n\n div_err = \"cannot perform __truediv__\" if PY3 \\\n else \"cannot perform __div__\"\n tm.assert_raises_regex(TypeError, div_err, lambda: idx / 1)\n div_err = div_err.replace(' __', ' __r')\n tm.assert_raises_regex(TypeError, div_err, lambda: 1 / idx)\n tm.assert_raises_regex(TypeError, \"cannot perform __floordiv__\",\n lambda: idx // 1)\n tm.assert_raises_regex(TypeError, \"cannot perform __rfloordiv__\",\n lambda: 1 // idx)\n\n def test_logical_compat(self):\n idx = self.create_index()\n tm.assert_raises_regex(TypeError, 'cannot perform all',\n lambda: idx.all())\n tm.assert_raises_regex(TypeError, 'cannot perform any',\n lambda: idx.any())\n\n def test_boolean_context_compat(self):\n\n # boolean context compat\n idx = self.create_index()\n\n def f():\n if idx:\n pass\n\n tm.assert_raises_regex(ValueError, 'The truth value of a', f)\n\n def test_reindex_base(self):\n idx = self.create_index()\n expected = np.arange(idx.size, dtype=np.intp)\n\n actual = idx.get_indexer(idx)\n tm.assert_numpy_array_equal(expected, actual)\n\n with tm.assert_raises_regex(ValueError, 'Invalid fill method'):\n idx.get_indexer(idx, method='invalid')\n\n def test_get_indexer_consistency(self):\n # See GH 16819\n for name, index in self.indices.items():\n if isinstance(index, IntervalIndex):\n continue\n\n if index.is_unique or isinstance(index, CategoricalIndex):\n indexer = index.get_indexer(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n else:\n e = \"Reindexing only valid with uniquely valued Index objects\"\n with tm.assert_raises_regex(InvalidIndexError, e):\n indexer = index.get_indexer(index[0:2])\n\n indexer, _ = index.get_indexer_non_unique(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n\n def test_ndarray_compat_properties(self):\n idx = self.create_index()\n assert idx.T.equals(idx)\n assert idx.transpose().equals(idx)\n\n values = idx.values\n for prop in self._compat_props:\n assert getattr(idx, prop) == getattr(values, prop)\n\n # test for validity\n idx.nbytes\n idx.values.nbytes\n\n def test_repr_roundtrip(self):\n\n idx = self.create_index()\n tm.assert_index_equal(eval(repr(idx)), idx)\n\n def test_str(self):\n\n # test the string repr\n idx = self.create_index()\n idx.name = 'foo'\n assert \"'foo'\" in str(idx)\n assert idx.__class__.__name__ in str(idx)\n\n def test_dtype_str(self, indices):\n dtype = indices.dtype_str\n assert isinstance(dtype, compat.string_types)\n assert dtype == str(indices.dtype)\n\n def test_repr_max_seq_item_setting(self):\n # GH10182\n idx = self.create_index()\n idx = idx.repeat(50)\n with pd.option_context(\"display.max_seq_items\", None):\n repr(idx)\n assert '...' 
not in str(idx)\n\n def test_wrong_number_names(self, indices):\n def testit(ind):\n ind.names = [\"apple\", \"banana\", \"carrot\"]\n tm.assert_raises_regex(ValueError, \"^Length\", testit, indices)\n\n def test_set_name_methods(self, indices):\n new_name = \"This is the new name for this index\"\n\n # don't tests a MultiIndex here (as its tested separated)\n if isinstance(indices, MultiIndex):\n return\n original_name = indices.name\n new_ind = indices.set_names([new_name])\n assert new_ind.name == new_name\n assert indices.name == original_name\n res = indices.rename(new_name, inplace=True)\n\n # should return None\n assert res is None\n assert indices.name == new_name\n assert indices.names == [new_name]\n # with tm.assert_raises_regex(TypeError, \"list-like\"):\n # # should still fail even if it would be the right length\n # ind.set_names(\"a\")\n with tm.assert_raises_regex(ValueError, \"Level must be None\"):\n indices.set_names(\"a\", level=0)\n\n # rename in place just leaves tuples and other containers alone\n name = ('A', 'B')\n indices.rename(name, inplace=True)\n assert indices.name == name\n assert indices.names == [name]\n\n def test_hash_error(self, indices):\n index = indices\n tm.assert_raises_regex(TypeError, \"unhashable type: %r\" %\n type(index).__name__, hash, indices)\n\n def test_copy_name(self):\n # gh-12309: Check that the \"name\" argument\n # passed at initialization is honored.\n\n for name, index in compat.iteritems(self.indices):\n if isinstance(index, MultiIndex):\n continue\n\n first = index.__class__(index, copy=True, name='mario')\n second = first.__class__(first, copy=False)\n\n # Even though \"copy=False\", we want a new object.\n assert first is not second\n\n # Not using tm.assert_index_equal() since names differ.\n assert index.equals(first)\n\n assert first.name == 'mario'\n assert second.name == 'mario'\n\n s1 = Series(2, index=first)\n s2 = Series(3, index=second[:-1])\n\n if not isinstance(index, CategoricalIndex):\n # See gh-13365\n s3 = s1 * s2\n assert s3.index.name == 'mario'\n\n def test_ensure_copied_data(self):\n # Check the \"copy\" argument of each Index.__new__ is honoured\n # GH12309\n for name, index in compat.iteritems(self.indices):\n init_kwargs = {}\n if isinstance(index, PeriodIndex):\n # Needs \"freq\" specification:\n init_kwargs['freq'] = index.freq\n elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):\n # RangeIndex cannot be initialized from data\n # MultiIndex and CategoricalIndex are tested separately\n continue\n\n index_type = index.__class__\n result = index_type(index.values, copy=True, **init_kwargs)\n tm.assert_index_equal(index, result)\n tm.assert_numpy_array_equal(index.values, result.values,\n check_same='copy')\n\n if isinstance(index, PeriodIndex):\n # .values an object array of Period, thus copied\n result = index_type(ordinal=index.asi8, copy=False,\n **init_kwargs)\n tm.assert_numpy_array_equal(index._ndarray_values,\n result._ndarray_values,\n check_same='same')\n elif isinstance(index, IntervalIndex):\n # checked in test_interval.py\n pass\n else:\n result = index_type(index.values, copy=False, **init_kwargs)\n tm.assert_numpy_array_equal(index.values, result.values,\n check_same='same')\n tm.assert_numpy_array_equal(index._ndarray_values,\n result._ndarray_values,\n check_same='same')\n\n def test_copy_and_deepcopy(self, indices):\n from copy import copy, deepcopy\n\n if isinstance(indices, MultiIndex):\n return\n for func in (copy, deepcopy):\n idx_copy = func(indices)\n assert idx_copy 
is not indices\n assert idx_copy.equals(indices)\n\n new_copy = indices.copy(deep=True, name=\"banana\")\n assert new_copy.name == \"banana\"\n\n def test_duplicates(self, indices):\n if type(indices) is not self._holder:\n return\n if not len(indices) or isinstance(indices, MultiIndex):\n return\n idx = self._holder([indices[0]] * 5)\n assert not idx.is_unique\n assert idx.has_duplicates\n\n def test_unique(self, indices):\n # don't test a MultiIndex here (as its tested separated)\n # don't test a CategoricalIndex because categories change (GH 18291)\n if isinstance(indices, (MultiIndex, CategoricalIndex)):\n return\n\n # GH 17896\n expected = indices.drop_duplicates()\n for level in 0, indices.name, None:\n result = indices.unique(level=level)\n tm.assert_index_equal(result, expected)\n\n for level in 3, 'wrong':\n pytest.raises((IndexError, KeyError), indices.unique, level=level)\n\n def test_unique_na(self):\n idx = pd.Index([2, np.nan, 2, 1], name='my_index')\n expected = pd.Index([2, np.nan, 1], name='my_index')\n result = idx.unique()\n tm.assert_index_equal(result, expected)\n\n def test_get_unique_index(self, indices):\n # MultiIndex tested separately\n if not len(indices) or isinstance(indices, MultiIndex):\n return\n\n idx = indices[[0] * 5]\n idx_unique = indices[[0]]\n\n # We test against `idx_unique`, so first we make sure it's unique\n # and doesn't contain nans.\n assert idx_unique.is_unique\n try:\n assert not idx_unique.hasnans\n except NotImplementedError:\n pass\n\n for dropna in [False, True]:\n result = idx._get_unique_index(dropna=dropna)\n tm.assert_index_equal(result, idx_unique)\n\n # nans:\n if not indices._can_hold_na:\n return\n\n if needs_i8_conversion(indices):\n vals = indices.asi8[[0] * 5]\n vals[0] = iNaT\n else:\n vals = indices.values[[0] * 5]\n vals[0] = np.nan\n\n vals_unique = vals[:2]\n idx_nan = indices._shallow_copy(vals)\n idx_unique_nan = indices._shallow_copy(vals_unique)\n assert idx_unique_nan.is_unique\n\n assert idx_nan.dtype == indices.dtype\n assert idx_unique_nan.dtype == indices.dtype\n\n for dropna, expected in zip([False, True],\n [idx_unique_nan,\n idx_unique]):\n for i in [idx_nan, idx_unique_nan]:\n result = i._get_unique_index(dropna=dropna)\n tm.assert_index_equal(result, expected)\n\n def test_sort(self, indices):\n pytest.raises(TypeError, indices.sort)\n\n def test_mutability(self, indices):\n if not len(indices):\n return\n pytest.raises(TypeError, indices.__setitem__, 0, indices[0])\n\n def test_view(self, indices):\n assert indices.view().name == indices.name\n\n def test_compat(self, indices):\n assert indices.tolist() == list(indices)\n\n def test_memory_usage(self):\n for name, index in compat.iteritems(self.indices):\n result = index.memory_usage()\n if len(index):\n index.get_loc(index[0])\n result2 = index.memory_usage()\n result3 = index.memory_usage(deep=True)\n\n # RangeIndex, IntervalIndex\n # don't have engines\n if not isinstance(index, (RangeIndex, IntervalIndex)):\n assert result2 > result\n\n if index.inferred_type == 'object':\n assert result3 > result2\n\n else:\n\n # we report 0 for no-length\n assert result == 0\n\n def test_argsort(self):\n for k, ind in self.indices.items():\n\n # separately tested\n if k in ['catIndex']:\n continue\n\n result = ind.argsort()\n expected = np.array(ind).argsort()\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n def test_numpy_argsort(self):\n for k, ind in self.indices.items():\n result = np.argsort(ind)\n expected = ind.argsort()\n 
tm.assert_numpy_array_equal(result, expected)\n\n # these are the only two types that perform\n # pandas compatibility input validation - the\n # rest already perform separate (or no) such\n # validation via their 'values' attribute as\n # defined in pandas.core.indexes/base.py - they\n # cannot be changed at the moment due to\n # backwards compatibility concerns\n if isinstance(type(ind), (CategoricalIndex, RangeIndex)):\n msg = \"the 'axis' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg,\n np.argsort, ind, axis=1)\n\n msg = \"the 'kind' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.argsort,\n ind, kind='mergesort')\n\n msg = \"the 'order' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.argsort,\n ind, order=('a', 'b'))\n\n def test_pickle(self, indices):\n self.verify_pickle(indices)\n original_name, indices.name = indices.name, 'foo'\n self.verify_pickle(indices)\n indices.name = original_name\n\n def test_take(self):\n indexer = [4, 3, 0, 2]\n for k, ind in self.indices.items():\n\n # separate\n if k in ['boolIndex', 'tuples', 'empty']:\n continue\n\n result = ind.take(indexer)\n expected = ind[indexer]\n assert result.equals(expected)\n\n if not isinstance(ind,\n (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n # GH 10791\n with pytest.raises(AttributeError):\n ind.freq\n\n def test_take_invalid_kwargs(self):\n idx = self.create_index()\n indices = [1, 2]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n tm.assert_raises_regex(TypeError, msg, idx.take,\n indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, mode='clip')\n\n def test_repeat(self):\n rep = 2\n i = self.create_index()\n expected = pd.Index(i.values.repeat(rep), name=i.name)\n tm.assert_index_equal(i.repeat(rep), expected)\n\n i = self.create_index()\n rep = np.arange(len(i))\n expected = pd.Index(i.values.repeat(rep), name=i.name)\n tm.assert_index_equal(i.repeat(rep), expected)\n\n def test_numpy_repeat(self):\n rep = 2\n i = self.create_index()\n expected = i.repeat(rep)\n tm.assert_index_equal(np.repeat(i, rep), expected)\n\n msg = \"the 'axis' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.repeat,\n i, rep, axis=0)\n\n @pytest.mark.parametrize('klass', [list, tuple, np.array, Series])\n def test_where(self, klass):\n i = self.create_index()\n\n cond = [True] * len(i)\n result = i.where(klass(cond))\n expected = i\n tm.assert_index_equal(result, expected)\n\n cond = [False] + [True] * len(i[1:])\n expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)\n result = i.where(klass(cond))\n tm.assert_index_equal(result, expected)\n\n def test_setops_errorcases(self):\n for name, idx in compat.iteritems(self.indices):\n # # non-iterable input\n cases = [0.5, 'xxx']\n methods = [idx.intersection, idx.union, idx.difference,\n idx.symmetric_difference]\n\n for method in methods:\n for case in cases:\n tm.assert_raises_regex(TypeError,\n \"Input must be Index \"\n \"or array-like\",\n method, case)\n\n def test_intersection_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[:5]\n second = idx[:3]\n intersect = first.intersection(second)\n\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n assert tm.equalContents(intersect, second)\n\n # GH 10149\n cases = 
[klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assert_raises_regex(ValueError, msg):\n result = first.intersection(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.intersection(case)\n assert tm.equalContents(result, second)\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assert_raises_regex(TypeError, msg):\n result = first.intersection([1, 2, 3])\n\n def test_union_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[3:]\n second = idx[:5]\n everything = idx\n union = first.union(second)\n assert tm.equalContents(union, everything)\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assert_raises_regex(ValueError, msg):\n result = first.union(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.union(case)\n assert tm.equalContents(result, everything)\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assert_raises_regex(TypeError, msg):\n result = first.union([1, 2, 3])\n\n def test_difference_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[2:]\n second = idx[:4]\n answer = idx[4:]\n result = first.difference(second)\n\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n assert tm.equalContents(result, answer)\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assert_raises_regex(ValueError, msg):\n result = first.difference(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):\n assert result.__class__ == answer.__class__\n tm.assert_numpy_array_equal(result.sort_values().asi8,\n answer.sort_values().asi8)\n else:\n result = first.difference(case)\n assert tm.equalContents(result, answer)\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assert_raises_regex(TypeError, msg):\n result = first.difference([1, 2, 3])\n\n def test_symmetric_difference(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[1:]\n second = idx[:-1]\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n answer = idx[[0, -1]]\n result = first.symmetric_difference(second)\n assert tm.equalContents(result, answer)\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assert_raises_regex(ValueError, msg):\n result = first.symmetric_difference(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.symmetric_difference(case)\n assert tm.equalContents(result, answer)\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assert_raises_regex(TypeError, msg):\n first.symmetric_difference([1, 2, 3])\n\n def test_insert_base(self):\n\n for name, idx in compat.iteritems(self.indices):\n result = idx[1:4]\n\n if not len(idx):\n continue\n\n # test 0th element\n assert 
idx[0:4].equals(result.insert(0, idx[0]))\n\n def test_delete_base(self):\n\n for name, idx in compat.iteritems(self.indices):\n\n if not len(idx):\n continue\n\n if isinstance(idx, RangeIndex):\n # tested in class\n continue\n\n expected = idx[1:]\n result = idx.delete(0)\n assert result.equals(expected)\n assert result.name == expected.name\n\n expected = idx[:-1]\n result = idx.delete(-1)\n assert result.equals(expected)\n assert result.name == expected.name\n\n with pytest.raises((IndexError, ValueError)):\n # either depending on numpy version\n result = idx.delete(len(idx))\n\n def test_equals(self):\n\n for name, idx in compat.iteritems(self.indices):\n assert idx.equals(idx)\n assert idx.equals(idx.copy())\n assert idx.equals(idx.astype(object))\n\n assert not idx.equals(list(idx))\n assert not idx.equals(np.array(idx))\n\n # Cannot pass in non-int64 dtype to RangeIndex\n if not isinstance(idx, RangeIndex):\n same_values = Index(idx, dtype=object)\n assert idx.equals(same_values)\n assert same_values.equals(idx)\n\n if idx.nlevels == 1:\n # do not test MultiIndex\n assert not idx.equals(pd.Series(idx))\n\n def test_equals_op(self):\n # GH9947, GH10637\n index_a = self.create_index()\n if isinstance(index_a, PeriodIndex):\n return\n\n n = len(index_a)\n index_b = index_a[0:-1]\n index_c = index_a[0:-1].append(index_a[-2:-1])\n index_d = index_a[0:1]\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == index_b\n expected1 = np.array([True] * n)\n expected2 = np.array([True] * (n - 1) + [False])\n tm.assert_numpy_array_equal(index_a == index_a, expected1)\n tm.assert_numpy_array_equal(index_a == index_c, expected2)\n\n # test comparisons with numpy arrays\n array_a = np.array(index_a)\n array_b = np.array(index_a[0:-1])\n array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))\n array_d = np.array(index_a[0:1])\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == array_b\n tm.assert_numpy_array_equal(index_a == array_a, expected1)\n tm.assert_numpy_array_equal(index_a == array_c, expected2)\n\n # test comparisons with Series\n series_a = Series(array_a)\n series_b = Series(array_b)\n series_c = Series(array_c)\n series_d = Series(array_d)\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == series_b\n\n tm.assert_numpy_array_equal(index_a == series_a, expected1)\n tm.assert_numpy_array_equal(index_a == series_c, expected2)\n\n # cases where length is 1 for one of them\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == index_d\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == series_d\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == array_d\n msg = \"Can only compare identically-labeled Series objects\"\n with tm.assert_raises_regex(ValueError, msg):\n series_a == series_d\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n series_a == array_d\n\n # comparing with a scalar should broadcast; note that we are excluding\n # MultiIndex because in this case each item in the index is a tuple of\n # length 2, and therefore is considered an array of length 2 in the\n # comparison instead of a scalar\n if not isinstance(index_a, MultiIndex):\n expected3 = np.array([False] * (len(index_a) - 2) + [True, False])\n # assuming the 2nd to last item is unique in the data\n item = index_a[-2]\n tm.assert_numpy_array_equal(index_a == item, expected3)\n tm.assert_series_equal(series_a == item, Series(expected3))\n\n def 
test_numpy_ufuncs(self):\n # test ufuncs of numpy 1.9.2. see:\n # http://docs.scipy.org/doc/numpy/reference/ufuncs.html\n\n # some functions are skipped because it may return different result\n # for unicode input depending on numpy version\n\n for name, idx in compat.iteritems(self.indices):\n for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,\n np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,\n np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,\n np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,\n np.rad2deg]:\n if isinstance(idx, DatetimeIndexOpsMixin):\n # raise TypeError or ValueError (PeriodIndex)\n # PeriodIndex behavior should be changed in future version\n with pytest.raises(Exception):\n with np.errstate(all='ignore'):\n func(idx)\n elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):\n # coerces to float (e.g. np.sin)\n with np.errstate(all='ignore'):\n result = func(idx)\n exp = Index(func(idx.values), name=idx.name)\n\n tm.assert_index_equal(result, exp)\n assert isinstance(result, pd.Float64Index)\n else:\n # raise AttributeError or TypeError\n if len(idx) == 0:\n continue\n else:\n with pytest.raises(Exception):\n with np.errstate(all='ignore'):\n func(idx)\n\n for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:\n if isinstance(idx, DatetimeIndexOpsMixin):\n # raise TypeError or ValueError (PeriodIndex)\n with pytest.raises(Exception):\n func(idx)\n elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):\n # Results in bool array\n result = func(idx)\n assert isinstance(result, np.ndarray)\n assert not isinstance(result, Index)\n else:\n if len(idx) == 0:\n continue\n else:\n with pytest.raises(Exception):\n func(idx)\n\n def test_hasnans_isnans(self):\n # GH 11343, added tests for hasnans / isnans\n for name, index in self.indices.items():\n if isinstance(index, MultiIndex):\n pass\n else:\n idx = index.copy()\n\n # cases in indices doesn't include NaN\n expected = np.array([False] * len(idx), dtype=bool)\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert not idx.hasnans\n\n idx = index.copy()\n values = idx.values\n\n if len(index) == 0:\n continue\n elif isinstance(index, DatetimeIndexOpsMixin):\n values[1] = iNaT\n elif isinstance(index, (Int64Index, UInt64Index)):\n continue\n else:\n values[1] = np.nan\n\n if isinstance(index, PeriodIndex):\n idx = index.__class__(values, freq=index.freq)\n else:\n idx = index.__class__(values)\n\n expected = np.array([False] * len(idx), dtype=bool)\n expected[1] = True\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans\n\n def test_fillna(self):\n # GH 11343\n for name, index in self.indices.items():\n if len(index) == 0:\n pass\n elif isinstance(index, MultiIndex):\n idx = index.copy()\n msg = \"isna is not defined for MultiIndex\"\n with tm.assert_raises_regex(NotImplementedError, msg):\n idx.fillna(idx[0])\n else:\n idx = index.copy()\n result = idx.fillna(idx[0])\n tm.assert_index_equal(result, idx)\n assert result is not idx\n\n msg = \"'value' must be a scalar, passed: \"\n with tm.assert_raises_regex(TypeError, msg):\n idx.fillna([idx[0]])\n\n idx = index.copy()\n values = idx.values\n\n if isinstance(index, DatetimeIndexOpsMixin):\n values[1] = iNaT\n elif isinstance(index, (Int64Index, UInt64Index)):\n continue\n else:\n values[1] = np.nan\n\n if isinstance(index, PeriodIndex):\n idx = index.__class__(values, freq=index.freq)\n else:\n idx = index.__class__(values)\n\n expected = np.array([False] * len(idx), dtype=bool)\n expected[1] = True\n 
tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans\n\n def test_nulls(self):\n # this is really a smoke test for the methods\n # as these are adequately tested for function elsewhere\n\n for name, index in self.indices.items():\n if len(index) == 0:\n tm.assert_numpy_array_equal(\n index.isna(), np.array([], dtype=bool))\n elif isinstance(index, MultiIndex):\n idx = index.copy()\n msg = \"isna is not defined for MultiIndex\"\n with tm.assert_raises_regex(NotImplementedError, msg):\n idx.isna()\n else:\n\n if not index.hasnans:\n tm.assert_numpy_array_equal(\n index.isna(), np.zeros(len(index), dtype=bool))\n tm.assert_numpy_array_equal(\n index.notna(), np.ones(len(index), dtype=bool))\n else:\n result = isna(index)\n tm.assert_numpy_array_equal(index.isna(), result)\n tm.assert_numpy_array_equal(index.notna(), ~result)\n\n def test_empty(self):\n # GH 15270\n index = self.create_index()\n assert not index.empty\n assert index[:0].empty\n\n def test_join_self_unique(self, join_type):\n index = self.create_index()\n if index.is_unique:\n joined = index.join(index, how=join_type)\n assert (index == joined).all()\n\n def test_searchsorted_monotonic(self, indices):\n # GH17271\n # not implemented for tuple searches in MultiIndex\n # or Intervals searches in IntervalIndex\n if isinstance(indices, (MultiIndex, IntervalIndex)):\n return\n\n # nothing to test if the index is empty\n if indices.empty:\n return\n value = indices[0]\n\n # determine the expected results (handle dupes for 'right')\n expected_left, expected_right = 0, (indices == value).argmin()\n if expected_right == 0:\n # all values are the same, expected_right should be length\n expected_right = len(indices)\n\n # test _searchsorted_monotonic in all cases\n # test searchsorted only for increasing\n if indices.is_monotonic_increasing:\n ssm_left = indices._searchsorted_monotonic(value, side='left')\n assert expected_left == ssm_left\n\n ssm_right = indices._searchsorted_monotonic(value, side='right')\n assert expected_right == ssm_right\n\n ss_left = indices.searchsorted(value, side='left')\n assert expected_left == ss_left\n\n ss_right = indices.searchsorted(value, side='right')\n assert expected_right == ss_right\n\n elif indices.is_monotonic_decreasing:\n ssm_left = indices._searchsorted_monotonic(value, side='left')\n assert expected_left == ssm_left\n\n ssm_right = indices._searchsorted_monotonic(value, side='right')\n assert expected_right == ssm_right\n\n else:\n # non-monotonic should raise.\n with pytest.raises(ValueError):\n indices._searchsorted_monotonic(value, side='left')\n\n def test_map(self):\n # callable\n index = self.create_index()\n\n # we don't infer UInt64\n if isinstance(index, pd.UInt64Index):\n expected = index.astype('int64')\n else:\n expected = index\n\n result = index.map(lambda x: x)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"mapper\",\n [\n lambda values, index: {i: e for e, i in zip(values, index)},\n lambda values, index: pd.Series(values, index)])\n def test_map_dictlike(self, mapper):\n\n index = self.create_index()\n if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):\n pytest.skip(\"skipping tests for {}\".format(type(index)))\n\n identity = mapper(index.values, index)\n\n # we don't infer to UInt64 for a dict\n if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):\n expected = index.astype('int64')\n else:\n expected = index\n\n result = index.map(identity)\n tm.assert_index_equal(result, expected)\n\n # empty 
mappable\n expected = pd.Index([np.nan] * len(index))\n result = index.map(mapper(expected, index))\n tm.assert_index_equal(result, expected)\n\n def test_putmask_with_wrong_mask(self):\n # GH18368\n index = self.create_index()\n\n with pytest.raises(ValueError):\n index.putmask(np.ones(len(index) + 1, np.bool), 1)\n\n with pytest.raises(ValueError):\n index.putmask(np.ones(len(index) - 1, np.bool), 1)\n\n with pytest.raises(ValueError):\n index.putmask('foo', 1)\n\n @pytest.mark.parametrize('copy', [True, False])\n @pytest.mark.parametrize('name', [None, 'foo'])\n @pytest.mark.parametrize('ordered', [True, False])\n def test_astype_category(self, copy, name, ordered):\n # GH 18630\n index = self.create_index()\n if name:\n index = index.rename(name)\n\n # standard categories\n dtype = CategoricalDtype(ordered=ordered)\n result = index.astype(dtype, copy=copy)\n expected = CategoricalIndex(index.values, name=name, ordered=ordered)\n tm.assert_index_equal(result, expected)\n\n # non-standard categories\n dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered)\n result = index.astype(dtype, copy=copy)\n expected = CategoricalIndex(index.values, name=name, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n if ordered is False:\n # dtype='category' defaults to ordered=False, so only test once\n result = index.astype('category', copy=copy)\n expected = CategoricalIndex(index.values, name=name)\n tm.assert_index_equal(result, expected)\n", "# coding=utf-8\n# pylint: disable-msg=E1101,W0612\nfrom collections import OrderedDict\nimport pydoc\n\nimport pytest\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas import Index, Series, DataFrame, date_range\nfrom pandas.core.indexes.datetimes import Timestamp\n\nfrom pandas.compat import range, lzip, isidentifier, string_types\nfrom pandas import (compat, Categorical, period_range, timedelta_range,\n DatetimeIndex, PeriodIndex, TimedeltaIndex)\nimport pandas.io.formats.printing as printing\nfrom pandas.util.testing import (assert_series_equal,\n ensure_clean)\nimport pandas.util.testing as tm\n\nfrom .common import TestData\n\n\nclass SharedWithSparse(object):\n \"\"\"\n A collection of tests Series and SparseSeries can share.\n\n In generic tests on this class, use ``self._assert_series_equal()``\n which is implemented in sub-classes.\n \"\"\"\n def _assert_series_equal(self, left, right):\n \"\"\"Dispatch to series class dependent assertion\"\"\"\n raise NotImplementedError\n\n def test_scalarop_preserve_name(self):\n result = self.ts * 2\n assert result.name == self.ts.name\n\n def test_copy_name(self):\n result = self.ts.copy()\n assert result.name == self.ts.name\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n\n self.ts.index.name = None\n assert self.ts.index.name is None\n assert self.ts is self.ts\n\n cp = self.ts.copy()\n cp.index.name = 'foo'\n printing.pprint_thing(self.ts.index.name)\n assert self.ts.index.name is None\n\n def test_append_preserve_name(self):\n result = self.ts[:5].append(self.ts[5:])\n assert result.name == self.ts.name\n\n def test_binop_maybe_preserve_name(self):\n # names match, preserve\n result = self.ts * self.ts\n assert result.name == self.ts.name\n result = self.ts.mul(self.ts)\n assert result.name == self.ts.name\n\n result = self.ts * self.ts[:-2]\n assert result.name == self.ts.name\n\n # names don't match, don't preserve\n cp = self.ts.copy()\n cp.name = 'something else'\n result = self.ts + cp\n assert 
result.name is None\n result = self.ts.add(cp)\n assert result.name is None\n\n ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow']\n ops = ops + ['r' + op for op in ops]\n for op in ops:\n # names match, preserve\n s = self.ts.copy()\n result = getattr(s, op)(s)\n assert result.name == self.ts.name\n\n # names don't match, don't preserve\n cp = self.ts.copy()\n cp.name = 'changed'\n result = getattr(s, op)(cp)\n assert result.name is None\n\n def test_combine_first_name(self):\n result = self.ts.combine_first(self.ts[:5])\n assert result.name == self.ts.name\n\n def test_getitem_preserve_name(self):\n result = self.ts[self.ts > 0]\n assert result.name == self.ts.name\n\n result = self.ts[[0, 2, 4]]\n assert result.name == self.ts.name\n\n result = self.ts[5:10]\n assert result.name == self.ts.name\n\n def test_pickle(self):\n unp_series = self._pickle_roundtrip(self.series)\n unp_ts = self._pickle_roundtrip(self.ts)\n assert_series_equal(unp_series, self.series)\n assert_series_equal(unp_ts, self.ts)\n\n def _pickle_roundtrip(self, obj):\n\n with ensure_clean() as path:\n obj.to_pickle(path)\n unpickled = pd.read_pickle(path)\n return unpickled\n\n def test_argsort_preserve_name(self):\n result = self.ts.argsort()\n assert result.name == self.ts.name\n\n def test_sort_index_name(self):\n result = self.ts.sort_index(ascending=False)\n assert result.name == self.ts.name\n\n def test_to_sparse_pass_name(self):\n result = self.ts.to_sparse()\n assert result.name == self.ts.name\n\n def test_constructor_dict(self):\n d = {'a': 0., 'b': 1., 'c': 2.}\n result = self.series_klass(d)\n expected = self.series_klass(d, index=sorted(d.keys()))\n self._assert_series_equal(result, expected)\n\n result = self.series_klass(d, index=['b', 'c', 'd', 'a'])\n expected = self.series_klass([1, 2, np.nan, 0],\n index=['b', 'c', 'd', 'a'])\n self._assert_series_equal(result, expected)\n\n def test_constructor_subclass_dict(self):\n data = tm.TestSubDict((x, 10.0 * x) for x in range(10))\n series = self.series_klass(data)\n expected = self.series_klass(dict(compat.iteritems(data)))\n self._assert_series_equal(series, expected)\n\n def test_constructor_ordereddict(self):\n # GH3283\n data = OrderedDict(\n ('col%s' % i, np.random.random()) for i in range(12))\n\n series = self.series_klass(data)\n expected = self.series_klass(list(data.values()), list(data.keys()))\n self._assert_series_equal(series, expected)\n\n # Test with subclass\n class A(OrderedDict):\n pass\n\n series = self.series_klass(A(data))\n self._assert_series_equal(series, expected)\n\n def test_constructor_dict_multiindex(self):\n d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}\n _d = sorted(d.items())\n result = self.series_klass(d)\n expected = self.series_klass(\n [x[1] for x in _d],\n index=pd.MultiIndex.from_tuples([x[0] for x in _d]))\n self._assert_series_equal(result, expected)\n\n d['z'] = 111.\n _d.insert(0, ('z', d['z']))\n result = self.series_klass(d)\n expected = self.series_klass([x[1] for x in _d],\n index=pd.Index([x[0] for x in _d],\n tupleize_cols=False))\n result = result.reindex(index=expected.index)\n self._assert_series_equal(result, expected)\n\n def test_constructor_dict_timedelta_index(self):\n # GH #12169 : Resample category data with timedelta index\n # construct Series from dict as data and TimedeltaIndex as index\n # will result NaN in result Series data\n expected = self.series_klass(\n data=['A', 'B', 'C'],\n index=pd.to_timedelta([0, 10, 20], unit='s')\n )\n\n result = self.series_klass(\n 
data={pd.to_timedelta(0, unit='s'): 'A',\n pd.to_timedelta(10, unit='s'): 'B',\n pd.to_timedelta(20, unit='s'): 'C'},\n index=pd.to_timedelta([0, 10, 20], unit='s')\n )\n self._assert_series_equal(result, expected)\n\n def test_from_array_deprecated(self):\n\n with tm.assert_produces_warning(FutureWarning):\n self.series_klass.from_array([1, 2, 3])\n\n\nclass TestSeriesMisc(TestData, SharedWithSparse):\n\n series_klass = Series\n # SharedWithSparse tests use generic, series_klass-agnostic assertion\n _assert_series_equal = staticmethod(tm.assert_series_equal)\n\n def test_tab_completion(self):\n # GH 9910\n s = Series(list('abcd'))\n # Series of str values should have .str but not .dt/.cat in __dir__\n assert 'str' in dir(s)\n assert 'dt' not in dir(s)\n assert 'cat' not in dir(s)\n\n # similarly for .dt\n s = Series(date_range('1/1/2015', periods=5))\n assert 'dt' in dir(s)\n assert 'str' not in dir(s)\n assert 'cat' not in dir(s)\n\n # Similarly for .cat, but with the twist that str and dt should be\n # there if the categories are of that type first cat and str.\n s = Series(list('abbcd'), dtype=\"category\")\n assert 'cat' in dir(s)\n assert 'str' in dir(s) # as it is a string categorical\n assert 'dt' not in dir(s)\n\n # similar to cat and str\n s = Series(date_range('1/1/2015', periods=5)).astype(\"category\")\n assert 'cat' in dir(s)\n assert 'str' not in dir(s)\n assert 'dt' in dir(s) # as it is a datetime categorical\n\n def test_tab_completion_with_categorical(self):\n # test the tab completion display\n ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',\n 'add_categories', 'remove_categories',\n 'rename_categories', 'reorder_categories',\n 'remove_unused_categories', 'as_ordered', 'as_unordered']\n\n def get_dir(s):\n results = [r for r in s.cat.__dir__() if not r.startswith('_')]\n return list(sorted(set(results)))\n\n s = Series(list('aabbcde')).astype('category')\n results = get_dir(s)\n tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))\n\n @pytest.mark.parametrize(\"index\", [\n tm.makeUnicodeIndex(10),\n tm.makeStringIndex(10),\n tm.makeCategoricalIndex(10),\n Index(['foo', 'bar', 'baz'] * 2),\n tm.makeDateIndex(10),\n tm.makePeriodIndex(10),\n tm.makeTimedeltaIndex(10),\n tm.makeIntIndex(10),\n tm.makeUIntIndex(10),\n tm.makeIntIndex(10),\n tm.makeFloatIndex(10),\n Index([True, False]),\n Index(['a{}'.format(i) for i in range(101)]),\n pd.MultiIndex.from_tuples(lzip('ABCD', 'EFGH')),\n pd.MultiIndex.from_tuples(lzip([0, 1, 2, 3], 'EFGH')), ])\n def test_index_tab_completion(self, index):\n # dir contains string-like values of the Index.\n s = pd.Series(index=index)\n dir_s = dir(s)\n for i, x in enumerate(s.index.unique(level=0)):\n if i < 100:\n assert (not isinstance(x, string_types) or\n not isidentifier(x) or x in dir_s)\n else:\n assert x not in dir_s\n\n def test_not_hashable(self):\n s_empty = Series()\n s = Series([1])\n pytest.raises(TypeError, hash, s_empty)\n pytest.raises(TypeError, hash, s)\n\n def test_contains(self):\n tm.assert_contains_all(self.ts.index, self.ts)\n\n def test_iter(self):\n for i, val in enumerate(self.series):\n assert val == self.series[i]\n\n for i, val in enumerate(self.ts):\n assert val == self.ts[i]\n\n def test_keys(self):\n # HACK: By doing this in two stages, we avoid 2to3 wrapping the call\n # to .keys() in a list()\n getkeys = self.ts.keys\n assert getkeys() is self.ts.index\n\n def test_values(self):\n tm.assert_almost_equal(self.ts.values, self.ts, check_dtype=False)\n\n def test_iteritems(self):\n 
for idx, val in compat.iteritems(self.series):\n assert val == self.series[idx]\n\n for idx, val in compat.iteritems(self.ts):\n assert val == self.ts[idx]\n\n # assert is lazy (generators don't define reverse, lists do)\n assert not hasattr(self.series.iteritems(), 'reverse')\n\n def test_items(self):\n for idx, val in self.series.items():\n assert val == self.series[idx]\n\n for idx, val in self.ts.items():\n assert val == self.ts[idx]\n\n # assert is lazy (generators don't define reverse, lists do)\n assert not hasattr(self.series.items(), 'reverse')\n\n def test_raise_on_info(self):\n s = Series(np.random.randn(10))\n with pytest.raises(AttributeError):\n s.info()\n\n def test_copy(self):\n\n for deep in [None, False, True]:\n s = Series(np.arange(10), dtype='float64')\n\n # default deep is True\n if deep is None:\n s2 = s.copy()\n else:\n s2 = s.copy(deep=deep)\n\n s2[::2] = np.NaN\n\n if deep is None or deep is True:\n # Did not modify original Series\n assert np.isnan(s2[0])\n assert not np.isnan(s[0])\n else:\n # we DID modify the original Series\n assert np.isnan(s2[0])\n assert np.isnan(s[0])\n\n # GH 11794\n # copy of tz-aware\n expected = Series([Timestamp('2012/01/01', tz='UTC')])\n expected2 = Series([Timestamp('1999/01/01', tz='UTC')])\n\n for deep in [None, False, True]:\n\n s = Series([Timestamp('2012/01/01', tz='UTC')])\n\n if deep is None:\n s2 = s.copy()\n else:\n s2 = s.copy(deep=deep)\n\n s2[0] = pd.Timestamp('1999/01/01', tz='UTC')\n\n # default deep is True\n if deep is None or deep is True:\n # Did not modify original Series\n assert_series_equal(s2, expected2)\n assert_series_equal(s, expected)\n else:\n # we DID modify the original Series\n assert_series_equal(s2, expected2)\n assert_series_equal(s, expected2)\n\n def test_axis_alias(self):\n s = Series([1, 2, np.nan])\n assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))\n assert s.dropna().sum('rows') == 3\n assert s._get_axis_number('rows') == 0\n assert s._get_axis_name('rows') == 'index'\n\n def test_class_axis(self):\n # https://github.com/pandas-dev/pandas/issues/18147\n # no exception and no empty docstring\n assert pydoc.getdoc(Series.index)\n\n def test_numpy_unique(self):\n # it works!\n np.unique(self.ts)\n\n def test_ndarray_compat(self):\n\n # test numpy compat with Series as sub-class of NDFrame\n tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],\n index=date_range('1/1/2000', periods=1000))\n\n def f(x):\n return x[x.idxmax()]\n\n result = tsdf.apply(f)\n expected = tsdf.max()\n tm.assert_series_equal(result, expected)\n\n # .item()\n s = Series([1])\n result = s.item()\n assert result == 1\n assert s.item() == s.iloc[0]\n\n # using an ndarray like function\n s = Series(np.random.randn(10))\n result = Series(np.ones_like(s))\n expected = Series(1, index=range(10), dtype='float64')\n tm.assert_series_equal(result, expected)\n\n # ravel\n s = Series(np.random.randn(10))\n tm.assert_almost_equal(s.ravel(order='F'), s.values.ravel(order='F'))\n\n # compress\n # GH 6658\n s = Series([0, 1., -1], index=list('abc'))\n result = np.compress(s > 0, s)\n tm.assert_series_equal(result, Series([1.], index=['b']))\n\n result = np.compress(s < -1, s)\n # result empty Index(dtype=object) as the same as original\n exp = Series([], dtype='float64', index=Index([], dtype='object'))\n tm.assert_series_equal(result, exp)\n\n s = Series([0, 1., -1], index=[.1, .2, .3])\n result = np.compress(s > 0, s)\n tm.assert_series_equal(result, Series([1.], index=[.2]))\n\n result = np.compress(s < 
-1, s)\n # result empty Float64Index as the same as original\n exp = Series([], dtype='float64', index=Index([], dtype='float64'))\n tm.assert_series_equal(result, exp)\n\n def test_str_attribute(self):\n # GH9068\n methods = ['strip', 'rstrip', 'lstrip']\n s = Series([' jack', 'jill ', ' jesse ', 'frank'])\n for method in methods:\n expected = Series([getattr(str, method)(x) for x in s.values])\n assert_series_equal(getattr(Series.str, method)(s.str), expected)\n\n # str accessor only valid with string values\n s = Series(range(5))\n with tm.assert_raises_regex(AttributeError,\n 'only use .str accessor'):\n s.str.repeat(2)\n\n def test_empty_method(self):\n s_empty = pd.Series()\n assert s_empty.empty\n\n for full_series in [pd.Series([1]), pd.Series(index=[1])]:\n assert not full_series.empty\n\n def test_tab_complete_warning(self, ip):\n # https://github.com/pandas-dev/pandas/issues/16409\n pytest.importorskip('IPython', minversion=\"6.0.0\")\n from IPython.core.completer import provisionalcompleter\n\n code = \"import pandas as pd; s = pd.Series()\"\n ip.run_code(code)\n with tm.assert_produces_warning(None):\n with provisionalcompleter('ignore'):\n list(ip.Completer.completions('s.', 1))\n\n\nclass TestCategoricalSeries(object):\n\n @pytest.mark.parametrize(\n \"method\",\n [\n lambda x: x.cat.set_categories([1, 2, 3]),\n lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),\n lambda x: x.cat.rename_categories([1, 2, 3]),\n lambda x: x.cat.remove_unused_categories(),\n lambda x: x.cat.remove_categories([2]),\n lambda x: x.cat.add_categories([4]),\n lambda x: x.cat.as_ordered(),\n lambda x: x.cat.as_unordered(),\n ])\n def test_getname_categorical_accessor(self, method):\n # GH 17509\n s = Series([1, 2, 3], name='A').astype('category')\n expected = 'A'\n result = method(s).name\n assert result == expected\n\n def test_cat_accessor(self):\n s = Series(Categorical([\"a\", \"b\", np.nan, \"a\"]))\n tm.assert_index_equal(s.cat.categories, Index([\"a\", \"b\"]))\n assert not s.cat.ordered, False\n\n exp = Categorical([\"a\", \"b\", np.nan, \"a\"], categories=[\"b\", \"a\"])\n s.cat.set_categories([\"b\", \"a\"], inplace=True)\n tm.assert_categorical_equal(s.values, exp)\n\n res = s.cat.set_categories([\"b\", \"a\"])\n tm.assert_categorical_equal(res.values, exp)\n\n s[:] = \"a\"\n s = s.cat.remove_unused_categories()\n tm.assert_index_equal(s.cat.categories, Index([\"a\"]))\n\n def test_cat_accessor_api(self):\n # GH 9322\n from pandas.core.arrays.categorical import CategoricalAccessor\n assert Series.cat is CategoricalAccessor\n s = Series(list('aabbcde')).astype('category')\n assert isinstance(s.cat, CategoricalAccessor)\n\n invalid = Series([1])\n with tm.assert_raises_regex(AttributeError,\n \"only use .cat accessor\"):\n invalid.cat\n assert not hasattr(invalid, 'cat')\n\n def test_cat_accessor_no_new_attributes(self):\n # https://github.com/pandas-dev/pandas/issues/10673\n c = Series(list('aabbcde')).astype('category')\n with tm.assert_raises_regex(AttributeError,\n \"You cannot add any new attribute\"):\n c.cat.xlabel = \"a\"\n\n def test_categorical_delegations(self):\n\n # invalid accessor\n pytest.raises(AttributeError, lambda: Series([1, 2, 3]).cat)\n tm.assert_raises_regex(\n AttributeError,\n r\"Can only use .cat accessor with a 'category' dtype\",\n lambda: Series([1, 2, 3]).cat)\n pytest.raises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)\n pytest.raises(AttributeError, lambda: Series(np.arange(5.)).cat)\n pytest.raises(AttributeError,\n lambda: 
Series([Timestamp('20130101')]).cat)\n\n # Series should delegate calls to '.categories', '.codes', '.ordered'\n # and the methods '.set_categories()' 'drop_unused_categories()' to the\n # categorical# -*- coding: utf-8 -*-\n s = Series(Categorical([\"a\", \"b\", \"c\", \"a\"], ordered=True))\n exp_categories = Index([\"a\", \"b\", \"c\"])\n tm.assert_index_equal(s.cat.categories, exp_categories)\n s.cat.categories = [1, 2, 3]\n exp_categories = Index([1, 2, 3])\n tm.assert_index_equal(s.cat.categories, exp_categories)\n\n exp_codes = Series([0, 1, 2, 0], dtype='int8')\n tm.assert_series_equal(s.cat.codes, exp_codes)\n\n assert s.cat.ordered\n s = s.cat.as_unordered()\n assert not s.cat.ordered\n s.cat.as_ordered(inplace=True)\n assert s.cat.ordered\n\n # reorder\n s = Series(Categorical([\"a\", \"b\", \"c\", \"a\"], ordered=True))\n exp_categories = Index([\"c\", \"b\", \"a\"])\n exp_values = np.array([\"a\", \"b\", \"c\", \"a\"], dtype=np.object_)\n s = s.cat.set_categories([\"c\", \"b\", \"a\"])\n tm.assert_index_equal(s.cat.categories, exp_categories)\n tm.assert_numpy_array_equal(s.values.__array__(), exp_values)\n tm.assert_numpy_array_equal(s.__array__(), exp_values)\n\n # remove unused categories\n s = Series(Categorical([\"a\", \"b\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"\n ]))\n exp_categories = Index([\"a\", \"b\"])\n exp_values = np.array([\"a\", \"b\", \"b\", \"a\"], dtype=np.object_)\n s = s.cat.remove_unused_categories()\n tm.assert_index_equal(s.cat.categories, exp_categories)\n tm.assert_numpy_array_equal(s.values.__array__(), exp_values)\n tm.assert_numpy_array_equal(s.__array__(), exp_values)\n\n # This method is likely to be confused, so test that it raises an error\n # on wrong inputs:\n def f():\n s.set_categories([4, 3, 2, 1])\n\n pytest.raises(Exception, f)\n # right: s.cat.set_categories([4,3,2,1])\n\n # GH18862 (let Series.cat.rename_categories take callables)\n s = Series(Categorical([\"a\", \"b\", \"c\", \"a\"], ordered=True))\n result = s.cat.rename_categories(lambda x: x.upper())\n expected = Series(Categorical([\"A\", \"B\", \"C\", \"A\"],\n categories=[\"A\", \"B\", \"C\"],\n ordered=True))\n tm.assert_series_equal(result, expected)\n\n def test_str_accessor_api_for_categorical(self):\n # https://github.com/pandas-dev/pandas/issues/10661\n from pandas.core.strings import StringMethods\n s = Series(list('aabb'))\n s = s + \" \" + s\n c = s.astype('category')\n assert isinstance(c.str, StringMethods)\n\n # str functions, which need special arguments\n special_func_defs = [\n ('cat', (list(\"zyxw\"),), {\"sep\": \",\"}),\n ('center', (10,), {}),\n ('contains', (\"a\",), {}),\n ('count', (\"a\",), {}),\n ('decode', (\"UTF-8\",), {}),\n ('encode', (\"UTF-8\",), {}),\n ('endswith', (\"a\",), {}),\n ('extract', (\"([a-z]*) \",), {\"expand\": False}),\n ('extract', (\"([a-z]*) \",), {\"expand\": True}),\n ('extractall', (\"([a-z]*) \",), {}),\n ('find', (\"a\",), {}),\n ('findall', (\"a\",), {}),\n ('index', (\" \",), {}),\n ('ljust', (10,), {}),\n ('match', (\"a\"), {}), # deprecated...\n ('normalize', (\"NFC\",), {}),\n ('pad', (10,), {}),\n ('partition', (\" \",), {\"expand\": False}), # not default\n ('partition', (\" \",), {\"expand\": True}), # default\n ('repeat', (3,), {}),\n ('replace', (\"a\", \"z\"), {}),\n ('rfind', (\"a\",), {}),\n ('rindex', (\" \",), {}),\n ('rjust', (10,), {}),\n ('rpartition', (\" \",), {\"expand\": False}), # not default\n ('rpartition', (\" \",), {\"expand\": True}), # default\n ('slice', (0, 1), {}),\n ('slice_replace', 
(0, 1, \"z\"), {}),\n ('split', (\" \",), {\"expand\": False}), # default\n ('split', (\" \",), {\"expand\": True}), # not default\n ('startswith', (\"a\",), {}),\n ('wrap', (2,), {}),\n ('zfill', (10,), {})\n ]\n _special_func_names = [f[0] for f in special_func_defs]\n\n # * get, join: they need a individual elements of type lists, but\n # we can't make a categorical with lists as individual categories.\n # -> `s.str.split(\" \").astype(\"category\")` will error!\n # * `translate` has different interfaces for py2 vs. py3\n _ignore_names = [\"get\", \"join\", \"translate\"]\n\n str_func_names = [f for f in dir(s.str) if not (\n f.startswith(\"_\") or\n f in _special_func_names or\n f in _ignore_names)]\n\n func_defs = [(f, (), {}) for f in str_func_names]\n func_defs.extend(special_func_defs)\n\n for func, args, kwargs in func_defs:\n res = getattr(c.str, func)(*args, **kwargs)\n exp = getattr(s.str, func)(*args, **kwargs)\n\n if isinstance(res, DataFrame):\n tm.assert_frame_equal(res, exp)\n else:\n tm.assert_series_equal(res, exp)\n\n invalid = Series([1, 2, 3]).astype('category')\n with tm.assert_raises_regex(AttributeError,\n \"Can only use .str \"\n \"accessor with string\"):\n invalid.str\n assert not hasattr(invalid, 'str')\n\n def test_dt_accessor_api_for_categorical(self):\n # https://github.com/pandas-dev/pandas/issues/10661\n from pandas.core.indexes.accessors import Properties\n\n s_dr = Series(date_range('1/1/2015', periods=5, tz=\"MET\"))\n c_dr = s_dr.astype(\"category\")\n\n s_pr = Series(period_range('1/1/2015', freq='D', periods=5))\n c_pr = s_pr.astype(\"category\")\n\n s_tdr = Series(timedelta_range('1 days', '10 days'))\n c_tdr = s_tdr.astype(\"category\")\n\n # only testing field (like .day)\n # and bool (is_month_start)\n get_ops = lambda x: x._datetimelike_ops\n\n test_data = [\n (\"Datetime\", get_ops(DatetimeIndex), s_dr, c_dr),\n (\"Period\", get_ops(PeriodIndex), s_pr, c_pr),\n (\"Timedelta\", get_ops(TimedeltaIndex), s_tdr, c_tdr)]\n\n assert isinstance(c_dr.dt, Properties)\n\n special_func_defs = [\n ('strftime', (\"%Y-%m-%d\",), {}),\n ('tz_convert', (\"EST\",), {}),\n ('round', (\"D\",), {}),\n ('floor', (\"D\",), {}),\n ('ceil', (\"D\",), {}),\n ('asfreq', (\"D\",), {}),\n # ('tz_localize', (\"UTC\",), {}),\n ]\n _special_func_names = [f[0] for f in special_func_defs]\n\n # the series is already localized\n _ignore_names = ['tz_localize', 'components']\n\n for name, attr_names, s, c in test_data:\n func_names = [f\n for f in dir(s.dt)\n if not (f.startswith(\"_\") or f in attr_names or f in\n _special_func_names or f in _ignore_names)]\n\n func_defs = [(f, (), {}) for f in func_names]\n for f_def in special_func_defs:\n if f_def[0] in dir(s.dt):\n func_defs.append(f_def)\n\n for func, args, kwargs in func_defs:\n res = getattr(c.dt, func)(*args, **kwargs)\n exp = getattr(s.dt, func)(*args, **kwargs)\n\n if isinstance(res, DataFrame):\n tm.assert_frame_equal(res, exp)\n elif isinstance(res, Series):\n tm.assert_series_equal(res, exp)\n else:\n tm.assert_almost_equal(res, exp)\n\n for attr in attr_names:\n try:\n res = getattr(c.dt, attr)\n exp = getattr(s.dt, attr)\n except Exception as e:\n print(name, attr)\n raise e\n\n if isinstance(res, DataFrame):\n tm.assert_frame_equal(res, exp)\n elif isinstance(res, Series):\n tm.assert_series_equal(res, exp)\n else:\n tm.assert_almost_equal(res, exp)\n\n invalid = Series([1, 2, 3]).astype('category')\n with tm.assert_raises_regex(\n AttributeError, \"Can only use .dt accessor with datetimelike\"):\n invalid.dt\n 
assert not hasattr(invalid, 'str')\n", "\"\"\"\nUtility function to facilitate testing.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport re\nimport gc\nimport operator\nimport warnings\nfrom functools import partial, wraps\nimport shutil\nimport contextlib\nfrom tempfile import mkdtemp, mkstemp\nfrom unittest.case import SkipTest\nfrom warnings import WarningMessage\nimport pprint\n\nfrom numpy.core import(\n float32, empty, arange, array_repr, ndarray, isnat, array)\nfrom numpy.lib.utils import deprecate\n\nif sys.version_info[0] >= 3:\n from io import StringIO\nelse:\n from StringIO import StringIO\n\n__all__ = [\n 'assert_equal', 'assert_almost_equal', 'assert_approx_equal',\n 'assert_array_equal', 'assert_array_less', 'assert_string_equal',\n 'assert_array_almost_equal', 'assert_raises', 'build_err_msg',\n 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',\n 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',\n 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',\n 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',\n 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',\n 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',\n 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',\n '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',\n ]\n\n\nclass KnownFailureException(Exception):\n '''Raise this exception to mark a test as a known failing test.'''\n pass\n\n\nKnownFailureTest = KnownFailureException # backwards compat\nverbose = 0\n\nIS_PYPY = '__pypy__' in sys.modules\nHAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None\n\n\ndef import_nose():\n \"\"\" Import nose only when needed.\n \"\"\"\n nose_is_good = True\n minimum_nose_version = (1, 0, 0)\n try:\n import nose\n except ImportError:\n nose_is_good = False\n else:\n if nose.__versioninfo__ < minimum_nose_version:\n nose_is_good = False\n\n if not nose_is_good:\n msg = ('Need nose >= %d.%d.%d for tests - see '\n 'http://nose.readthedocs.io' %\n minimum_nose_version)\n raise ImportError(msg)\n\n return nose\n\n\ndef assert_(val, msg=''):\n \"\"\"\n Assert that works in release mode.\n Accepts callable msg to allow deferring evaluation until failure.\n\n The Python built-in ``assert`` does not work when executing code in\n optimized mode (the ``-O`` flag) - no byte-code is generated for it.\n\n For documentation on usage, refer to the Python documentation.\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n if not val:\n try:\n smsg = msg()\n except TypeError:\n smsg = msg\n raise AssertionError(smsg)\n\n\ndef gisnan(x):\n \"\"\"like isnan, but always raise an error if type not supported instead of\n returning a TypeError object.\n\n Notes\n -----\n isnan and other ufunc sometimes return a NotImplementedType object instead\n of raising any exception. This function is a wrapper to make sure an\n exception is always raised.\n\n This should be removed once this problem is solved at the Ufunc level.\"\"\"\n from numpy.core import isnan\n st = isnan(x)\n if isinstance(st, type(NotImplemented)):\n raise TypeError(\"isnan not supported for this type\")\n return st\n\n\ndef gisfinite(x):\n \"\"\"like isfinite, but always raise an error if type not supported instead of\n returning a TypeError object.\n\n Notes\n -----\n isfinite and other ufunc sometimes return a NotImplementedType object instead\n of raising any exception. 
This function is a wrapper to make sure an\n exception is always raised.\n\n This should be removed once this problem is solved at the Ufunc level.\"\"\"\n from numpy.core import isfinite, errstate\n with errstate(invalid='ignore'):\n st = isfinite(x)\n if isinstance(st, type(NotImplemented)):\n raise TypeError(\"isfinite not supported for this type\")\n return st\n\n\ndef gisinf(x):\n \"\"\"like isinf, but always raise an error if type not supported instead of\n returning a TypeError object.\n\n Notes\n -----\n isinf and other ufunc sometimes return a NotImplementedType object instead\n of raising any exception. This function is a wrapper to make sure an\n exception is always raised.\n\n This should be removed once this problem is solved at the Ufunc level.\"\"\"\n from numpy.core import isinf, errstate\n with errstate(invalid='ignore'):\n st = isinf(x)\n if isinstance(st, type(NotImplemented)):\n raise TypeError(\"isinf not supported for this type\")\n return st\n\n\n@deprecate(message=\"numpy.testing.rand is deprecated in numpy 1.11. \"\n \"Use numpy.random.rand instead.\")\ndef rand(*args):\n \"\"\"Returns an array of random numbers with the given shape.\n\n This only uses the standard library, so it is useful for testing purposes.\n \"\"\"\n import random\n from numpy.core import zeros, float64\n results = zeros(args, float64)\n f = results.flat\n for i in range(len(f)):\n f[i] = random.random()\n return results\n\n\nif os.name == 'nt':\n # Code \"stolen\" from enthought/debug/memusage.py\n def GetPerformanceAttributes(object, counter, instance=None,\n inum=-1, format=None, machine=None):\n # NOTE: Many counters require 2 samples to give accurate results,\n # including \"% Processor Time\" (as by definition, at any instant, a\n # thread's CPU usage is either 0 or 100). To read counters like this,\n # you should copy this function, but keep the counter open, and call\n # CollectQueryData() each time you need to know.\n # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp\n # My older explanation for this was that the \"AddCounter\" process forced\n # the CPU to 100%, but the above makes more sense :)\n import win32pdh\n if format is None:\n format = win32pdh.PDH_FMT_LONG\n path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))\n hq = win32pdh.OpenQuery()\n try:\n hc = win32pdh.AddCounter(hq, path)\n try:\n win32pdh.CollectQueryData(hq)\n type, val = win32pdh.GetFormattedCounterValue(hc, format)\n return val\n finally:\n win32pdh.RemoveCounter(hc)\n finally:\n win32pdh.CloseQuery(hq)\n\n def memusage(processName=\"python\", instance=0):\n # from win32pdhutil, part of the win32all package\n import win32pdh\n return GetPerformanceAttributes(\"Process\", \"Virtual Bytes\",\n processName, instance,\n win32pdh.PDH_FMT_LONG, None)\nelif sys.platform[:5] == 'linux':\n\n def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):\n \"\"\"\n Return virtual memory size in bytes of the running python.\n\n \"\"\"\n try:\n f = open(_proc_pid_stat, 'r')\n l = f.readline().split(' ')\n f.close()\n return int(l[22])\n except Exception:\n return\nelse:\n def memusage():\n \"\"\"\n Return memory usage of running python. [Not implemented]\n\n \"\"\"\n raise NotImplementedError\n\n\nif sys.platform[:5] == 'linux':\n def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),\n _load_time=[]):\n \"\"\"\n Return number of jiffies elapsed.\n\n Return number of jiffies (1/100ths of a second) that this\n process has been scheduled in user mode. 
See man 5 proc.\n\n \"\"\"\n import time\n if not _load_time:\n _load_time.append(time.time())\n try:\n f = open(_proc_pid_stat, 'r')\n l = f.readline().split(' ')\n f.close()\n return int(l[13])\n except Exception:\n return int(100*(time.time()-_load_time[0]))\nelse:\n # os.getpid is not in all platforms available.\n # Using time is safe but inaccurate, especially when process\n # was suspended or sleeping.\n def jiffies(_load_time=[]):\n \"\"\"\n Return number of jiffies elapsed.\n\n Return number of jiffies (1/100ths of a second) that this\n process has been scheduled in user mode. See man 5 proc.\n\n \"\"\"\n import time\n if not _load_time:\n _load_time.append(time.time())\n return int(100*(time.time()-_load_time[0]))\n\n\ndef build_err_msg(arrays, err_msg, header='Items are not equal:',\n verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):\n msg = ['\\n' + header]\n if err_msg:\n if err_msg.find('\\n') == -1 and len(err_msg) < 79-len(header):\n msg = [msg[0] + ' ' + err_msg]\n else:\n msg.append(err_msg)\n if verbose:\n for i, a in enumerate(arrays):\n\n if isinstance(a, ndarray):\n # precision argument is only needed if the objects are ndarrays\n r_func = partial(array_repr, precision=precision)\n else:\n r_func = repr\n\n try:\n r = r_func(a)\n except Exception as exc:\n r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)\n if r.count('\\n') > 3:\n r = '\\n'.join(r.splitlines()[:3])\n r += '...'\n msg.append(' %s: %s' % (names[i], r))\n return '\\n'.join(msg)\n\n\ndef assert_equal(actual, desired, err_msg='', verbose=True):\n \"\"\"\n Raises an AssertionError if two objects are not equal.\n\n Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),\n check that all elements of these objects are equal. An exception is raised\n at the first conflicting values.\n\n Parameters\n ----------\n actual : array_like\n The object to check.\n desired : array_like\n The expected object.\n err_msg : str, optional\n The error message to be printed in case of failure.\n verbose : bool, optional\n If True, the conflicting values are appended to the error message.\n\n Raises\n ------\n AssertionError\n If actual and desired are not equal.\n\n Examples\n --------\n >>> np.testing.assert_equal([4,5], [4,6])\n ...\n <type 'exceptions.AssertionError'>:\n Items are not equal:\n item=1\n ACTUAL: 5\n DESIRED: 6\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n if isinstance(desired, dict):\n if not isinstance(actual, dict):\n raise AssertionError(repr(type(actual)))\n assert_equal(len(actual), len(desired), err_msg, verbose)\n for k, i in desired.items():\n if k not in actual:\n raise AssertionError(repr(k))\n assert_equal(actual[k], desired[k], 'key=%r\\n%s' % (k, err_msg), verbose)\n return\n if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):\n assert_equal(len(actual), len(desired), err_msg, verbose)\n for k in range(len(desired)):\n assert_equal(actual[k], desired[k], 'item=%r\\n%s' % (k, err_msg), verbose)\n return\n from numpy.core import ndarray, isscalar, signbit\n from numpy.lib import iscomplexobj, real, imag\n if isinstance(actual, ndarray) or isinstance(desired, ndarray):\n return assert_array_equal(actual, desired, err_msg, verbose)\n msg = build_err_msg([actual, desired], err_msg, verbose=verbose)\n\n # Handle complex numbers: separate into real/imag to handle\n # nan/inf/negative zero correctly\n # XXX: catch ValueError for subclasses of ndarray where iscomplex fail\n try:\n usecomplex = iscomplexobj(actual) or 
iscomplexobj(desired)\n except ValueError:\n usecomplex = False\n\n if usecomplex:\n if iscomplexobj(actual):\n actualr = real(actual)\n actuali = imag(actual)\n else:\n actualr = actual\n actuali = 0\n if iscomplexobj(desired):\n desiredr = real(desired)\n desiredi = imag(desired)\n else:\n desiredr = desired\n desiredi = 0\n try:\n assert_equal(actualr, desiredr)\n assert_equal(actuali, desiredi)\n except AssertionError:\n raise AssertionError(msg)\n\n # isscalar test to check cases such as [np.nan] != np.nan\n if isscalar(desired) != isscalar(actual):\n raise AssertionError(msg)\n\n # Inf/nan/negative zero handling\n try:\n isdesnan = gisnan(desired)\n isactnan = gisnan(actual)\n if isdesnan and isactnan:\n return # both nan, so equal\n\n # handle signed zero specially for floats\n if desired == 0 and actual == 0:\n if not signbit(desired) == signbit(actual):\n raise AssertionError(msg)\n\n except (TypeError, ValueError, NotImplementedError):\n pass\n\n try:\n isdesnat = isnat(desired)\n isactnat = isnat(actual)\n dtypes_match = array(desired).dtype.type == array(actual).dtype.type\n if isdesnat and isactnat:\n # If both are NaT (and have the same dtype -- datetime or\n # timedelta) they are considered equal.\n if dtypes_match:\n return\n else:\n raise AssertionError(msg)\n\n except (TypeError, ValueError, NotImplementedError):\n pass\n\n try:\n # Explicitly use __eq__ for comparison, gh-2552\n if not (desired == actual):\n raise AssertionError(msg)\n\n except (DeprecationWarning, FutureWarning) as e:\n # this handles the case when the two types are not even comparable\n if 'elementwise == comparison' in e.args[0]:\n raise AssertionError(msg)\n else:\n raise\n\n\ndef print_assert_equal(test_string, actual, desired):\n \"\"\"\n Test if two objects are equal, and print an error message if test fails.\n\n The test is performed with ``actual == desired``.\n\n Parameters\n ----------\n test_string : str\n The message supplied to AssertionError.\n actual : object\n The object to test for equality against `desired`.\n desired : object\n The expected result.\n\n Examples\n --------\n >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])\n >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])\n Traceback (most recent call last):\n ...\n AssertionError: Test XYZ of func xyz failed\n ACTUAL:\n [0, 1]\n DESIRED:\n [0, 2]\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n import pprint\n\n if not (actual == desired):\n msg = StringIO()\n msg.write(test_string)\n msg.write(' failed\\nACTUAL: \\n')\n pprint.pprint(actual, msg)\n msg.write('DESIRED: \\n')\n pprint.pprint(desired, msg)\n raise AssertionError(msg.getvalue())\n\n\ndef assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):\n \"\"\"\n Raises an AssertionError if two items are not equal up to desired\n precision.\n\n .. note:: It is recommended to use one of `assert_allclose`,\n `assert_array_almost_equal_nulp` or `assert_array_max_ulp`\n instead of this function for more consistent floating point\n comparisons.\n\n The test verifies that the elements of ``actual`` and ``desired`` satisfy.\n\n ``abs(desired-actual) < 1.5 * 10**(-decimal)``\n\n That is a looser test than originally documented, but agrees with what the\n actual implementation in `assert_array_almost_equal` did up to rounding\n vagaries. An exception is raised at conflicting values. 
For ndarrays this\n delegates to assert_array_almost_equal\n\n Parameters\n ----------\n actual : array_like\n The object to check.\n desired : array_like\n The expected object.\n decimal : int, optional\n Desired precision, default is 7.\n err_msg : str, optional\n The error message to be printed in case of failure.\n verbose : bool, optional\n If True, the conflicting values are appended to the error message.\n\n Raises\n ------\n AssertionError\n If actual and desired are not equal up to specified precision.\n\n See Also\n --------\n assert_allclose: Compare two array_like objects for equality with desired\n relative and/or absolute precision.\n assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal\n\n Examples\n --------\n >>> import numpy.testing as npt\n >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)\n >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)\n ...\n <type 'exceptions.AssertionError'>:\n Items are not equal:\n ACTUAL: 2.3333333333333002\n DESIRED: 2.3333333399999998\n\n >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),\n ... np.array([1.0,2.33333334]), decimal=9)\n ...\n <type 'exceptions.AssertionError'>:\n Arrays are not almost equal\n <BLANKLINE>\n (mismatch 50.0%)\n x: array([ 1. , 2.33333333])\n y: array([ 1. , 2.33333334])\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n from numpy.core import ndarray\n from numpy.lib import iscomplexobj, real, imag\n\n # Handle complex numbers: separate into real/imag to handle\n # nan/inf/negative zero correctly\n # XXX: catch ValueError for subclasses of ndarray where iscomplex fail\n try:\n usecomplex = iscomplexobj(actual) or iscomplexobj(desired)\n except ValueError:\n usecomplex = False\n\n def _build_err_msg():\n header = ('Arrays are not almost equal to %d decimals' % decimal)\n return build_err_msg([actual, desired], err_msg, verbose=verbose,\n header=header)\n\n if usecomplex:\n if iscomplexobj(actual):\n actualr = real(actual)\n actuali = imag(actual)\n else:\n actualr = actual\n actuali = 0\n if iscomplexobj(desired):\n desiredr = real(desired)\n desiredi = imag(desired)\n else:\n desiredr = desired\n desiredi = 0\n try:\n assert_almost_equal(actualr, desiredr, decimal=decimal)\n assert_almost_equal(actuali, desiredi, decimal=decimal)\n except AssertionError:\n raise AssertionError(_build_err_msg())\n\n if isinstance(actual, (ndarray, tuple, list)) \\\n or isinstance(desired, (ndarray, tuple, list)):\n return assert_array_almost_equal(actual, desired, decimal, err_msg)\n try:\n # If one of desired/actual is not finite, handle it specially here:\n # check that both are nan if any is a nan, and test for equality\n # otherwise\n if not (gisfinite(desired) and gisfinite(actual)):\n if gisnan(desired) or gisnan(actual):\n if not (gisnan(desired) and gisnan(actual)):\n raise AssertionError(_build_err_msg())\n else:\n if not desired == actual:\n raise AssertionError(_build_err_msg())\n return\n except (NotImplementedError, TypeError):\n pass\n if abs(desired - actual) >= 1.5 * 10.0**(-decimal):\n raise AssertionError(_build_err_msg())\n\n\ndef assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):\n \"\"\"\n Raises an AssertionError if two items are not equal up to significant\n digits.\n\n .. 
note:: It is recommended to use one of `assert_allclose`,\n `assert_array_almost_equal_nulp` or `assert_array_max_ulp`\n instead of this function for more consistent floating point\n comparisons.\n\n Given two numbers, check that they are approximately equal.\n Approximately equal is defined as the number of significant digits\n that agree.\n\n Parameters\n ----------\n actual : scalar\n The object to check.\n desired : scalar\n The expected object.\n significant : int, optional\n Desired precision, default is 7.\n err_msg : str, optional\n The error message to be printed in case of failure.\n verbose : bool, optional\n If True, the conflicting values are appended to the error message.\n\n Raises\n ------\n AssertionError\n If actual and desired are not equal up to specified precision.\n\n See Also\n --------\n assert_allclose: Compare two array_like objects for equality with desired\n relative and/or absolute precision.\n assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal\n\n Examples\n --------\n >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)\n >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,\n significant=8)\n >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,\n significant=8)\n ...\n <type 'exceptions.AssertionError'>:\n Items are not equal to 8 significant digits:\n ACTUAL: 1.234567e-021\n DESIRED: 1.2345672000000001e-021\n\n the evaluated condition that raises the exception is\n\n >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)\n True\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n import numpy as np\n\n (actual, desired) = map(float, (actual, desired))\n if desired == actual:\n return\n # Normalized the numbers to be in range (-10.0,10.0)\n # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))\n with np.errstate(invalid='ignore'):\n scale = 0.5*(np.abs(desired) + np.abs(actual))\n scale = np.power(10, np.floor(np.log10(scale)))\n try:\n sc_desired = desired/scale\n except ZeroDivisionError:\n sc_desired = 0.0\n try:\n sc_actual = actual/scale\n except ZeroDivisionError:\n sc_actual = 0.0\n msg = build_err_msg([actual, desired], err_msg,\n header='Items are not equal to %d significant digits:' %\n significant,\n verbose=verbose)\n try:\n # If one of desired/actual is not finite, handle it specially here:\n # check that both are nan if any is a nan, and test for equality\n # otherwise\n if not (gisfinite(desired) and gisfinite(actual)):\n if gisnan(desired) or gisnan(actual):\n if not (gisnan(desired) and gisnan(actual)):\n raise AssertionError(msg)\n else:\n if not desired == actual:\n raise AssertionError(msg)\n return\n except (TypeError, NotImplementedError):\n pass\n if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):\n raise AssertionError(msg)\n\n\ndef assert_array_compare(comparison, x, y, err_msg='', verbose=True,\n header='', precision=6, equal_nan=True,\n equal_inf=True):\n __tracebackhide__ = True # Hide traceback for py.test\n from numpy.core import array, isnan, inf, bool_\n x = array(x, copy=False, subok=True)\n y = array(y, copy=False, subok=True)\n\n def isnumber(x):\n return x.dtype.char in '?bhilqpBHILQPefdgFDG'\n\n def istime(x):\n return x.dtype.char in \"Mm\"\n\n def func_assert_same_pos(x, y, func=isnan, hasval='nan'):\n \"\"\"Handling nan/inf: combine results of running func on x and y,\n checking that they are True at the same locations.\"\"\"\n # Both the != True comparison here and the cast to bool_ 
at\n # the end are done to deal with `masked`, which cannot be\n # compared usefully, and for which .all() yields masked.\n x_id = func(x)\n y_id = func(y)\n if (x_id == y_id).all() != True:\n msg = build_err_msg([x, y],\n err_msg + '\\nx and y %s location mismatch:'\n % (hasval), verbose=verbose, header=header,\n names=('x', 'y'), precision=precision)\n raise AssertionError(msg)\n # If there is a scalar, then here we know the array has the same\n # flag as it everywhere, so we should return the scalar flag.\n if x_id.ndim == 0:\n return bool_(x_id)\n elif y_id.ndim == 0:\n return bool_(y_id)\n else:\n return y_id\n\n try:\n cond = (x.shape == () or y.shape == ()) or x.shape == y.shape\n if not cond:\n msg = build_err_msg([x, y],\n err_msg\n + '\\n(shapes %s, %s mismatch)' % (x.shape,\n y.shape),\n verbose=verbose, header=header,\n names=('x', 'y'), precision=precision)\n raise AssertionError(msg)\n\n flagged = bool_(False)\n if isnumber(x) and isnumber(y):\n if equal_nan:\n flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')\n\n if equal_inf:\n flagged |= func_assert_same_pos(x, y,\n func=lambda xy: xy == +inf,\n hasval='+inf')\n flagged |= func_assert_same_pos(x, y,\n func=lambda xy: xy == -inf,\n hasval='-inf')\n\n elif istime(x) and istime(y):\n # If one is datetime64 and the other timedelta64 there is no point\n if equal_nan and x.dtype.type == y.dtype.type:\n flagged = func_assert_same_pos(x, y, func=isnat, hasval=\"NaT\")\n\n if flagged.ndim > 0:\n x, y = x[~flagged], y[~flagged]\n # Only do the comparison if actual values are left\n if x.size == 0:\n return\n elif flagged:\n # no sense doing comparison if everything is flagged.\n return\n\n val = comparison(x, y)\n\n if isinstance(val, bool):\n cond = val\n reduced = [0]\n else:\n reduced = val.ravel()\n cond = reduced.all()\n reduced = reduced.tolist()\n # The below comparison is a hack to ensure that fully masked\n # results, for which val.ravel().all() returns np.ma.masked,\n # do not trigger a failure (np.ma.masked != True evaluates as\n # np.ma.masked, which is falsy).\n if cond != True:\n match = 100-100.0*reduced.count(1)/len(reduced)\n msg = build_err_msg([x, y],\n err_msg\n + '\\n(mismatch %s%%)' % (match,),\n verbose=verbose, header=header,\n names=('x', 'y'), precision=precision)\n raise AssertionError(msg)\n except ValueError:\n import traceback\n efmt = traceback.format_exc()\n header = 'error during assertion:\\n\\n%s\\n\\n%s' % (efmt, header)\n\n msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,\n names=('x', 'y'), precision=precision)\n raise ValueError(msg)\n\n\ndef assert_array_equal(x, y, err_msg='', verbose=True):\n \"\"\"\n Raises an AssertionError if two array_like objects are not equal.\n\n Given two array_like objects, check that the shape is equal and all\n elements of these objects are equal. An exception is raised at\n shape mismatch or conflicting values. 
In contrast to the standard usage\n in numpy, NaNs are compared like numbers, no assertion is raised if\n both objects have NaNs in the same positions.\n\n The usual caution for verifying equality with floating point numbers is\n advised.\n\n Parameters\n ----------\n x : array_like\n The actual object to check.\n y : array_like\n The desired, expected object.\n err_msg : str, optional\n The error message to be printed in case of failure.\n verbose : bool, optional\n If True, the conflicting values are appended to the error message.\n\n Raises\n ------\n AssertionError\n If actual and desired objects are not equal.\n\n See Also\n --------\n assert_allclose: Compare two array_like objects for equality with desired\n relative and/or absolute precision.\n assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal\n\n Examples\n --------\n The first assert does not raise an exception:\n\n >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],\n ... [np.exp(0),2.33333, np.nan])\n\n Assert fails with numerical imprecision with floats:\n\n >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],\n ... [1, np.sqrt(np.pi)**2, np.nan])\n ...\n <type 'exceptions.ValueError'>:\n AssertionError:\n Arrays are not equal\n <BLANKLINE>\n (mismatch 50.0%)\n x: array([ 1. , 3.14159265, NaN])\n y: array([ 1. , 3.14159265, NaN])\n\n Use `assert_allclose` or one of the nulp (number of floating point values)\n functions for these cases instead:\n\n >>> np.testing.assert_allclose([1.0,np.pi,np.nan],\n ... [1, np.sqrt(np.pi)**2, np.nan],\n ... rtol=1e-10, atol=0)\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,\n verbose=verbose, header='Arrays are not equal')\n\n\ndef assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):\n \"\"\"\n Raises an AssertionError if two objects are not equal up to desired\n precision.\n\n .. note:: It is recommended to use one of `assert_allclose`,\n `assert_array_almost_equal_nulp` or `assert_array_max_ulp`\n instead of this function for more consistent floating point\n comparisons.\n\n The test verifies identical shapes and that the elements of ``actual`` and\n ``desired`` satisfy.\n\n ``abs(desired-actual) < 1.5 * 10**(-decimal)``\n\n That is a looser test than originally documented, but agrees with what the\n actual implementation did up to rounding vagaries. An exception is raised\n at shape mismatch or conflicting values. In contrast to the standard usage\n in numpy, NaNs are compared like numbers, no assertion is raised if both\n objects have NaNs in the same positions.\n\n Parameters\n ----------\n x : array_like\n The actual object to check.\n y : array_like\n The desired, expected object.\n decimal : int, optional\n Desired precision, default is 6.\n err_msg : str, optional\n The error message to be printed in case of failure.\n verbose : bool, optional\n If True, the conflicting values are appended to the error message.\n\n Raises\n ------\n AssertionError\n If actual and desired are not equal up to specified precision.\n\n See Also\n --------\n assert_allclose: Compare two array_like objects for equality with desired\n relative and/or absolute precision.\n assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal\n\n Examples\n --------\n the first assert does not raise an exception\n\n >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],\n [1.0,2.333,np.nan])\n\n >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],\n ... 
[1.0,2.33339,np.nan], decimal=5)\n ...\n <type 'exceptions.AssertionError'>:\n AssertionError:\n Arrays are not almost equal\n <BLANKLINE>\n (mismatch 50.0%)\n x: array([ 1. , 2.33333, NaN])\n y: array([ 1. , 2.33339, NaN])\n\n >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],\n ... [1.0,2.33333, 5], decimal=5)\n <type 'exceptions.ValueError'>:\n ValueError:\n Arrays are not almost equal\n x: array([ 1. , 2.33333, NaN])\n y: array([ 1. , 2.33333, 5. ])\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n from numpy.core import around, number, float_, result_type, array\n from numpy.core.numerictypes import issubdtype\n from numpy.core.fromnumeric import any as npany\n\n def compare(x, y):\n try:\n if npany(gisinf(x)) or npany( gisinf(y)):\n xinfid = gisinf(x)\n yinfid = gisinf(y)\n if not (xinfid == yinfid).all():\n return False\n # if one item, x and y is +- inf\n if x.size == y.size == 1:\n return x == y\n x = x[~xinfid]\n y = y[~yinfid]\n except (TypeError, NotImplementedError):\n pass\n\n # make sure y is an inexact type to avoid abs(MIN_INT); will cause\n # casting of x later.\n dtype = result_type(y, 1.)\n y = array(y, dtype=dtype, copy=False, subok=True)\n z = abs(x - y)\n\n if not issubdtype(z.dtype, number):\n z = z.astype(float_) # handle object arrays\n\n return z < 1.5 * 10.0**(-decimal)\n\n assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,\n header=('Arrays are not almost equal to %d decimals' % decimal),\n precision=decimal)\n\n\ndef assert_array_less(x, y, err_msg='', verbose=True):\n \"\"\"\n Raises an AssertionError if two array_like objects are not ordered by less\n than.\n\n Given two array_like objects, check that the shape is equal and all\n elements of the first object are strictly smaller than those of the\n second object. An exception is raised at shape mismatch or incorrectly\n ordered values. Shape mismatch does not raise if an object has zero\n dimension. 
In contrast to the standard usage in numpy, NaNs are\n compared, no assertion is raised if both objects have NaNs in the same\n positions.\n\n\n\n Parameters\n ----------\n x : array_like\n The smaller object to check.\n y : array_like\n The larger object to compare.\n err_msg : string\n The error message to be printed in case of failure.\n verbose : bool\n If True, the conflicting values are appended to the error message.\n\n Raises\n ------\n AssertionError\n If actual and desired objects are not equal.\n\n See Also\n --------\n assert_array_equal: tests objects for equality\n assert_array_almost_equal: test objects for equality up to precision\n\n\n\n Examples\n --------\n >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])\n >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])\n ...\n <type 'exceptions.ValueError'>:\n Arrays are not less-ordered\n (mismatch 50.0%)\n x: array([ 1., 1., NaN])\n y: array([ 1., 2., NaN])\n\n >>> np.testing.assert_array_less([1.0, 4.0], 3)\n ...\n <type 'exceptions.ValueError'>:\n Arrays are not less-ordered\n (mismatch 50.0%)\n x: array([ 1., 4.])\n y: array(3)\n\n >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])\n ...\n <type 'exceptions.ValueError'>:\n Arrays are not less-ordered\n (shapes (3,), (1,) mismatch)\n x: array([ 1., 2., 3.])\n y: array([4])\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,\n verbose=verbose,\n header='Arrays are not less-ordered',\n equal_inf=False)\n\n\ndef runstring(astr, dict):\n exec(astr, dict)\n\n\ndef assert_string_equal(actual, desired):\n \"\"\"\n Test if two strings are equal.\n\n If the given strings are equal, `assert_string_equal` does nothing.\n If they are not equal, an AssertionError is raised, and the diff\n between the strings is shown.\n\n Parameters\n ----------\n actual : str\n The string to test for equality against the expected string.\n desired : str\n The expected string.\n\n Examples\n --------\n >>> np.testing.assert_string_equal('abc', 'abc')\n >>> np.testing.assert_string_equal('abc', 'abcd')\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n ...\n AssertionError: Differences in strings:\n - abc+ abcd? +\n\n \"\"\"\n # delay import of difflib to reduce startup time\n __tracebackhide__ = True # Hide traceback for py.test\n import difflib\n\n if not isinstance(actual, str):\n raise AssertionError(repr(type(actual)))\n if not isinstance(desired, str):\n raise AssertionError(repr(type(desired)))\n if re.match(r'\\A'+desired+r'\\Z', actual, re.M):\n return\n\n diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))\n diff_list = []\n while diff:\n d1 = diff.pop(0)\n if d1.startswith(' '):\n continue\n if d1.startswith('- '):\n l = [d1]\n d2 = diff.pop(0)\n if d2.startswith('? '):\n l.append(d2)\n d2 = diff.pop(0)\n if not d2.startswith('+ '):\n raise AssertionError(repr(d2))\n l.append(d2)\n if diff:\n d3 = diff.pop(0)\n if d3.startswith('? 
'):\n l.append(d3)\n else:\n diff.insert(0, d3)\n if re.match(r'\\A'+d2[2:]+r'\\Z', d1[2:]):\n continue\n diff_list.extend(l)\n continue\n raise AssertionError(repr(d1))\n if not diff_list:\n return\n msg = 'Differences in strings:\\n%s' % (''.join(diff_list)).rstrip()\n if actual != desired:\n raise AssertionError(msg)\n\n\ndef rundocs(filename=None, raise_on_error=True):\n \"\"\"\n Run doctests found in the given file.\n\n By default `rundocs` raises an AssertionError on failure.\n\n Parameters\n ----------\n filename : str\n The path to the file for which the doctests are run.\n raise_on_error : bool\n Whether to raise an AssertionError when a doctest fails. Default is\n True.\n\n Notes\n -----\n The doctests can be run by the user/developer by adding the ``doctests``\n argument to the ``test()`` call. For example, to run all tests (including\n doctests) for `numpy.lib`:\n\n >>> np.lib.test(doctests=True) #doctest: +SKIP\n \"\"\"\n from numpy.compat import npy_load_module\n import doctest\n if filename is None:\n f = sys._getframe(1)\n filename = f.f_globals['__file__']\n name = os.path.splitext(os.path.basename(filename))[0]\n m = npy_load_module(name, filename)\n\n tests = doctest.DocTestFinder().find(m)\n runner = doctest.DocTestRunner(verbose=False)\n\n msg = []\n if raise_on_error:\n out = lambda s: msg.append(s)\n else:\n out = None\n\n for test in tests:\n runner.run(test, out=out)\n\n if runner.failures > 0 and raise_on_error:\n raise AssertionError(\"Some doctests failed:\\n%s\" % \"\\n\".join(msg))\n\n\ndef raises(*args):\n \"\"\"Decorator to check for raised exceptions.\n\n The decorated test function must raise one of the passed exceptions to\n pass. If you want to test many assertions about exceptions in a single\n test, you may want to use `assert_raises` instead.\n\n .. warning::\n This decorator is nose specific, do not use it if you are using a\n different test framework.\n\n Parameters\n ----------\n args : exceptions\n The test passes if any of the passed exceptions is raised.\n\n Raises\n ------\n AssertionError\n\n Examples\n --------\n\n Usage::\n\n @raises(TypeError, ValueError)\n def test_raises_type_error():\n raise TypeError(\"This test passes\")\n\n @raises(Exception)\n def test_that_fails_by_passing():\n pass\n\n \"\"\"\n nose = import_nose()\n return nose.tools.raises(*args)\n\n#\n# assert_raises and assert_raises_regex are taken from unittest.\n#\nimport unittest\n\n\nclass _Dummy(unittest.TestCase):\n def nop(self):\n pass\n\n_d = _Dummy('nop')\n\ndef assert_raises(*args, **kwargs):\n \"\"\"\n assert_raises(exception_class, callable, *args, **kwargs)\n assert_raises(exception_class)\n\n Fail unless an exception of class exception_class is thrown\n by callable when invoked with arguments args and keyword\n arguments kwargs. If a different type of exception is\n thrown, it will not be caught, and the test case will be\n deemed to have suffered an error, exactly as for an\n unexpected exception.\n\n Alternatively, `assert_raises` can be used as a context manager:\n\n >>> from numpy.testing import assert_raises\n >>> with assert_raises(ZeroDivisionError):\n ... 1 / 0\n\n is equivalent to\n\n >>> def div(x, y):\n ... 
return x / y\n >>> assert_raises(ZeroDivisionError, div, 1, 0)\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n return _d.assertRaises(*args,**kwargs)\n\n\ndef assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):\n \"\"\"\n assert_raises_regex(exception_class, expected_regexp, callable, *args,\n **kwargs)\n assert_raises_regex(exception_class, expected_regexp)\n\n Fail unless an exception of class exception_class and with message that\n matches expected_regexp is thrown by callable when invoked with arguments\n args and keyword arguments kwargs.\n\n Alternatively, can be used as a context manager like `assert_raises`.\n\n Name of this function adheres to Python 3.2+ reference, but should work in\n all versions down to 2.6.\n\n Notes\n -----\n .. versionadded:: 1.9.0\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n\n if sys.version_info.major >= 3:\n funcname = _d.assertRaisesRegex\n else:\n # Only present in Python 2.7, missing from unittest in 2.6\n funcname = _d.assertRaisesRegexp\n\n return funcname(exception_class, expected_regexp, *args, **kwargs)\n\n\ndef decorate_methods(cls, decorator, testmatch=None):\n \"\"\"\n Apply a decorator to all methods in a class matching a regular expression.\n\n The given decorator is applied to all public methods of `cls` that are\n matched by the regular expression `testmatch`\n (``testmatch.search(methodname)``). Methods that are private, i.e. start\n with an underscore, are ignored.\n\n Parameters\n ----------\n cls : class\n Class whose methods to decorate.\n decorator : function\n Decorator to apply to methods\n testmatch : compiled regexp or str, optional\n The regular expression. Default value is None, in which case the\n nose default (``re.compile(r'(?:^|[\\\\b_\\\\.%s-])[Tt]est' % os.sep)``)\n is used.\n If `testmatch` is a string, it is compiled to a regular expression\n first.\n\n \"\"\"\n if testmatch is None:\n testmatch = re.compile(r'(?:^|[\\\\b_\\\\.%s-])[Tt]est' % os.sep)\n else:\n testmatch = re.compile(testmatch)\n cls_attr = cls.__dict__\n\n # delayed import to reduce startup time\n from inspect import isfunction\n\n methods = [_m for _m in cls_attr.values() if isfunction(_m)]\n for function in methods:\n try:\n if hasattr(function, 'compat_func_name'):\n funcname = function.compat_func_name\n else:\n funcname = function.__name__\n except AttributeError:\n # not a function\n continue\n if testmatch.search(funcname) and not funcname.startswith('_'):\n setattr(cls, funcname, decorator(function))\n return\n\n\ndef measure(code_str,times=1,label=None):\n \"\"\"\n Return elapsed time for executing code in the namespace of the caller.\n\n The supplied code string is compiled with the Python builtin ``compile``.\n The precision of the timing is 10 milli-seconds. If the code will execute\n fast on this timescale, it can be executed many times to get reasonable\n timing accuracy.\n\n Parameters\n ----------\n code_str : str\n The code to be timed.\n times : int, optional\n The number of times the code is executed. Default is 1. The code is\n only compiled once.\n label : str, optional\n A label to identify `code_str` with. This is passed into ``compile``\n as the second argument (for run-time error messages).\n\n Returns\n -------\n elapsed : float\n Total elapsed time in seconds for executing `code_str` `times` times.\n\n Examples\n --------\n >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',\n ... 
times=times)\n >>> print(\"Time for a single execution : \", etime / times, \"s\")\n Time for a single execution : 0.005 s\n\n \"\"\"\n frame = sys._getframe(1)\n locs, globs = frame.f_locals, frame.f_globals\n\n code = compile(code_str,\n 'Test name: %s ' % label,\n 'exec')\n i = 0\n elapsed = jiffies()\n while i < times:\n i += 1\n exec(code, globs, locs)\n elapsed = jiffies() - elapsed\n return 0.01*elapsed\n\n\ndef _assert_valid_refcount(op):\n \"\"\"\n Check that ufuncs don't mishandle refcount of object `1`.\n Used in a few regression tests.\n \"\"\"\n if not HAS_REFCOUNT:\n return True\n import numpy as np, gc\n\n b = np.arange(100*100).reshape(100, 100)\n c = b\n i = 1\n\n gc.disable()\n try:\n rc = sys.getrefcount(i)\n for j in range(15):\n d = op(b, c)\n assert_(sys.getrefcount(i) >= rc)\n finally:\n gc.enable()\n del d # for pyflakes\n\n\ndef assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,\n err_msg='', verbose=True):\n \"\"\"\n Raises an AssertionError if two objects are not equal up to desired\n tolerance.\n\n The test is equivalent to ``allclose(actual, desired, rtol, atol)``.\n It compares the difference between `actual` and `desired` to\n ``atol + rtol * abs(desired)``.\n\n .. versionadded:: 1.5.0\n\n Parameters\n ----------\n actual : array_like\n Array obtained.\n desired : array_like\n Array desired.\n rtol : float, optional\n Relative tolerance.\n atol : float, optional\n Absolute tolerance.\n equal_nan : bool, optional.\n If True, NaNs will compare equal.\n err_msg : str, optional\n The error message to be printed in case of failure.\n verbose : bool, optional\n If True, the conflicting values are appended to the error message.\n\n Raises\n ------\n AssertionError\n If actual and desired are not equal up to specified precision.\n\n See Also\n --------\n assert_array_almost_equal_nulp, assert_array_max_ulp\n\n Examples\n --------\n >>> x = [1e-5, 1e-3, 1e-1]\n >>> y = np.arccos(np.cos(x))\n >>> assert_allclose(x, y, rtol=1e-5, atol=0)\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n import numpy as np\n\n def compare(x, y):\n return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,\n equal_nan=equal_nan)\n\n actual, desired = np.asanyarray(actual), np.asanyarray(desired)\n header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)\n assert_array_compare(compare, actual, desired, err_msg=str(err_msg),\n verbose=verbose, header=header, equal_nan=equal_nan)\n\n\ndef assert_array_almost_equal_nulp(x, y, nulp=1):\n \"\"\"\n Compare two arrays relatively to their spacing.\n\n This is a relatively robust method to compare two arrays whose amplitude\n is variable.\n\n Parameters\n ----------\n x, y : array_like\n Input arrays.\n nulp : int, optional\n The maximum number of unit in the last place for tolerance (see Notes).\n Default is 1.\n\n Returns\n -------\n None\n\n Raises\n ------\n AssertionError\n If the spacing between `x` and `y` for one or more elements is larger\n than `nulp`.\n\n See Also\n --------\n assert_array_max_ulp : Check that all items of arrays differ in at most\n N Units in the Last Place.\n spacing : Return the distance between x and the nearest adjacent number.\n\n Notes\n -----\n An assertion is raised if the following condition is not met::\n\n abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y)))\n\n Examples\n --------\n >>> x = np.array([1., 1e-10, 1e-20])\n >>> eps = np.finfo(x.dtype).eps\n >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)\n\n >>> 
np.testing.assert_array_almost_equal_nulp(x, x*eps + x)\n Traceback (most recent call last):\n ...\n AssertionError: X and Y are not equal to 1 ULP (max is 2)\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n import numpy as np\n ax = np.abs(x)\n ay = np.abs(y)\n ref = nulp * np.spacing(np.where(ax > ay, ax, ay))\n if not np.all(np.abs(x-y) <= ref):\n if np.iscomplexobj(x) or np.iscomplexobj(y):\n msg = \"X and Y are not equal to %d ULP\" % nulp\n else:\n max_nulp = np.max(nulp_diff(x, y))\n msg = \"X and Y are not equal to %d ULP (max is %g)\" % (nulp, max_nulp)\n raise AssertionError(msg)\n\n\ndef assert_array_max_ulp(a, b, maxulp=1, dtype=None):\n \"\"\"\n Check that all items of arrays differ in at most N Units in the Last Place.\n\n Parameters\n ----------\n a, b : array_like\n Input arrays to be compared.\n maxulp : int, optional\n The maximum number of units in the last place that elements of `a` and\n `b` can differ. Default is 1.\n dtype : dtype, optional\n Data-type to convert `a` and `b` to if given. Default is None.\n\n Returns\n -------\n ret : ndarray\n Array containing number of representable floating point numbers between\n items in `a` and `b`.\n\n Raises\n ------\n AssertionError\n If one or more elements differ by more than `maxulp`.\n\n See Also\n --------\n assert_array_almost_equal_nulp : Compare two arrays relatively to their\n spacing.\n\n Examples\n --------\n >>> a = np.linspace(0., 1., 100)\n >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))\n\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n import numpy as np\n ret = nulp_diff(a, b, dtype)\n if not np.all(ret <= maxulp):\n raise AssertionError(\"Arrays are not almost equal up to %g ULP\" %\n maxulp)\n return ret\n\n\ndef nulp_diff(x, y, dtype=None):\n \"\"\"For each item in x and y, return the number of representable floating\n points between them.\n\n Parameters\n ----------\n x : array_like\n first input array\n y : array_like\n second input array\n dtype : dtype, optional\n Data-type to convert `x` and `y` to if given. 
Default is None.\n\n Returns\n -------\n nulp : array_like\n number of representable floating point numbers between each item in x\n and y.\n\n Examples\n --------\n # By definition, epsilon is the smallest number such as 1 + eps != 1, so\n # there should be exactly one ULP between 1 and 1 + eps\n >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)\n 1.0\n \"\"\"\n import numpy as np\n if dtype:\n x = np.array(x, dtype=dtype)\n y = np.array(y, dtype=dtype)\n else:\n x = np.array(x)\n y = np.array(y)\n\n t = np.common_type(x, y)\n if np.iscomplexobj(x) or np.iscomplexobj(y):\n raise NotImplementedError(\"_nulp not implemented for complex array\")\n\n x = np.array(x, dtype=t)\n y = np.array(y, dtype=t)\n\n if not x.shape == y.shape:\n raise ValueError(\"x and y do not have the same shape: %s - %s\" %\n (x.shape, y.shape))\n\n def _diff(rx, ry, vdt):\n diff = np.array(rx-ry, dtype=vdt)\n return np.abs(diff)\n\n rx = integer_repr(x)\n ry = integer_repr(y)\n return _diff(rx, ry, t)\n\n\ndef _integer_repr(x, vdt, comp):\n # Reinterpret binary representation of the float as sign-magnitude:\n # take into account two-complement representation\n # See also\n # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm\n rx = x.view(vdt)\n if not (rx.size == 1):\n rx[rx < 0] = comp - rx[rx < 0]\n else:\n if rx < 0:\n rx = comp - rx\n\n return rx\n\n\ndef integer_repr(x):\n \"\"\"Return the signed-magnitude interpretation of the binary representation of\n x.\"\"\"\n import numpy as np\n if x.dtype == np.float16:\n return _integer_repr(x, np.int16, np.int16(-2**15))\n elif x.dtype == np.float32:\n return _integer_repr(x, np.int32, np.int32(-2**31))\n elif x.dtype == np.float64:\n return _integer_repr(x, np.int64, np.int64(-2**63))\n else:\n raise ValueError(\"Unsupported dtype %s\" % x.dtype)\n\n\[email protected]\ndef _assert_warns_context(warning_class, name=None):\n __tracebackhide__ = True # Hide traceback for py.test\n with suppress_warnings() as sup:\n l = sup.record(warning_class)\n yield\n if not len(l) > 0:\n name_str = \" when calling %s\" % name if name is not None else \"\"\n raise AssertionError(\"No warning raised\" + name_str)\n\n\ndef assert_warns(warning_class, *args, **kwargs):\n \"\"\"\n Fail unless the given callable throws the specified warning.\n\n A warning of class warning_class should be thrown by the callable when\n invoked with arguments args and keyword arguments kwargs.\n If a different type of warning is thrown, it will not be caught.\n\n If called with all arguments other than the warning class omitted, may be\n used as a context manager:\n\n with assert_warns(SomeWarning):\n do_something()\n\n The ability to be used as a context manager is new in NumPy v1.11.0.\n\n .. 
versionadded:: 1.4.0\n\n Parameters\n ----------\n warning_class : class\n The class defining the warning that `func` is expected to throw.\n func : callable\n The callable to test.\n \\\\*args : Arguments\n Arguments passed to `func`.\n \\\\*\\\\*kwargs : Kwargs\n Keyword arguments passed to `func`.\n\n Returns\n -------\n The value returned by `func`.\n\n \"\"\"\n if not args:\n return _assert_warns_context(warning_class)\n\n func = args[0]\n args = args[1:]\n with _assert_warns_context(warning_class, name=func.__name__):\n return func(*args, **kwargs)\n\n\[email protected]\ndef _assert_no_warnings_context(name=None):\n __tracebackhide__ = True # Hide traceback for py.test\n with warnings.catch_warnings(record=True) as l:\n warnings.simplefilter('always')\n yield\n if len(l) > 0:\n name_str = \" when calling %s\" % name if name is not None else \"\"\n raise AssertionError(\"Got warnings%s: %s\" % (name_str, l))\n\n\ndef assert_no_warnings(*args, **kwargs):\n \"\"\"\n Fail if the given callable produces any warnings.\n\n If called with all arguments omitted, may be used as a context manager:\n\n with assert_no_warnings():\n do_something()\n\n The ability to be used as a context manager is new in NumPy v1.11.0.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n func : callable\n The callable to test.\n \\\\*args : Arguments\n Arguments passed to `func`.\n \\\\*\\\\*kwargs : Kwargs\n Keyword arguments passed to `func`.\n\n Returns\n -------\n The value returned by `func`.\n\n \"\"\"\n if not args:\n return _assert_no_warnings_context()\n\n func = args[0]\n args = args[1:]\n with _assert_no_warnings_context(name=func.__name__):\n return func(*args, **kwargs)\n\n\ndef _gen_alignment_data(dtype=float32, type='binary', max_size=24):\n \"\"\"\n generator producing data with different alignment and offsets\n to test simd vectorization\n\n Parameters\n ----------\n dtype : dtype\n data type to produce\n type : string\n 'unary': create data for unary operations, creates one input\n and output array\n 'binary': create data for unary operations, creates two input\n and output array\n max_size : integer\n maximum size of data to produce\n\n Returns\n -------\n if type is 'unary' yields one output, one input array and a message\n containing information on the data\n if type is 'binary' yields one output array, two input array and a message\n containing information on the data\n\n \"\"\"\n ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'\n bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'\n for o in range(3):\n for s in range(o + 2, max(o + 3, max_size)):\n if type == 'unary':\n inp = lambda: arange(s, dtype=dtype)[o:]\n out = empty((s,), dtype=dtype)[o:]\n yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')\n d = inp()\n yield d, d, ufmt % (o, o, s, dtype, 'in place')\n yield out[1:], inp()[:-1], ufmt % \\\n (o + 1, o, s - 1, dtype, 'out of place')\n yield out[:-1], inp()[1:], ufmt % \\\n (o, o + 1, s - 1, dtype, 'out of place')\n yield inp()[:-1], inp()[1:], ufmt % \\\n (o, o + 1, s - 1, dtype, 'aliased')\n yield inp()[1:], inp()[:-1], ufmt % \\\n (o + 1, o, s - 1, dtype, 'aliased')\n if type == 'binary':\n inp1 = lambda: arange(s, dtype=dtype)[o:]\n inp2 = lambda: arange(s, dtype=dtype)[o:]\n out = empty((s,), dtype=dtype)[o:]\n yield out, inp1(), inp2(), bfmt % \\\n (o, o, o, s, dtype, 'out of place')\n d = inp1()\n yield d, d, inp2(), bfmt % \\\n (o, o, o, s, dtype, 'in place1')\n d = inp2()\n yield d, inp1(), d, bfmt % \\\n (o, o, o, s, dtype, 'in place2')\n yield 
out[1:], inp1()[:-1], inp2()[:-1], bfmt % \\\n (o + 1, o, o, s - 1, dtype, 'out of place')\n yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \\\n (o, o + 1, o, s - 1, dtype, 'out of place')\n yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \\\n (o, o, o + 1, s - 1, dtype, 'out of place')\n yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \\\n (o + 1, o, o, s - 1, dtype, 'aliased')\n yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \\\n (o, o + 1, o, s - 1, dtype, 'aliased')\n yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \\\n (o, o, o + 1, s - 1, dtype, 'aliased')\n\n\nclass IgnoreException(Exception):\n \"Ignoring this exception due to disabled feature\"\n pass\n\n\[email protected]\ndef tempdir(*args, **kwargs):\n \"\"\"Context manager to provide a temporary test folder.\n\n All arguments are passed as this to the underlying tempfile.mkdtemp\n function.\n\n \"\"\"\n tmpdir = mkdtemp(*args, **kwargs)\n try:\n yield tmpdir\n finally:\n shutil.rmtree(tmpdir)\n\n\[email protected]\ndef temppath(*args, **kwargs):\n \"\"\"Context manager for temporary files.\n\n Context manager that returns the path to a closed temporary file. Its\n parameters are the same as for tempfile.mkstemp and are passed directly\n to that function. The underlying file is removed when the context is\n exited, so it should be closed at that time.\n\n Windows does not allow a temporary file to be opened if it is already\n open, so the underlying file must be closed after opening before it\n can be opened again.\n\n \"\"\"\n fd, path = mkstemp(*args, **kwargs)\n os.close(fd)\n try:\n yield path\n finally:\n os.remove(path)\n\n\nclass clear_and_catch_warnings(warnings.catch_warnings):\n \"\"\" Context manager that resets warning registry for catching warnings\n\n Warnings can be slippery, because, whenever a warning is triggered, Python\n adds a ``__warningregistry__`` member to the *calling* module. This makes\n it impossible to retrigger the warning in this module, whatever you put in\n the warnings filters. This context manager accepts a sequence of `modules`\n as a keyword argument to its constructor and:\n\n * stores and removes any ``__warningregistry__`` entries in given `modules`\n on entry;\n * resets ``__warningregistry__`` to its previous state on exit.\n\n This makes it possible to trigger any warning afresh inside the context\n manager without disturbing the state of warnings outside.\n\n For compatibility with Python 3.0, please consider all arguments to be\n keyword-only.\n\n Parameters\n ----------\n record : bool, optional\n Specifies whether warnings should be captured by a custom\n implementation of ``warnings.showwarning()`` and be appended to a list\n returned by the context manager. Otherwise None is returned by the\n context manager. The objects appended to the list are arguments whose\n attributes mirror the arguments to ``showwarning()``.\n modules : sequence, optional\n Sequence of modules for which to reset warnings registry on entry and\n restore on exit. To work correctly, all 'ignore' filters should\n filter by one of these modules.\n\n Examples\n --------\n >>> import warnings\n >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):\n ... warnings.simplefilter('always')\n ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')\n ... # do something that raises a warning but ignore those in\n ... 
# np.core.fromnumeric\n \"\"\"\n class_modules = ()\n\n def __init__(self, record=False, modules=()):\n self.modules = set(modules).union(self.class_modules)\n self._warnreg_copies = {}\n super(clear_and_catch_warnings, self).__init__(record=record)\n\n def __enter__(self):\n for mod in self.modules:\n if hasattr(mod, '__warningregistry__'):\n mod_reg = mod.__warningregistry__\n self._warnreg_copies[mod] = mod_reg.copy()\n mod_reg.clear()\n return super(clear_and_catch_warnings, self).__enter__()\n\n def __exit__(self, *exc_info):\n super(clear_and_catch_warnings, self).__exit__(*exc_info)\n for mod in self.modules:\n if hasattr(mod, '__warningregistry__'):\n mod.__warningregistry__.clear()\n if mod in self._warnreg_copies:\n mod.__warningregistry__.update(self._warnreg_copies[mod])\n\n\nclass suppress_warnings(object):\n \"\"\"\n Context manager and decorator doing much the same as\n ``warnings.catch_warnings``.\n\n However, it also provides a filter mechanism to work around\n http://bugs.python.org/issue4180.\n\n This bug causes Python before 3.4 to not reliably show warnings again\n after they have been ignored once (even within catch_warnings). It\n means that no \"ignore\" filter can be used easily, since following\n tests might need to see the warning. Additionally it allows easier\n specificity for testing warnings and can be nested.\n\n Parameters\n ----------\n forwarding_rule : str, optional\n One of \"always\", \"once\", \"module\", or \"location\". Analogous to\n the usual warnings module filter mode, it is useful to reduce\n noise mostly on the outmost level. Unsuppressed and unrecorded\n warnings will be forwarded based on this rule. Defaults to \"always\".\n \"location\" is equivalent to the warnings \"default\", match by exact\n location the warning warning originated from.\n\n Notes\n -----\n Filters added inside the context manager will be discarded again\n when leaving it. Upon entering all filters defined outside a\n context will be applied automatically.\n\n When a recording filter is added, matching warnings are stored in the\n ``log`` attribute as well as in the list returned by ``record``.\n\n If filters are added and the ``module`` keyword is given, the\n warning registry of this module will additionally be cleared when\n applying it, entering the context, or exiting it. This could cause\n warnings to appear a second time after leaving the context if they\n were configured to be printed once (default) and were already\n printed before the context was entered.\n\n Nesting this context manager will work as expected when the\n forwarding rule is \"always\" (default). Unfiltered and unrecorded\n warnings will be passed out and be matched by the outer level.\n On the outmost level they will be printed (or caught by another\n warnings context). The forwarding rule argument can modify this\n behaviour.\n\n Like ``catch_warnings`` this context manager is not threadsafe.\n\n Examples\n --------\n >>> with suppress_warnings() as sup:\n ... sup.filter(DeprecationWarning, \"Some text\")\n ... sup.filter(module=np.ma.core)\n ... log = sup.record(FutureWarning, \"Does this occur?\")\n ... command_giving_warnings()\n ... # The FutureWarning was given once, the filtered warnings were\n ... # ignored. All other warnings abide outside settings (may be\n ... # printed/error)\n ... assert_(len(log) == 1)\n ... 
assert_(len(sup.log) == 1) # also stored in log attribute\n\n Or as a decorator:\n\n >>> sup = suppress_warnings()\n >>> sup.filter(module=np.ma.core) # module must match exact\n >>> @sup\n >>> def some_function():\n ... # do something which causes a warning in np.ma.core\n ... pass\n \"\"\"\n def __init__(self, forwarding_rule=\"always\"):\n self._entered = False\n\n # Suppressions are either instance or defined inside one with block:\n self._suppressions = []\n\n if forwarding_rule not in {\"always\", \"module\", \"once\", \"location\"}:\n raise ValueError(\"unsupported forwarding rule.\")\n self._forwarding_rule = forwarding_rule\n\n def _clear_registries(self):\n if hasattr(warnings, \"_filters_mutated\"):\n # clearing the registry should not be necessary on new pythons,\n # instead the filters should be mutated.\n warnings._filters_mutated()\n return\n # Simply clear the registry, this should normally be harmless,\n # note that on new pythons it would be invalidated anyway.\n for module in self._tmp_modules:\n if hasattr(module, \"__warningregistry__\"):\n module.__warningregistry__.clear()\n\n def _filter(self, category=Warning, message=\"\", module=None, record=False):\n if record:\n record = [] # The log where to store warnings\n else:\n record = None\n if self._entered:\n if module is None:\n warnings.filterwarnings(\n \"always\", category=category, message=message)\n else:\n module_regex = module.__name__.replace('.', r'\\.') + '$'\n warnings.filterwarnings(\n \"always\", category=category, message=message,\n module=module_regex)\n self._tmp_modules.add(module)\n self._clear_registries()\n\n self._tmp_suppressions.append(\n (category, message, re.compile(message, re.I), module, record))\n else:\n self._suppressions.append(\n (category, message, re.compile(message, re.I), module, record))\n\n return record\n\n def filter(self, category=Warning, message=\"\", module=None):\n \"\"\"\n Add a new suppressing filter or apply it if the state is entered.\n\n Parameters\n ----------\n category : class, optional\n Warning class to filter\n message : string, optional\n Regular expression matching the warning message.\n module : module, optional\n Module to filter for. Note that the module (and its file)\n must match exactly and cannot be a submodule. This may make\n it unreliable for external modules.\n\n Notes\n -----\n When added within a context, filters are only added inside\n the context and will be forgotten when the context is exited.\n \"\"\"\n self._filter(category=category, message=message, module=module,\n record=False)\n\n def record(self, category=Warning, message=\"\", module=None):\n \"\"\"\n Append a new recording filter or apply it if the state is entered.\n\n All warnings matching will be appended to the ``log`` attribute.\n\n Parameters\n ----------\n category : class, optional\n Warning class to filter\n message : string, optional\n Regular expression matching the warning message.\n module : module, optional\n Module to filter for. Note that the module (and its file)\n must match exactly and cannot be a submodule. 
This may make\n it unreliable for external modules.\n\n Returns\n -------\n log : list\n A list which will be filled with all matched warnings.\n\n Notes\n -----\n When added within a context, filters are only added inside\n the context and will be forgotten when the context is exited.\n \"\"\"\n return self._filter(category=category, message=message, module=module,\n record=True)\n\n def __enter__(self):\n if self._entered:\n raise RuntimeError(\"cannot enter suppress_warnings twice.\")\n\n self._orig_show = warnings.showwarning\n self._filters = warnings.filters\n warnings.filters = self._filters[:]\n\n self._entered = True\n self._tmp_suppressions = []\n self._tmp_modules = set()\n self._forwarded = set()\n\n self.log = [] # reset global log (no need to keep same list)\n\n for cat, mess, _, mod, log in self._suppressions:\n if log is not None:\n del log[:] # clear the log\n if mod is None:\n warnings.filterwarnings(\n \"always\", category=cat, message=mess)\n else:\n module_regex = mod.__name__.replace('.', r'\\.') + '$'\n warnings.filterwarnings(\n \"always\", category=cat, message=mess,\n module=module_regex)\n self._tmp_modules.add(mod)\n warnings.showwarning = self._showwarning\n self._clear_registries()\n\n return self\n\n def __exit__(self, *exc_info):\n warnings.showwarning = self._orig_show\n warnings.filters = self._filters\n self._clear_registries()\n self._entered = False\n del self._orig_show\n del self._filters\n\n def _showwarning(self, message, category, filename, lineno,\n *args, **kwargs):\n use_warnmsg = kwargs.pop(\"use_warnmsg\", None)\n for cat, _, pattern, mod, rec in (\n self._suppressions + self._tmp_suppressions)[::-1]:\n if (issubclass(category, cat) and\n pattern.match(message.args[0]) is not None):\n if mod is None:\n # Message and category match, either recorded or ignored\n if rec is not None:\n msg = WarningMessage(message, category, filename,\n lineno, **kwargs)\n self.log.append(msg)\n rec.append(msg)\n return\n # Use startswith, because warnings strips the c or o from\n # .pyc/.pyo files.\n elif mod.__file__.startswith(filename):\n # The message and module (filename) match\n if rec is not None:\n msg = WarningMessage(message, category, filename,\n lineno, **kwargs)\n self.log.append(msg)\n rec.append(msg)\n return\n\n # There is no filter in place, so pass to the outside handler\n # unless we should only pass it once\n if self._forwarding_rule == \"always\":\n if use_warnmsg is None:\n self._orig_show(message, category, filename, lineno,\n *args, **kwargs)\n else:\n self._orig_showmsg(use_warnmsg)\n return\n\n if self._forwarding_rule == \"once\":\n signature = (message.args, category)\n elif self._forwarding_rule == \"module\":\n signature = (message.args, category, filename)\n elif self._forwarding_rule == \"location\":\n signature = (message.args, category, filename, lineno)\n\n if signature in self._forwarded:\n return\n self._forwarded.add(signature)\n if use_warnmsg is None:\n self._orig_show(message, category, filename, lineno, *args,\n **kwargs)\n else:\n self._orig_showmsg(use_warnmsg)\n\n def __call__(self, func):\n \"\"\"\n Function decorator to apply certain suppressions to a whole\n function.\n \"\"\"\n @wraps(func)\n def new_func(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n\n return new_func\n\n\[email protected]\ndef _assert_no_gc_cycles_context(name=None):\n __tracebackhide__ = True # Hide traceback for py.test\n\n # not meaningful to test if there is no refcounting\n if not HAS_REFCOUNT:\n return\n\n 
assert_(gc.isenabled())\n gc.disable()\n gc_debug = gc.get_debug()\n try:\n for i in range(100):\n if gc.collect() == 0:\n break\n else:\n raise RuntimeError(\n \"Unable to fully collect garbage - perhaps a __del__ method is \"\n \"creating more reference cycles?\")\n\n gc.set_debug(gc.DEBUG_SAVEALL)\n yield\n # gc.collect returns the number of unreachable objects in cycles that\n # were found -- we are checking that no cycles were created in the context\n n_objects_in_cycles = gc.collect()\n objects_in_cycles = gc.garbage[:]\n finally:\n del gc.garbage[:]\n gc.set_debug(gc_debug)\n gc.enable()\n\n if n_objects_in_cycles:\n name_str = \" when calling %s\" % name if name is not None else \"\"\n raise AssertionError(\n \"Reference cycles were found{}: {} objects were collected, \"\n \"of which {} are shown below:{}\"\n .format(\n name_str,\n n_objects_in_cycles,\n len(objects_in_cycles),\n ''.join(\n \"\\n {} object with id={}:\\n {}\".format(\n type(o).__name__,\n id(o),\n pprint.pformat(o).replace('\\n', '\\n ')\n ) for o in objects_in_cycles\n )\n )\n )\n\n\ndef assert_no_gc_cycles(*args, **kwargs):\n \"\"\"\n Fail if the given callable produces any reference cycles.\n\n If called with all arguments omitted, may be used as a context manager:\n\n with assert_no_gc_cycles():\n do_something()\n\n .. versionadded:: 1.15.0\n\n Parameters\n ----------\n func : callable\n The callable to test.\n \\\\*args : Arguments\n Arguments passed to `func`.\n \\\\*\\\\*kwargs : Kwargs\n Keyword arguments passed to `func`.\n\n Returns\n -------\n Nothing. The result is deliberately discarded to ensure that all cycles\n are found.\n\n \"\"\"\n if not args:\n return _assert_no_gc_cycles_context()\n\n func = args[0]\n args = args[1:]\n with _assert_no_gc_cycles_context(name=func.__name__):\n func(*args, **kwargs)\n", "\"\"\"\nHistogram-related functions\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport operator\nimport warnings\n\nimport numpy as np\nfrom numpy.compat.py3k import basestring\n\n__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']\n\n# range is a keyword argument to many functions, so save the builtin so they can\n# use it.\n_range = range\n\n\ndef _hist_bin_sqrt(x):\n \"\"\"\n Square root histogram bin estimator.\n\n Bin width is inversely proportional to the data size. Used by many\n programs for its simplicity.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n return x.ptp() / np.sqrt(x.size)\n\n\ndef _hist_bin_sturges(x):\n \"\"\"\n Sturges histogram bin estimator.\n\n A very simplistic estimator based on the assumption of normality of\n the data. This estimator has poor performance for non-normal data,\n which becomes especially obvious for large data sets. The estimate\n depends only on size of the data.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n return x.ptp() / (np.log2(x.size) + 1.0)\n\n\ndef _hist_bin_rice(x):\n \"\"\"\n Rice histogram bin estimator.\n\n Another simple estimator with no normality assumption. It has better\n performance for large data than Sturges, but tends to overestimate\n the number of bins. The number of bins is proportional to the cube\n root of data size (asymptotically optimal). 
The estimate depends\n only on size of the data.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n return x.ptp() / (2.0 * x.size ** (1.0 / 3))\n\n\ndef _hist_bin_scott(x):\n \"\"\"\n Scott histogram bin estimator.\n\n The binwidth is proportional to the standard deviation of the data\n and inversely proportional to the cube root of data size\n (asymptotically optimal).\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)\n\n\ndef _hist_bin_doane(x):\n \"\"\"\n Doane's histogram bin estimator.\n\n Improved version of Sturges' formula which works better for\n non-normal data. See\n stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n if x.size > 2:\n sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))\n sigma = np.std(x)\n if sigma > 0.0:\n # These three operations add up to\n # g1 = np.mean(((x - np.mean(x)) / sigma)**3)\n # but use only one temp array instead of three\n temp = x - np.mean(x)\n np.true_divide(temp, sigma, temp)\n np.power(temp, 3, temp)\n g1 = np.mean(temp)\n return x.ptp() / (1.0 + np.log2(x.size) +\n np.log2(1.0 + np.absolute(g1) / sg1))\n return 0.0\n\n\ndef _hist_bin_fd(x):\n \"\"\"\n The Freedman-Diaconis histogram bin estimator.\n\n The Freedman-Diaconis rule uses interquartile range (IQR) to\n estimate binwidth. It is considered a variation of the Scott rule\n with more robustness as the IQR is less affected by outliers than\n the standard deviation. However, the IQR depends on fewer points\n than the standard deviation, so it is less accurate, especially for\n long tailed distributions.\n\n If the IQR is 0, this function returns 1 for the number of bins.\n Binwidth is inversely proportional to the cube root of data size\n (asymptotically optimal).\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n \"\"\"\n iqr = np.subtract(*np.percentile(x, [75, 25]))\n return 2.0 * iqr * x.size ** (-1.0 / 3.0)\n\n\ndef _hist_bin_auto(x):\n \"\"\"\n Histogram bin estimator that uses the minimum width of the\n Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero\n and the Sturges estimator if the FD bandwidth is 0.\n\n The FD estimator is usually the most robust method, but its width\n estimate tends to be too large for small `x` and bad for data with limited\n variance. The Sturges estimator is quite good for small (<1000) datasets\n and is the default in the R language. This method gives good off the shelf\n behaviour.\n\n .. versionchanged:: 1.15.0\n If there is limited variance the IQR can be 0, which results in the\n FD bin width being 0 too. 
This is not a valid bin width, so\n ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.\n If the IQR is 0, it's unlikely any variance based estimators will be of\n use, so we revert to the sturges estimator, which only uses the size of the\n dataset in its calculation.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n\n See Also\n --------\n _hist_bin_fd, _hist_bin_sturges\n \"\"\"\n fd_bw = _hist_bin_fd(x)\n sturges_bw = _hist_bin_sturges(x)\n if fd_bw:\n return min(fd_bw, sturges_bw)\n else:\n # limited variance, so we return a len dependent bw estimator\n return sturges_bw\n\n# Private dict initialized at module load time\n_hist_bin_selectors = {'auto': _hist_bin_auto,\n 'doane': _hist_bin_doane,\n 'fd': _hist_bin_fd,\n 'rice': _hist_bin_rice,\n 'scott': _hist_bin_scott,\n 'sqrt': _hist_bin_sqrt,\n 'sturges': _hist_bin_sturges}\n\n\ndef _ravel_and_check_weights(a, weights):\n \"\"\" Check a and weights have matching shapes, and ravel both \"\"\"\n a = np.asarray(a)\n if weights is not None:\n weights = np.asarray(weights)\n if weights.shape != a.shape:\n raise ValueError(\n 'weights should have the same shape as a.')\n weights = weights.ravel()\n a = a.ravel()\n return a, weights\n\n\ndef _get_outer_edges(a, range):\n \"\"\"\n Determine the outer bin edges to use, from either the data or the range\n argument\n \"\"\"\n if range is not None:\n first_edge, last_edge = range\n if first_edge > last_edge:\n raise ValueError(\n 'max must be larger than min in range parameter.')\n if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n raise ValueError(\n \"supplied range of [{}, {}] is not finite\".format(first_edge, last_edge))\n elif a.size == 0:\n # handle empty arrays. 
Can't determine range, so use 0-1.\n first_edge, last_edge = 0, 1\n else:\n first_edge, last_edge = a.min(), a.max()\n if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n raise ValueError(\n \"autodetected range of [{}, {}] is not finite\".format(first_edge, last_edge))\n\n # expand empty range to avoid divide by zero\n if first_edge == last_edge:\n first_edge = first_edge - 0.5\n last_edge = last_edge + 0.5\n\n return first_edge, last_edge\n\n\ndef _get_bin_edges(a, bins, range, weights):\n \"\"\"\n Computes the bins used internally by `histogram`.\n\n Parameters\n ==========\n a : ndarray\n Ravelled data array\n bins, range\n Forwarded arguments from `histogram`.\n weights : ndarray, optional\n Ravelled weights array, or None\n\n Returns\n =======\n bin_edges : ndarray\n Array of bin edges\n uniform_bins : (Number, Number, int):\n The upper bound, lowerbound, and number of bins, used in the optimized\n implementation of `histogram` that works on uniform bins.\n \"\"\"\n # parse the overloaded bins argument\n n_equal_bins = None\n bin_edges = None\n\n if isinstance(bins, basestring):\n bin_name = bins\n # if `bins` is a string for an automatic method,\n # this will replace it with the number of bins calculated\n if bin_name not in _hist_bin_selectors:\n raise ValueError(\n \"{!r} is not a valid estimator for `bins`\".format(bin_name))\n if weights is not None:\n raise TypeError(\"Automated estimation of the number of \"\n \"bins is not supported for weighted data\")\n\n first_edge, last_edge = _get_outer_edges(a, range)\n\n # truncate the range if needed\n if range is not None:\n keep = (a >= first_edge)\n keep &= (a <= last_edge)\n if not np.logical_and.reduce(keep):\n a = a[keep]\n\n if a.size == 0:\n n_equal_bins = 1\n else:\n # Do not call selectors on empty arrays\n width = _hist_bin_selectors[bin_name](a)\n if width:\n n_equal_bins = int(np.ceil((last_edge - first_edge) / width))\n else:\n # Width can be zero for some estimators, e.g. FD when\n # the IQR of the data is zero.\n n_equal_bins = 1\n\n elif np.ndim(bins) == 0:\n try:\n n_equal_bins = operator.index(bins)\n except TypeError:\n raise TypeError(\n '`bins` must be an integer, a string, or an array')\n if n_equal_bins < 1:\n raise ValueError('`bins` must be positive, when an integer')\n\n first_edge, last_edge = _get_outer_edges(a, range)\n\n elif np.ndim(bins) == 1:\n bin_edges = np.asarray(bins)\n if np.any(bin_edges[:-1] > bin_edges[1:]):\n raise ValueError(\n '`bins` must increase monotonically, when an array')\n\n else:\n raise ValueError('`bins` must be 1d, when an array')\n\n if n_equal_bins is not None:\n # gh-10322 means that type resolution rules are dependent on array\n # shapes. 
To avoid this causing problems, we pick a type now and stick\n # with it throughout.\n bin_type = np.result_type(first_edge, last_edge, a)\n if np.issubdtype(bin_type, np.integer):\n bin_type = np.result_type(bin_type, float)\n\n # bin edges must be computed\n bin_edges = np.linspace(\n first_edge, last_edge, n_equal_bins + 1,\n endpoint=True, dtype=bin_type)\n return bin_edges, (first_edge, last_edge, n_equal_bins)\n else:\n return bin_edges, None\n\n\ndef _search_sorted_inclusive(a, v):\n \"\"\"\n Like `searchsorted`, but where the last item in `v` is placed on the right.\n\n In the context of a histogram, this makes the last bin edge inclusive\n \"\"\"\n return np.concatenate((\n a.searchsorted(v[:-1], 'left'),\n a.searchsorted(v[-1:], 'right')\n ))\n\n\ndef histogram_bin_edges(a, bins=10, range=None, weights=None):\n r\"\"\"\n Function to calculate only the edges of the bins used by the `histogram` function.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str, optional\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines the bin edges, including the rightmost\n edge, allowing for non-uniform bin widths.\n\n If `bins` is a string from the list below, `histogram_bin_edges` will use\n the method chosen to calculate the optimal bin width and\n consequently the number of bins (see `Notes` for more detail on\n the estimators) from the data that falls within the requested\n range. While the bin width will be optimal for the actual data\n in the range, the number of bins will be computed to fill the\n entire range, including the empty portions. For visualisation,\n using the 'auto' option is suggested. Weighted data is not\n supported for automated bin size selection.\n\n 'auto'\n Maximum of the 'sturges' and 'fd' estimators. Provides good\n all around performance.\n\n 'fd' (Freedman Diaconis Estimator)\n Robust (resilient to outliers) estimator that takes into\n account data variability and data size.\n\n 'doane'\n An improved version of Sturges' estimator that works better\n with non-normal datasets.\n\n 'scott'\n Less robust estimator that that takes into account data\n variability and data size.\n\n 'rice'\n Estimator does not take variability into account, only data\n size. Commonly overestimates number of bins required.\n\n 'sturges'\n R's default method, only accounts for data size. Only\n optimal for gaussian data and underestimates number of bins\n for large non-gaussian datasets.\n\n 'sqrt'\n Square root (of data size) estimator, used by Excel and\n other programs for its speed and simplicity.\n\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n\n weights : array_like, optional\n An array of weights, of the same shape as `a`. Each value in\n `a` only contributes its associated weight towards the bin count\n (instead of 1). 
This is currently not used by any of the bin estimators,\n but may be in the future.\n\n Returns\n -------\n bin_edges : array of dtype float\n The edges to pass into `histogram`\n\n See Also\n --------\n histogram\n\n Notes\n -----\n The methods to estimate the optimal number of bins are well founded\n in literature, and are inspired by the choices R provides for\n histogram visualisation. Note that having the number of bins\n proportional to :math:`n^{1/3}` is asymptotically optimal, which is\n why it appears in most estimators. These are simply plug-in methods\n that give good starting points for number of bins. In the equations\n below, :math:`h` is the binwidth and :math:`n_h` is the number of\n bins. All estimators that compute bin counts are recast to bin width\n using the `ptp` of the data. The final bin count is obtained from\n ``np.round(np.ceil(range / h))``.\n\n 'Auto' (maximum of the 'Sturges' and 'FD' estimators)\n A compromise to get a good value. For small datasets the Sturges\n value will usually be chosen, while larger datasets will usually\n default to FD. Avoids the overly conservative behaviour of FD\n and Sturges for small and large datasets respectively.\n Switchover point is usually :math:`a.size \\approx 1000`.\n\n 'FD' (Freedman Diaconis Estimator)\n .. math:: h = 2 \\frac{IQR}{n^{1/3}}\n\n The binwidth is proportional to the interquartile range (IQR)\n and inversely proportional to cube root of a.size. Can be too\n conservative for small datasets, but is quite good for large\n datasets. The IQR is very robust to outliers.\n\n 'Scott'\n .. math:: h = \\sigma \\sqrt[3]{\\frac{24 * \\sqrt{\\pi}}{n}}\n\n The binwidth is proportional to the standard deviation of the\n data and inversely proportional to cube root of ``x.size``. Can\n be too conservative for small datasets, but is quite good for\n large datasets. The standard deviation is not very robust to\n outliers. Values are very similar to the Freedman-Diaconis\n estimator in the absence of outliers.\n\n 'Rice'\n .. math:: n_h = 2n^{1/3}\n\n The number of bins is only proportional to cube root of\n ``a.size``. It tends to overestimate the number of bins and it\n does not take into account data variability.\n\n 'Sturges'\n .. math:: n_h = \\log _{2}n+1\n\n The number of bins is the base 2 log of ``a.size``. This\n estimator assumes normality of data and is too conservative for\n larger, non-normal datasets. This is the default method in R's\n ``hist`` method.\n\n 'Doane'\n .. math:: n_h = 1 + \\log_{2}(n) +\n \\log_{2}(1 + \\frac{|g_1|}{\\sigma_{g_1}})\n\n g_1 = mean[(\\frac{x - \\mu}{\\sigma})^3]\n\n \\sigma_{g_1} = \\sqrt{\\frac{6(n - 2)}{(n + 1)(n + 3)}}\n\n An improved version of Sturges' formula that produces better\n estimates for non-normal datasets. This estimator attempts to\n account for the skew of the data.\n\n 'Sqrt'\n .. math:: n_h = \\sqrt n\n The simplest and fastest estimator. Only takes into account the\n data size.\n\n Examples\n --------\n >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])\n >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))\n array([0. , 0.25, 0.5 , 0.75, 1. ])\n >>> np.histogram_bin_edges(arr, bins=2)\n array([0. , 2.5, 5. 
])\n\n For consistency with histogram, an array of pre-computed bins is\n passed through unmodified:\n\n >>> np.histogram_bin_edges(arr, [1, 2])\n array([1, 2])\n\n This function allows one set of bins to be computed, and reused across\n multiple histograms:\n\n >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')\n >>> shared_bins\n array([0., 1., 2., 3., 4., 5.])\n\n >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])\n >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)\n >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)\n\n >>> hist_0; hist_1\n array([1, 1, 0, 1, 0])\n array([2, 0, 1, 1, 2])\n\n Which gives more easily comparable results than using separate bins for\n each histogram:\n\n >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')\n >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')\n >>> hist_0; hist1\n array([1, 1, 1])\n array([2, 1, 1, 2])\n >>> bins_0; bins_1\n array([0., 1., 2., 3.])\n array([0. , 1.25, 2.5 , 3.75, 5. ])\n\n \"\"\"\n a, weights = _ravel_and_check_weights(a, weights)\n bin_edges, _ = _get_bin_edges(a, bins, range, weights)\n return bin_edges\n\n\ndef histogram(a, bins=10, range=None, normed=None, weights=None,\n density=None):\n r\"\"\"\n Compute the histogram of a set of data.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str, optional\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines the bin edges, including the rightmost\n edge, allowing for non-uniform bin widths.\n\n .. versionadded:: 1.11.0\n\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n normed : bool, optional\n\n .. deprecated:: 1.6.0\n\n This is equivalent to the `density` argument, but produces incorrect\n results for unequal bin widths. It should not be used.\n\n .. versionchanged:: 1.15.0\n DeprecationWarnings are actually emitted.\n\n weights : array_like, optional\n An array of weights, of the same shape as `a`. Each value in\n `a` only contributes its associated weight towards the bin count\n (instead of 1). If `density` is True, the weights are\n normalized, so that the integral of the density over the range\n remains 1.\n density : bool, optional\n If ``False``, the result will contain the number of samples in\n each bin. If ``True``, the result is the value of the\n probability *density* function at the bin, normalized such that\n the *integral* over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability *mass* function.\n\n Overrides the ``normed`` keyword if given.\n\n Returns\n -------\n hist : array\n The values of the histogram. 
See `density` and `weights` for a\n description of the possible semantics.\n bin_edges : array of dtype float\n Return the bin edges ``(length(hist)+1)``.\n\n\n See Also\n --------\n histogramdd, bincount, searchsorted, digitize, histogram_bin_edges\n\n Notes\n -----\n All but the last (righthand-most) bin is half-open. In other words,\n if `bins` is::\n\n [1, 2, 3, 4]\n\n then the first bin is ``[1, 2)`` (including 1, but excluding 2) and\n the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which\n *includes* 4.\n\n\n Examples\n --------\n >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])\n (array([0, 2, 1]), array([0, 1, 2, 3]))\n >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)\n (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))\n >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])\n (array([1, 4, 1]), array([0, 1, 2, 3]))\n\n >>> a = np.arange(5)\n >>> hist, bin_edges = np.histogram(a, density=True)\n >>> hist\n array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])\n >>> hist.sum()\n 2.4999999999999996\n >>> np.sum(hist * np.diff(bin_edges))\n 1.0\n\n .. versionadded:: 1.11.0\n\n Automated Bin Selection Methods example, using 2 peak random data\n with 2000 points:\n\n >>> import matplotlib.pyplot as plt\n >>> rng = np.random.RandomState(10) # deterministic random data\n >>> a = np.hstack((rng.normal(size=1000),\n ... rng.normal(loc=5, scale=2, size=1000)))\n >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram\n >>> plt.title(\"Histogram with 'auto' bins\")\n >>> plt.show()\n\n \"\"\"\n a, weights = _ravel_and_check_weights(a, weights)\n\n bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)\n\n # Histogram is an integer or a float array depending on the weights.\n if weights is None:\n ntype = np.dtype(np.intp)\n else:\n ntype = weights.dtype\n\n # We set a block size, as this allows us to iterate over chunks when\n # computing histograms, to minimize memory usage.\n BLOCK = 65536\n\n # The fast path uses bincount, but that only works for certain types\n # of weight\n simple_weights = (\n weights is None or\n np.can_cast(weights.dtype, np.double) or\n np.can_cast(weights.dtype, complex)\n )\n\n if uniform_bins is not None and simple_weights:\n # Fast algorithm for equal bins\n # We now convert values of a to bin indices, under the assumption of\n # equal bin widths (which is valid here).\n first_edge, last_edge, n_equal_bins = uniform_bins\n\n # Initialize empty histogram\n n = np.zeros(n_equal_bins, ntype)\n\n # Pre-compute histogram scaling factor\n norm = n_equal_bins / (last_edge - first_edge)\n\n # We iterate over blocks here for two reasons: the first is that for\n # large arrays, it is actually faster (for example for a 10^8 array it\n # is 2x as fast) and it results in a memory footprint 3x lower in the\n # limit of large arrays.\n for i in _range(0, len(a), BLOCK):\n tmp_a = a[i:i+BLOCK]\n if weights is None:\n tmp_w = None\n else:\n tmp_w = weights[i:i + BLOCK]\n\n # Only include values in the right range\n keep = (tmp_a >= first_edge)\n keep &= (tmp_a <= last_edge)\n if not np.logical_and.reduce(keep):\n tmp_a = tmp_a[keep]\n if tmp_w is not None:\n tmp_w = tmp_w[keep]\n\n # This cast ensures no type promotions occur below, which gh-10322\n # make unpredictable. 
Getting it wrong leads to precision errors\n # like gh-8123.\n tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)\n\n # Compute the bin indices, and for values that lie exactly on\n # last_edge we need to subtract one\n f_indices = (tmp_a - first_edge) * norm\n indices = f_indices.astype(np.intp)\n indices[indices == n_equal_bins] -= 1\n\n # The index computation is not guaranteed to give exactly\n # consistent results within ~1 ULP of the bin edges.\n decrement = tmp_a < bin_edges[indices]\n indices[decrement] -= 1\n # The last bin includes the right edge. The other bins do not.\n increment = ((tmp_a >= bin_edges[indices + 1])\n & (indices != n_equal_bins - 1))\n indices[increment] += 1\n\n # We now compute the histogram using bincount\n if ntype.kind == 'c':\n n.real += np.bincount(indices, weights=tmp_w.real,\n minlength=n_equal_bins)\n n.imag += np.bincount(indices, weights=tmp_w.imag,\n minlength=n_equal_bins)\n else:\n n += np.bincount(indices, weights=tmp_w,\n minlength=n_equal_bins).astype(ntype)\n else:\n # Compute via cumulative histogram\n cum_n = np.zeros(bin_edges.shape, ntype)\n if weights is None:\n for i in _range(0, len(a), BLOCK):\n sa = np.sort(a[i:i+BLOCK])\n cum_n += _search_sorted_inclusive(sa, bin_edges)\n else:\n zero = np.zeros(1, dtype=ntype)\n for i in _range(0, len(a), BLOCK):\n tmp_a = a[i:i+BLOCK]\n tmp_w = weights[i:i+BLOCK]\n sorting_index = np.argsort(tmp_a)\n sa = tmp_a[sorting_index]\n sw = tmp_w[sorting_index]\n cw = np.concatenate((zero, sw.cumsum()))\n bin_index = _search_sorted_inclusive(sa, bin_edges)\n cum_n += cw[bin_index]\n\n n = np.diff(cum_n)\n\n # density overrides the normed keyword\n if density is not None:\n if normed is not None:\n # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)\n warnings.warn(\n \"The normed argument is ignored when density is provided. \"\n \"In future passing both will result in an error.\",\n DeprecationWarning, stacklevel=2)\n normed = None\n\n if density:\n db = np.array(np.diff(bin_edges), float)\n return n/db/n.sum(), bin_edges\n elif normed:\n # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)\n warnings.warn(\n \"Passing `normed=True` on non-uniform bins has always been \"\n \"broken, and computes neither the probability density \"\n \"function nor the probability mass function. \"\n \"The result is only correct if the bins are uniform, when \"\n \"density=True will produce the same result anyway. \"\n \"The argument will be removed in a future version of \"\n \"numpy.\",\n np.VisibleDeprecationWarning, stacklevel=2)\n\n # this normalization is incorrect, but\n db = np.array(np.diff(bin_edges), float)\n return n/(n*db).sum(), bin_edges\n else:\n if normed is not None:\n # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)\n warnings.warn(\n \"Passing normed=False is deprecated, and has no effect. 
\"\n \"Consider passing the density argument instead.\",\n DeprecationWarning, stacklevel=2)\n return n, bin_edges\n\n\ndef histogramdd(sample, bins=10, range=None, normed=None, weights=None,\n density=None):\n \"\"\"\n Compute the multidimensional histogram of some data.\n\n Parameters\n ----------\n sample : (N, D) array, or (D, N) array_like\n The data to be histogrammed.\n\n Note the unusual interpretation of sample when an array_like:\n\n * When an array, each row is a coordinate in a D-dimensional space -\n such as ``histogramgramdd(np.array([p1, p2, p3]))``.\n * When an array_like, each element is the list of values for single\n coordinate - such as ``histogramgramdd((X, Y, Z))``.\n\n The first form should be preferred.\n\n bins : sequence or int, optional\n The bin specification:\n\n * A sequence of arrays describing the bin edges along each dimension.\n * The number of bins for each dimension (nx, ny, ... =bins)\n * The number of bins for all dimensions (nx=ny=...=bins).\n\n range : sequence, optional\n A sequence of length D, each an optional (lower, upper) tuple giving\n the outer bin edges to be used if the edges are not given explicitly in\n `bins`.\n An entry of None in the sequence results in the minimum and maximum\n values being used for the corresponding dimension.\n The default, None, is equivalent to passing a tuple of D None values.\n density : bool, optional\n If False, the default, returns the number of samples in each bin.\n If True, returns the probability *density* function at the bin,\n ``bin_count / sample_count / bin_volume``.\n normed : bool, optional\n An alias for the density argument that behaves identically. To avoid\n confusion with the broken normed argument to `histogram`, `density`\n should be preferred.\n weights : (N,) array_like, optional\n An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.\n Weights are normalized to 1 if normed is True. If normed is False,\n the values of the returned histogram are equal to the sum of the\n weights belonging to the samples falling into each bin.\n\n Returns\n -------\n H : ndarray\n The multidimensional histogram of sample x. 
See normed and weights\n for the different possible semantics.\n edges : list\n A list of D arrays describing the bin edges for each dimension.\n\n See Also\n --------\n histogram: 1-D histogram\n histogram2d: 2-D histogram\n\n Examples\n --------\n >>> r = np.random.randn(100,3)\n >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))\n >>> H.shape, edges[0].size, edges[1].size, edges[2].size\n ((5, 8, 4), 6, 9, 5)\n\n \"\"\"\n\n try:\n # Sample is an ND-array.\n N, D = sample.shape\n except (AttributeError, ValueError):\n # Sample is a sequence of 1D arrays.\n sample = np.atleast_2d(sample).T\n N, D = sample.shape\n\n nbin = np.empty(D, int)\n edges = D*[None]\n dedges = D*[None]\n if weights is not None:\n weights = np.asarray(weights)\n\n try:\n M = len(bins)\n if M != D:\n raise ValueError(\n 'The dimension of bins must be equal to the dimension of the '\n ' sample x.')\n except TypeError:\n # bins is an integer\n bins = D*[bins]\n\n # normalize the range argument\n if range is None:\n range = (None,) * D\n elif len(range) != D:\n raise ValueError('range argument must have one entry per dimension')\n\n # Create edge arrays\n for i in _range(D):\n if np.ndim(bins[i]) == 0:\n if bins[i] < 1:\n raise ValueError(\n '`bins[{}]` must be positive, when an integer'.format(i))\n smin, smax = _get_outer_edges(sample[:,i], range[i])\n edges[i] = np.linspace(smin, smax, bins[i] + 1)\n elif np.ndim(bins[i]) == 1:\n edges[i] = np.asarray(bins[i])\n if np.any(edges[i][:-1] > edges[i][1:]):\n raise ValueError(\n '`bins[{}]` must be monotonically increasing, when an array'\n .format(i))\n else:\n raise ValueError(\n '`bins[{}]` must be a scalar or 1d array'.format(i))\n\n nbin[i] = len(edges[i]) + 1 # includes an outlier on each end\n dedges[i] = np.diff(edges[i])\n\n # Compute the bin number each sample falls into.\n Ncount = tuple(\n # avoid np.digitize to work around gh-11022\n np.searchsorted(edges[i], sample[:, i], side='right')\n for i in _range(D)\n )\n\n # Using digitize, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right edge to be\n # counted in the last bin, and not as an outlier.\n for i in _range(D):\n # Find which points are on the rightmost edge.\n on_edge = (sample[:, i] == edges[i][-1])\n # Shift these points one bin to the left.\n Ncount[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened histogram matrix.\n # This raises an error if the array is too large.\n xy = np.ravel_multi_index(Ncount, nbin)\n\n # Compute the number of repetitions in xy and assign it to the\n # flattened histmat.\n hist = np.bincount(xy, weights, minlength=nbin.prod())\n\n # Shape into a proper matrix\n hist = hist.reshape(nbin)\n\n # This preserves the (bad) behavior observed in gh-7845, for now.\n hist = hist.astype(float, casting='safe')\n\n # Remove outliers (indices 0 and -1 for each dimension).\n core = D*(slice(1, -1),)\n hist = hist[core]\n\n # handle the aliasing normed argument\n if normed is None:\n if density is None:\n density = False\n elif density is None:\n # an explicit normed argument was passed, alias it to the new name\n density = normed\n else:\n raise TypeError(\"Cannot specify both 'normed' and 'density'\")\n\n if density:\n # calculate the probability density function\n s = hist.sum()\n for i in _range(D):\n shape = np.ones(D, int)\n shape[i] = nbin[i] - 2\n hist = hist / dedges[i].reshape(shape)\n hist /= s\n\n if (hist.shape != nbin - 2).any():\n raise RuntimeError(\n \"Internal Shape Error\")\n return hist, 
edges\n", "# pylint: disable=E1103\n\nimport random\nimport re\nfrom collections import OrderedDict\nfrom datetime import date, datetime\n\nimport numpy as np\nimport pytest\nfrom numpy import nan\nfrom numpy.random import randn\n\nimport pandas as pd\nimport pandas.util.testing as tm\nfrom pandas import (Categorical, CategoricalIndex, DataFrame, DatetimeIndex,\n Float64Index, Index, Int64Index, MultiIndex, RangeIndex,\n Series, UInt64Index)\nfrom pandas.api.types import CategoricalDtype as CDT\nfrom pandas.compat import lrange, lzip\nfrom pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas.core.reshape.concat import concat\nfrom pandas.core.reshape.merge import MergeError, merge\nfrom pandas.util.testing import assert_frame_equal, assert_series_equal\n\nN = 50\nNGROUPS = 8\n\n\ndef get_test_data(ngroups=NGROUPS, n=N):\n unique_groups = lrange(ngroups)\n arr = np.asarray(np.tile(unique_groups, n // ngroups))\n\n if len(arr) < n:\n arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])\n\n random.shuffle(arr)\n return arr\n\n\nclass TestMerge(object):\n\n def setup_method(self, method):\n # aggregate multiple columns\n self.df = DataFrame({'key1': get_test_data(),\n 'key2': get_test_data(),\n 'data1': np.random.randn(N),\n 'data2': np.random.randn(N)})\n\n # exclude a couple keys for fun\n self.df = self.df[self.df['key2'] > 1]\n\n self.df2 = DataFrame({'key1': get_test_data(n=N // 5),\n 'key2': get_test_data(ngroups=NGROUPS // 2,\n n=N // 5),\n 'value': np.random.randn(N // 5)})\n\n self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n self.right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n def test_merge_inner_join_empty(self):\n # GH 15328\n df_empty = pd.DataFrame()\n df_a = pd.DataFrame({'a': [1, 2]}, index=[0, 1], dtype='int64')\n result = pd.merge(df_empty, df_a, left_index=True, right_index=True)\n expected = pd.DataFrame({'a': []}, index=[], dtype='int64')\n assert_frame_equal(result, expected)\n\n def test_merge_common(self):\n joined = merge(self.df, self.df2)\n exp = merge(self.df, self.df2, on=['key1', 'key2'])\n tm.assert_frame_equal(joined, exp)\n\n def test_merge_index_as_on_arg(self):\n # GH14355\n\n left = self.df.set_index('key1')\n right = self.df2.set_index('key1')\n result = merge(left, right, on='key1')\n expected = merge(self.df, self.df2, on='key1').set_index('key1')\n assert_frame_equal(result, expected)\n\n def test_merge_index_singlekey_right_vs_left(self):\n left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n merged1 = merge(left, right, left_on='key',\n right_index=True, how='left', sort=False)\n merged2 = merge(right, left, right_on='key',\n left_index=True, how='right', sort=False)\n assert_frame_equal(merged1, merged2.loc[:, merged1.columns])\n\n merged1 = merge(left, right, left_on='key',\n right_index=True, how='left', sort=True)\n merged2 = merge(right, left, right_on='key',\n left_index=True, how='right', sort=True)\n assert_frame_equal(merged1, merged2.loc[:, merged1.columns])\n\n def test_merge_index_singlekey_inner(self):\n left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n # inner join\n result = merge(left, right, left_on='key', right_index=True,\n 
how='inner')\n expected = left.join(right, on='key').loc[result.index]\n assert_frame_equal(result, expected)\n\n result = merge(right, left, right_on='key', left_index=True,\n how='inner')\n expected = left.join(right, on='key').loc[result.index]\n assert_frame_equal(result, expected.loc[:, result.columns])\n\n def test_merge_misspecified(self):\n pytest.raises(ValueError, merge, self.left, self.right,\n left_index=True)\n pytest.raises(ValueError, merge, self.left, self.right,\n right_index=True)\n\n pytest.raises(ValueError, merge, self.left, self.left,\n left_on='key', on='key')\n\n pytest.raises(ValueError, merge, self.df, self.df2,\n left_on=['key1'], right_on=['key1', 'key2'])\n\n def test_index_and_on_parameters_confusion(self):\n pytest.raises(ValueError, merge, self.df, self.df2, how='left',\n left_index=False, right_index=['key1', 'key2'])\n pytest.raises(ValueError, merge, self.df, self.df2, how='left',\n left_index=['key1', 'key2'], right_index=False)\n pytest.raises(ValueError, merge, self.df, self.df2, how='left',\n left_index=['key1', 'key2'],\n right_index=['key1', 'key2'])\n\n def test_merge_overlap(self):\n merged = merge(self.left, self.left, on='key')\n exp_len = (self.left['key'].value_counts() ** 2).sum()\n assert len(merged) == exp_len\n assert 'v1_x' in merged\n assert 'v1_y' in merged\n\n def test_merge_different_column_key_names(self):\n left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n 'value': [1, 2, 3, 4]})\n right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],\n 'value': [5, 6, 7, 8]})\n\n merged = left.merge(right, left_on='lkey', right_on='rkey',\n how='outer', sort=True)\n\n exp = pd.Series(['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan],\n name='lkey')\n tm.assert_series_equal(merged['lkey'], exp)\n\n exp = pd.Series(['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'],\n name='rkey')\n tm.assert_series_equal(merged['rkey'], exp)\n\n exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name='value_x')\n tm.assert_series_equal(merged['value_x'], exp)\n\n exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name='value_y')\n tm.assert_series_equal(merged['value_y'], exp)\n\n def test_merge_copy(self):\n left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))\n right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))\n\n merged = merge(left, right, left_index=True,\n right_index=True, copy=True)\n\n merged['a'] = 6\n assert (left['a'] == 0).all()\n\n merged['d'] = 'peekaboo'\n assert (right['d'] == 'bar').all()\n\n def test_merge_nocopy(self):\n left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))\n right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))\n\n merged = merge(left, right, left_index=True,\n right_index=True, copy=False)\n\n merged['a'] = 6\n assert (left['a'] == 6).all()\n\n merged['d'] = 'peekaboo'\n assert (right['d'] == 'peekaboo').all()\n\n def test_intelligently_handle_join_key(self):\n # #733, be a bit more 1337 about not returning unconsolidated DataFrame\n\n left = DataFrame({'key': [1, 1, 2, 2, 3],\n 'value': lrange(5)}, columns=['value', 'key'])\n right = DataFrame({'key': [1, 1, 2, 3, 4, 5],\n 'rvalue': lrange(6)})\n\n joined = merge(left, right, on='key', how='outer')\n expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5],\n 'value': np.array([0, 0, 1, 1, 2, 3, 4,\n np.nan, np.nan]),\n 'rvalue': [0, 1, 0, 1, 2, 2, 3, 4, 5]},\n columns=['value', 'key', 'rvalue'])\n assert_frame_equal(joined, expected)\n\n def test_merge_join_key_dtype_cast(self):\n # #8596\n\n df1 = DataFrame({'key': [1], 'v1': [10]})\n df2 = DataFrame({'key': 
[2], 'v1': [20]})\n df = merge(df1, df2, how='outer')\n assert df['key'].dtype == 'int64'\n\n df1 = DataFrame({'key': [True], 'v1': [1]})\n df2 = DataFrame({'key': [False], 'v1': [0]})\n df = merge(df1, df2, how='outer')\n\n # GH13169\n # this really should be bool\n assert df['key'].dtype == 'object'\n\n df1 = DataFrame({'val': [1]})\n df2 = DataFrame({'val': [2]})\n lkey = np.array([1])\n rkey = np.array([2])\n df = merge(df1, df2, left_on=lkey, right_on=rkey, how='outer')\n assert df['key_0'].dtype == 'int64'\n\n def test_handle_join_key_pass_array(self):\n left = DataFrame({'key': [1, 1, 2, 2, 3],\n 'value': lrange(5)}, columns=['value', 'key'])\n right = DataFrame({'rvalue': lrange(6)})\n key = np.array([1, 1, 2, 3, 4, 5])\n\n merged = merge(left, right, left_on='key', right_on=key, how='outer')\n merged2 = merge(right, left, left_on=key, right_on='key', how='outer')\n\n assert_series_equal(merged['key'], merged2['key'])\n assert merged['key'].notna().all()\n assert merged2['key'].notna().all()\n\n left = DataFrame({'value': lrange(5)}, columns=['value'])\n right = DataFrame({'rvalue': lrange(6)})\n lkey = np.array([1, 1, 2, 2, 3])\n rkey = np.array([1, 1, 2, 3, 4, 5])\n\n merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')\n tm.assert_series_equal(merged['key_0'], Series([1, 1, 1, 1, 2,\n 2, 3, 4, 5],\n name='key_0'))\n\n left = DataFrame({'value': lrange(3)})\n right = DataFrame({'rvalue': lrange(6)})\n\n key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)\n merged = merge(left, right, left_index=True, right_on=key, how='outer')\n tm.assert_series_equal(merged['key_0'], Series(key, name='key_0'))\n\n def test_no_overlap_more_informative_error(self):\n dt = datetime.now()\n df1 = DataFrame({'x': ['a']}, index=[dt])\n\n df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])\n pytest.raises(MergeError, merge, df1, df2)\n\n msg = ('No common columns to perform merge on. 
'\n 'Merge options: left_on={lon}, right_on={ron}, '\n 'left_index={lidx}, right_index={ridx}'\n .format(lon=None, ron=None, lidx=False, ridx=False))\n\n with tm.assert_raises_regex(MergeError, msg):\n merge(df1, df2)\n\n def test_merge_non_unique_indexes(self):\n\n dt = datetime(2012, 5, 1)\n dt2 = datetime(2012, 5, 2)\n dt3 = datetime(2012, 5, 3)\n dt4 = datetime(2012, 5, 4)\n\n df1 = DataFrame({'x': ['a']}, index=[dt])\n df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])\n _check_merge(df1, df2)\n\n # Not monotonic\n df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])\n df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},\n index=[dt3, dt3, dt2, dt2, dt, dt])\n _check_merge(df1, df2)\n\n df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])\n df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])\n _check_merge(df1, df2)\n\n def test_merge_non_unique_index_many_to_many(self):\n dt = datetime(2012, 5, 1)\n dt2 = datetime(2012, 5, 2)\n dt3 = datetime(2012, 5, 3)\n df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},\n index=[dt2, dt2, dt, dt])\n df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},\n index=[dt2, dt2, dt3, dt, dt])\n _check_merge(df1, df2)\n\n def test_left_merge_empty_dataframe(self):\n left = DataFrame({'key': [1], 'value': [2]})\n right = DataFrame({'key': []})\n\n result = merge(left, right, on='key', how='left')\n assert_frame_equal(result, left)\n\n result = merge(right, left, on='key', how='right')\n assert_frame_equal(result, left)\n\n @pytest.mark.parametrize('kwarg',\n [dict(left_index=True, right_index=True),\n dict(left_index=True, right_on='x'),\n dict(left_on='a', right_index=True),\n dict(left_on='a', right_on='x')])\n def test_merge_left_empty_right_empty(self, join_type, kwarg):\n # GH 10824\n left = pd.DataFrame([], columns=['a', 'b', 'c'])\n right = pd.DataFrame([], columns=['x', 'y', 'z'])\n\n exp_in = pd.DataFrame([], columns=['a', 'b', 'c', 'x', 'y', 'z'],\n index=pd.Index([], dtype=object),\n dtype=object)\n\n result = pd.merge(left, right, how=join_type, **kwarg)\n tm.assert_frame_equal(result, exp_in)\n\n def test_merge_left_empty_right_notempty(self):\n # GH 10824\n left = pd.DataFrame([], columns=['a', 'b', 'c'])\n right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=['x', 'y', 'z'])\n\n exp_out = pd.DataFrame({'a': np.array([np.nan] * 3, dtype=object),\n 'b': np.array([np.nan] * 3, dtype=object),\n 'c': np.array([np.nan] * 3, dtype=object),\n 'x': [1, 4, 7],\n 'y': [2, 5, 8],\n 'z': [3, 6, 9]},\n columns=['a', 'b', 'c', 'x', 'y', 'z'])\n exp_in = exp_out[0:0] # make empty DataFrame keeping dtype\n # result will have object dtype\n exp_in.index = exp_in.index.astype(object)\n\n def check1(exp, kwarg):\n result = pd.merge(left, right, how='inner', **kwarg)\n tm.assert_frame_equal(result, exp)\n result = pd.merge(left, right, how='left', **kwarg)\n tm.assert_frame_equal(result, exp)\n\n def check2(exp, kwarg):\n result = pd.merge(left, right, how='right', **kwarg)\n tm.assert_frame_equal(result, exp)\n result = pd.merge(left, right, how='outer', **kwarg)\n tm.assert_frame_equal(result, exp)\n\n for kwarg in [dict(left_index=True, right_index=True),\n dict(left_index=True, right_on='x')]:\n check1(exp_in, kwarg)\n check2(exp_out, kwarg)\n\n kwarg = dict(left_on='a', right_index=True)\n check1(exp_in, kwarg)\n exp_out['a'] = [0, 1, 2]\n check2(exp_out, kwarg)\n\n kwarg = dict(left_on='a', right_on='x')\n check1(exp_in, kwarg)\n exp_out['a'] = np.array([np.nan] * 3, dtype=object)\n check2(exp_out, kwarg)\n\n def 
test_merge_left_notempty_right_empty(self):\n # GH 10824\n left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=['a', 'b', 'c'])\n right = pd.DataFrame([], columns=['x', 'y', 'z'])\n\n exp_out = pd.DataFrame({'a': [1, 4, 7],\n 'b': [2, 5, 8],\n 'c': [3, 6, 9],\n 'x': np.array([np.nan] * 3, dtype=object),\n 'y': np.array([np.nan] * 3, dtype=object),\n 'z': np.array([np.nan] * 3, dtype=object)},\n columns=['a', 'b', 'c', 'x', 'y', 'z'])\n exp_in = exp_out[0:0] # make empty DataFrame keeping dtype\n # result will have object dtype\n exp_in.index = exp_in.index.astype(object)\n\n def check1(exp, kwarg):\n result = pd.merge(left, right, how='inner', **kwarg)\n tm.assert_frame_equal(result, exp)\n result = pd.merge(left, right, how='right', **kwarg)\n tm.assert_frame_equal(result, exp)\n\n def check2(exp, kwarg):\n result = pd.merge(left, right, how='left', **kwarg)\n tm.assert_frame_equal(result, exp)\n result = pd.merge(left, right, how='outer', **kwarg)\n tm.assert_frame_equal(result, exp)\n\n for kwarg in [dict(left_index=True, right_index=True),\n dict(left_index=True, right_on='x'),\n dict(left_on='a', right_index=True),\n dict(left_on='a', right_on='x')]:\n check1(exp_in, kwarg)\n check2(exp_out, kwarg)\n\n def test_merge_nosort(self):\n # #2098, anything to do?\n\n from datetime import datetime\n\n d = {\"var1\": np.random.randint(0, 10, size=10),\n \"var2\": np.random.randint(0, 10, size=10),\n \"var3\": [datetime(2012, 1, 12),\n datetime(2011, 2, 4),\n datetime(2010, 2, 3),\n datetime(2012, 1, 12),\n datetime(2011, 2, 4),\n datetime(2012, 4, 3),\n datetime(2012, 3, 4),\n datetime(2008, 5, 1),\n datetime(2010, 2, 3),\n datetime(2012, 2, 3)]}\n df = DataFrame.from_dict(d)\n var3 = df.var3.unique()\n var3.sort()\n new = DataFrame.from_dict({\"var3\": var3,\n \"var8\": np.random.random(7)})\n\n result = df.merge(new, on=\"var3\", sort=False)\n exp = merge(df, new, on='var3', sort=False)\n assert_frame_equal(result, exp)\n\n assert (df.var3.unique() == result.var3.unique()).all()\n\n def test_merge_nan_right(self):\n df1 = DataFrame({\"i1\": [0, 1], \"i2\": [0, 1]})\n df2 = DataFrame({\"i1\": [0], \"i3\": [0]})\n result = df1.join(df2, on=\"i1\", rsuffix=\"_\")\n expected = (DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},\n 'i1_': {0: 0, 1: np.nan},\n 'i3': {0: 0.0, 1: np.nan},\n None: {0: 0, 1: 0}})\n .set_index(None)\n .reset_index()[['i1', 'i2', 'i1_', 'i3']])\n assert_frame_equal(result, expected, check_dtype=False)\n\n df1 = DataFrame({\"i1\": [0, 1], \"i2\": [0.5, 1.5]})\n df2 = DataFrame({\"i1\": [0], \"i3\": [0.7]})\n result = df1.join(df2, rsuffix=\"_\", on='i1')\n expected = (DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},\n 'i2': {0: 0.5, 1: 1.5},\n 'i3': {0: 0.69999999999999996,\n 1: nan}})\n [['i1', 'i2', 'i1_', 'i3']])\n assert_frame_equal(result, expected)\n\n def test_merge_type(self):\n class NotADataFrame(DataFrame):\n\n @property\n def _constructor(self):\n return NotADataFrame\n\n nad = NotADataFrame(self.df)\n result = nad.merge(self.df2, on='key1')\n\n assert isinstance(result, NotADataFrame)\n\n def test_join_append_timedeltas(self):\n\n import datetime as dt\n from pandas import NaT\n\n # timedelta64 issues with join/merge\n # GH 5695\n\n d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}\n df = DataFrame(columns=list('dt'))\n df = df.append(d, ignore_index=True)\n result = df.append(d, ignore_index=True)\n expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),\n dt.datetime(2013, 11, 5, 5, 56)],\n 't': 
[dt.timedelta(0, 22500),\n dt.timedelta(0, 22500)]})\n assert_frame_equal(result, expected)\n\n td = np.timedelta64(300000000)\n lhs = DataFrame(Series([td, td], index=[\"A\", \"B\"]))\n rhs = DataFrame(Series([td], index=[\"A\"]))\n\n result = lhs.join(rhs, rsuffix='r', how=\"left\")\n expected = DataFrame({'0': Series([td, td], index=list('AB')),\n '0r': Series([td, NaT], index=list('AB'))})\n assert_frame_equal(result, expected)\n\n def test_other_datetime_unit(self):\n # GH 13389\n df1 = pd.DataFrame({'entity_id': [101, 102]})\n s = pd.Series([None, None], index=[101, 102], name='days')\n\n for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',\n 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',\n 'datetime64[ns]']:\n\n df2 = s.astype(dtype).to_frame('days')\n # coerces to datetime64[ns], thus sholuld not be affected\n assert df2['days'].dtype == 'datetime64[ns]'\n\n result = df1.merge(df2, left_on='entity_id', right_index=True)\n\n exp = pd.DataFrame({'entity_id': [101, 102],\n 'days': np.array(['nat', 'nat'],\n dtype='datetime64[ns]')},\n columns=['entity_id', 'days'])\n tm.assert_frame_equal(result, exp)\n\n @pytest.mark.parametrize(\"unit\", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])\n def test_other_timedelta_unit(self, unit):\n # GH 13389\n df1 = pd.DataFrame({'entity_id': [101, 102]})\n s = pd.Series([None, None], index=[101, 102], name='days')\n\n dtype = \"m8[{}]\".format(unit)\n df2 = s.astype(dtype).to_frame('days')\n assert df2['days'].dtype == 'm8[ns]'\n\n result = df1.merge(df2, left_on='entity_id', right_index=True)\n\n exp = pd.DataFrame({'entity_id': [101, 102],\n 'days': np.array(['nat', 'nat'],\n dtype=dtype)},\n columns=['entity_id', 'days'])\n tm.assert_frame_equal(result, exp)\n\n def test_overlapping_columns_error_message(self):\n df = DataFrame({'key': [1, 2, 3],\n 'v1': [4, 5, 6],\n 'v2': [7, 8, 9]})\n df2 = DataFrame({'key': [1, 2, 3],\n 'v1': [4, 5, 6],\n 'v2': [7, 8, 9]})\n\n df.columns = ['key', 'foo', 'foo']\n df2.columns = ['key', 'bar', 'bar']\n expected = DataFrame({'key': [1, 2, 3],\n 'v1': [4, 5, 6],\n 'v2': [7, 8, 9],\n 'v3': [4, 5, 6],\n 'v4': [7, 8, 9]})\n expected.columns = ['key', 'foo', 'foo', 'bar', 'bar']\n assert_frame_equal(merge(df, df2), expected)\n\n # #2649, #10639\n df2.columns = ['key1', 'foo', 'foo']\n pytest.raises(ValueError, merge, df, df2)\n\n def test_merge_on_datetime64tz(self):\n\n # GH11405\n left = pd.DataFrame({'key': pd.date_range('20151010', periods=2,\n tz='US/Eastern'),\n 'value': [1, 2]})\n right = pd.DataFrame({'key': pd.date_range('20151011', periods=3,\n tz='US/Eastern'),\n 'value': [1, 2, 3]})\n\n expected = DataFrame({'key': pd.date_range('20151010', periods=4,\n tz='US/Eastern'),\n 'value_x': [1, 2, np.nan, np.nan],\n 'value_y': [np.nan, 1, 2, 3]})\n result = pd.merge(left, right, on='key', how='outer')\n assert_frame_equal(result, expected)\n\n left = pd.DataFrame({'key': [1, 2],\n 'value': pd.date_range('20151010', periods=2,\n tz='US/Eastern')})\n right = pd.DataFrame({'key': [2, 3],\n 'value': pd.date_range('20151011', periods=2,\n tz='US/Eastern')})\n expected = DataFrame({\n 'key': [1, 2, 3],\n 'value_x': list(pd.date_range('20151010', periods=2,\n tz='US/Eastern')) + [pd.NaT],\n 'value_y': [pd.NaT] + list(pd.date_range('20151011', periods=2,\n tz='US/Eastern'))})\n result = pd.merge(left, right, on='key', how='outer')\n assert_frame_equal(result, expected)\n assert result['value_x'].dtype == 'datetime64[ns, US/Eastern]'\n assert result['value_y'].dtype == 'datetime64[ns, US/Eastern]'\n\n def 
test_merge_non_unique_period_index(self):\n # GH #16871\n index = pd.period_range('2016-01-01', periods=16, freq='M')\n df = DataFrame([i for i in range(len(index))],\n index=index, columns=['pnum'])\n df2 = concat([df, df])\n result = df.merge(df2, left_index=True, right_index=True, how='inner')\n expected = DataFrame(\n np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),\n columns=['pnum_x', 'pnum_y'], index=df2.sort_index().index)\n tm.assert_frame_equal(result, expected)\n\n def test_merge_on_periods(self):\n left = pd.DataFrame({'key': pd.period_range('20151010', periods=2,\n freq='D'),\n 'value': [1, 2]})\n right = pd.DataFrame({'key': pd.period_range('20151011', periods=3,\n freq='D'),\n 'value': [1, 2, 3]})\n\n expected = DataFrame({'key': pd.period_range('20151010', periods=4,\n freq='D'),\n 'value_x': [1, 2, np.nan, np.nan],\n 'value_y': [np.nan, 1, 2, 3]})\n result = pd.merge(left, right, on='key', how='outer')\n assert_frame_equal(result, expected)\n\n left = pd.DataFrame({'key': [1, 2],\n 'value': pd.period_range('20151010', periods=2,\n freq='D')})\n right = pd.DataFrame({'key': [2, 3],\n 'value': pd.period_range('20151011', periods=2,\n freq='D')})\n\n exp_x = pd.period_range('20151010', periods=2, freq='D')\n exp_y = pd.period_range('20151011', periods=2, freq='D')\n expected = DataFrame({'key': [1, 2, 3],\n 'value_x': list(exp_x) + [pd.NaT],\n 'value_y': [pd.NaT] + list(exp_y)})\n result = pd.merge(left, right, on='key', how='outer')\n assert_frame_equal(result, expected)\n assert result['value_x'].dtype == 'object'\n assert result['value_y'].dtype == 'object'\n\n def test_indicator(self):\n # PR #10054. xref #7412 and closes #8790.\n df1 = DataFrame({'col1': [0, 1], 'col_conflict': [1, 2],\n 'col_left': ['a', 'b']})\n df1_copy = df1.copy()\n\n df2 = DataFrame({'col1': [1, 2, 3, 4, 5],\n 'col_conflict': [1, 2, 3, 4, 5],\n 'col_right': [2, 2, 2, 2, 2]})\n df2_copy = df2.copy()\n\n df_result = DataFrame({\n 'col1': [0, 1, 2, 3, 4, 5],\n 'col_conflict_x': [1, 2, np.nan, np.nan, np.nan, np.nan],\n 'col_left': ['a', 'b', np.nan, np.nan, np.nan, np.nan],\n 'col_conflict_y': [np.nan, 1, 2, 3, 4, 5],\n 'col_right': [np.nan, 2, 2, 2, 2, 2]})\n df_result['_merge'] = Categorical(\n ['left_only', 'both', 'right_only',\n 'right_only', 'right_only', 'right_only'],\n categories=['left_only', 'right_only', 'both'])\n\n df_result = df_result[['col1', 'col_conflict_x', 'col_left',\n 'col_conflict_y', 'col_right', '_merge']]\n\n test = merge(df1, df2, on='col1', how='outer', indicator=True)\n assert_frame_equal(test, df_result)\n test = df1.merge(df2, on='col1', how='outer', indicator=True)\n assert_frame_equal(test, df_result)\n\n # No side effects\n assert_frame_equal(df1, df1_copy)\n assert_frame_equal(df2, df2_copy)\n\n # Check with custom name\n df_result_custom_name = df_result\n df_result_custom_name = df_result_custom_name.rename(\n columns={'_merge': 'custom_name'})\n\n test_custom_name = merge(\n df1, df2, on='col1', how='outer', indicator='custom_name')\n assert_frame_equal(test_custom_name, df_result_custom_name)\n test_custom_name = df1.merge(\n df2, on='col1', how='outer', indicator='custom_name')\n assert_frame_equal(test_custom_name, df_result_custom_name)\n\n # Check only accepts strings and booleans\n with pytest.raises(ValueError):\n merge(df1, df2, on='col1', how='outer', indicator=5)\n with pytest.raises(ValueError):\n df1.merge(df2, on='col1', how='outer', indicator=5)\n\n # Check result integrity\n\n test2 = merge(df1, df2, on='col1', how='left', 
indicator=True)\n assert (test2._merge != 'right_only').all()\n test2 = df1.merge(df2, on='col1', how='left', indicator=True)\n assert (test2._merge != 'right_only').all()\n\n test3 = merge(df1, df2, on='col1', how='right', indicator=True)\n assert (test3._merge != 'left_only').all()\n test3 = df1.merge(df2, on='col1', how='right', indicator=True)\n assert (test3._merge != 'left_only').all()\n\n test4 = merge(df1, df2, on='col1', how='inner', indicator=True)\n assert (test4._merge == 'both').all()\n test4 = df1.merge(df2, on='col1', how='inner', indicator=True)\n assert (test4._merge == 'both').all()\n\n # Check if working name in df\n for i in ['_right_indicator', '_left_indicator', '_merge']:\n df_badcolumn = DataFrame({'col1': [1, 2], i: [2, 2]})\n\n with pytest.raises(ValueError):\n merge(df1, df_badcolumn, on='col1',\n how='outer', indicator=True)\n with pytest.raises(ValueError):\n df1.merge(df_badcolumn, on='col1', how='outer', indicator=True)\n\n # Check for name conflict with custom name\n df_badcolumn = DataFrame(\n {'col1': [1, 2], 'custom_column_name': [2, 2]})\n\n with pytest.raises(ValueError):\n merge(df1, df_badcolumn, on='col1', how='outer',\n indicator='custom_column_name')\n with pytest.raises(ValueError):\n df1.merge(df_badcolumn, on='col1', how='outer',\n indicator='custom_column_name')\n\n # Merge on multiple columns\n df3 = DataFrame({'col1': [0, 1], 'col2': ['a', 'b']})\n\n df4 = DataFrame({'col1': [1, 1, 3], 'col2': ['b', 'x', 'y']})\n\n hand_coded_result = DataFrame({'col1': [0, 1, 1, 3],\n 'col2': ['a', 'b', 'x', 'y']})\n hand_coded_result['_merge'] = Categorical(\n ['left_only', 'both', 'right_only', 'right_only'],\n categories=['left_only', 'right_only', 'both'])\n\n test5 = merge(df3, df4, on=['col1', 'col2'],\n how='outer', indicator=True)\n assert_frame_equal(test5, hand_coded_result)\n test5 = df3.merge(df4, on=['col1', 'col2'],\n how='outer', indicator=True)\n assert_frame_equal(test5, hand_coded_result)\n\n def test_validation(self):\n left = DataFrame({'a': ['a', 'b', 'c', 'd'],\n 'b': ['cat', 'dog', 'weasel', 'horse']},\n index=range(4))\n\n right = DataFrame({'a': ['a', 'b', 'c', 'd', 'e'],\n 'c': ['meow', 'bark', 'um... weasel noise?',\n 'nay', 'chirp']},\n index=range(5))\n\n # Make sure no side effects.\n left_copy = left.copy()\n right_copy = right.copy()\n\n result = merge(left, right, left_index=True, right_index=True,\n validate='1:1')\n assert_frame_equal(left, left_copy)\n assert_frame_equal(right, right_copy)\n\n # make sure merge still correct\n expected = DataFrame({'a_x': ['a', 'b', 'c', 'd'],\n 'b': ['cat', 'dog', 'weasel', 'horse'],\n 'a_y': ['a', 'b', 'c', 'd'],\n 'c': ['meow', 'bark', 'um... weasel noise?',\n 'nay']},\n index=range(4),\n columns=['a_x', 'b', 'a_y', 'c'])\n\n result = merge(left, right, left_index=True, right_index=True,\n validate='one_to_one')\n assert_frame_equal(result, expected)\n\n expected_2 = DataFrame({'a': ['a', 'b', 'c', 'd'],\n 'b': ['cat', 'dog', 'weasel', 'horse'],\n 'c': ['meow', 'bark', 'um... weasel noise?',\n 'nay']},\n index=range(4))\n\n result = merge(left, right, on='a', validate='1:1')\n assert_frame_equal(left, left_copy)\n assert_frame_equal(right, right_copy)\n assert_frame_equal(result, expected_2)\n\n result = merge(left, right, on='a', validate='one_to_one')\n assert_frame_equal(result, expected_2)\n\n # One index, one column\n expected_3 = DataFrame({'b': ['cat', 'dog', 'weasel', 'horse'],\n 'a': ['a', 'b', 'c', 'd'],\n 'c': ['meow', 'bark', 'um... 
weasel noise?',\n 'nay']},\n columns=['b', 'a', 'c'],\n index=range(4))\n\n left_index_reset = left.set_index('a')\n result = merge(left_index_reset, right, left_index=True,\n right_on='a', validate='one_to_one')\n assert_frame_equal(result, expected_3)\n\n # Dups on right\n right_w_dups = right.append(pd.DataFrame({'a': ['e'], 'c': ['moo']},\n index=[4]))\n merge(left, right_w_dups, left_index=True, right_index=True,\n validate='one_to_many')\n\n with pytest.raises(MergeError):\n merge(left, right_w_dups, left_index=True, right_index=True,\n validate='one_to_one')\n\n with pytest.raises(MergeError):\n merge(left, right_w_dups, on='a', validate='one_to_one')\n\n # Dups on left\n left_w_dups = left.append(pd.DataFrame({'a': ['a'], 'c': ['cow']},\n index=[3]), sort=True)\n merge(left_w_dups, right, left_index=True, right_index=True,\n validate='many_to_one')\n\n with pytest.raises(MergeError):\n merge(left_w_dups, right, left_index=True, right_index=True,\n validate='one_to_one')\n\n with pytest.raises(MergeError):\n merge(left_w_dups, right, on='a', validate='one_to_one')\n\n # Dups on both\n merge(left_w_dups, right_w_dups, on='a', validate='many_to_many')\n\n with pytest.raises(MergeError):\n merge(left_w_dups, right_w_dups, left_index=True,\n right_index=True, validate='many_to_one')\n\n with pytest.raises(MergeError):\n merge(left_w_dups, right_w_dups, on='a',\n validate='one_to_many')\n\n # Check invalid arguments\n with pytest.raises(ValueError):\n merge(left, right, on='a', validate='jibberish')\n\n # Two column merge, dups in both, but jointly no dups.\n left = DataFrame({'a': ['a', 'a', 'b', 'b'],\n 'b': [0, 1, 0, 1],\n 'c': ['cat', 'dog', 'weasel', 'horse']},\n index=range(4))\n\n right = DataFrame({'a': ['a', 'a', 'b'],\n 'b': [0, 1, 0],\n 'd': ['meow', 'bark', 'um... weasel noise?']},\n index=range(3))\n\n expected_multi = DataFrame({'a': ['a', 'a', 'b'],\n 'b': [0, 1, 0],\n 'c': ['cat', 'dog', 'weasel'],\n 'd': ['meow', 'bark',\n 'um... 
weasel noise?']},\n index=range(3))\n\n with pytest.raises(MergeError):\n merge(left, right, on='a', validate='1:1')\n\n result = merge(left, right, on=['a', 'b'], validate='1:1')\n assert_frame_equal(result, expected_multi)\n\n def test_merge_two_empty_df_no_division_error(self):\n # GH17776, PR #17846\n a = pd.DataFrame({'a': [], 'b': [], 'c': []})\n with np.errstate(divide='raise'):\n merge(a, a, on=('a', 'b'))\n\n\ndef _check_merge(x, y):\n for how in ['inner', 'left', 'outer']:\n result = x.join(y, how=how)\n\n expected = merge(x.reset_index(), y.reset_index(), how=how,\n sort=True)\n expected = expected.set_index('index')\n\n # TODO check_names on merge?\n assert_frame_equal(result, expected, check_names=False)\n\n\nclass TestMergeMulti(object):\n\n def setup_method(self, method):\n self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,\n columns=['j_one', 'j_two', 'j_three'])\n\n # a little relevant example with NAs\n key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',\n 'qux', 'snap']\n key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',\n 'three', 'one']\n\n data = np.random.randn(len(key1))\n self.data = DataFrame({'key1': key1, 'key2': key2,\n 'data': data})\n\n def test_merge_on_multikey(self):\n joined = self.data.join(self.to_join, on=['key1', 'key2'])\n\n join_key = Index(lzip(self.data['key1'], self.data['key2']))\n indexer = self.to_join.index.get_indexer(join_key)\n ex_values = self.to_join.values.take(indexer, axis=0)\n ex_values[indexer == -1] = np.nan\n expected = self.data.join(DataFrame(ex_values,\n columns=self.to_join.columns))\n\n # TODO: columns aren't in the same order yet\n assert_frame_equal(joined, expected.loc[:, joined.columns])\n\n left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True)\n right = expected.loc[:, joined.columns].sort_values(['key1', 'key2'],\n kind='mergesort')\n assert_frame_equal(left, right)\n\n def test_left_join_multi_index(self):\n icols = ['1st', '2nd', '3rd']\n\n def bind_cols(df):\n iord = lambda a: 0 if a != a else ord(a)\n f = lambda ts: ts.map(iord) - ord('a')\n return (f(df['1st']) + f(df['3rd']) * 1e2 +\n df['2nd'].fillna(0) * 1e4)\n\n def run_asserts(left, right):\n for sort in [False, True]:\n res = left.join(right, on=icols, how='left', sort=sort)\n\n assert len(left) < len(res) + 1\n assert not res['4th'].isna().any()\n assert not res['5th'].isna().any()\n\n tm.assert_series_equal(\n res['4th'], - res['5th'], check_names=False)\n result = bind_cols(res.iloc[:, :-2])\n tm.assert_series_equal(res['4th'], result, check_names=False)\n assert result.name is None\n\n if sort:\n tm.assert_frame_equal(\n res, res.sort_values(icols, kind='mergesort'))\n\n out = merge(left, right.reset_index(), on=icols,\n sort=sort, how='left')\n\n res.index = np.arange(len(res))\n tm.assert_frame_equal(out, res)\n\n lc = list(map(chr, np.arange(ord('a'), ord('z') + 1)))\n left = DataFrame(np.random.choice(lc, (5000, 2)),\n columns=['1st', '3rd'])\n left.insert(1, '2nd', np.random.randint(0, 1000, len(left)))\n\n i = np.random.permutation(len(left))\n right = left.iloc[i].copy()\n\n left['4th'] = bind_cols(left)\n right['5th'] = - bind_cols(right)\n right.set_index(icols, inplace=True)\n\n run_asserts(left, right)\n\n # inject some nulls\n left.loc[1::23, '1st'] = np.nan\n left.loc[2::37, '2nd'] = 
np.nan\n left.loc[3::43, '3rd'] = np.nan\n left['4th'] = bind_cols(left)\n\n i = np.random.permutation(len(left))\n right = left.iloc[i, :-1]\n right['5th'] = - bind_cols(right)\n right.set_index(icols, inplace=True)\n\n run_asserts(left, right)\n\n def test_merge_right_vs_left(self):\n # compare left vs right merge with multikey\n for sort in [False, True]:\n merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],\n right_index=True, how='left', sort=sort)\n\n merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],\n left_index=True, how='right',\n sort=sort)\n\n merged2 = merged2.loc[:, merged1.columns]\n assert_frame_equal(merged1, merged2)\n\n def test_compress_group_combinations(self):\n\n # ~ 40000000 possible unique groups\n key1 = tm.rands_array(10, 10000)\n key1 = np.tile(key1, 2)\n key2 = key1[::-1]\n\n df = DataFrame({'key1': key1, 'key2': key2,\n 'value1': np.random.randn(20000)})\n\n df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],\n 'value2': np.random.randn(10000)})\n\n # just to hit the label compression code path\n merge(df, df2, how='outer')\n\n def test_left_join_index_preserve_order(self):\n\n left = DataFrame({'k1': [0, 1, 2] * 8,\n 'k2': ['foo', 'bar'] * 12,\n 'v': np.array(np.arange(24), dtype=np.int64)})\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': [5, 7]}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n expected['v2'] = np.nan\n expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5\n expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(\n result.sort_values(['k1', 'k2'], kind='mergesort'),\n left.join(right, on=['k1', 'k2'], sort=True))\n\n # test join with multi dtypes blocks\n left = DataFrame({'k1': [0, 1, 2] * 8,\n 'k2': ['foo', 'bar'] * 12,\n 'k3': np.array([0, 1, 2] * 8, dtype=np.float32),\n 'v': np.array(np.arange(24), dtype=np.int32)})\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': [5, 7]}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n expected['v2'] = np.nan\n expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5\n expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(\n result.sort_values(['k1', 'k2'], kind='mergesort'),\n left.join(right, on=['k1', 'k2'], sort=True))\n\n # do a right join for an extra test\n joined = merge(right, left, left_index=True,\n right_on=['k1', 'k2'], how='right')\n tm.assert_frame_equal(joined.loc[:, expected.columns], expected)\n\n def test_left_join_index_multi_match_multiindex(self):\n left = DataFrame([\n ['X', 'Y', 'C', 'a'],\n ['W', 'Y', 'C', 'e'],\n ['V', 'Q', 'A', 'h'],\n ['V', 'R', 'D', 'i'],\n ['X', 'Y', 'D', 'b'],\n ['X', 'Y', 'A', 'c'],\n ['W', 'Q', 'B', 'f'],\n ['W', 'R', 'C', 'g'],\n ['V', 'Y', 'C', 'j'],\n ['X', 'Y', 'B', 'd']],\n columns=['cola', 'colb', 'colc', 'tag'],\n index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8])\n\n right = DataFrame([\n ['W', 'R', 'C', 0],\n ['W', 'Q', 'B', 3],\n ['W', 'Q', 'B', 8],\n ['X', 'Y', 'A', 1],\n ['X', 'Y', 'A', 4],\n ['X', 'Y', 'B', 5],\n ['X', 'Y', 'C', 6],\n ['X', 'Y', 'C', 9],\n ['X', 'Q', 'C', -6],\n ['X', 'R', 'C', -9],\n ['V', 'Y', 'C', 7],\n ['V', 'R', 'D', 2],\n ['V', 'R', 'D', -1],\n ['V', 'Q', 'A', -3]],\n columns=['col1', 'col2', 'col3', 'val'])\n\n right.set_index(['col1', 'col2', 'col3'], 
inplace=True)\n result = left.join(right, on=['cola', 'colb', 'colc'], how='left')\n\n expected = DataFrame([\n ['X', 'Y', 'C', 'a', 6],\n ['X', 'Y', 'C', 'a', 9],\n ['W', 'Y', 'C', 'e', nan],\n ['V', 'Q', 'A', 'h', -3],\n ['V', 'R', 'D', 'i', 2],\n ['V', 'R', 'D', 'i', -1],\n ['X', 'Y', 'D', 'b', nan],\n ['X', 'Y', 'A', 'c', 1],\n ['X', 'Y', 'A', 'c', 4],\n ['W', 'Q', 'B', 'f', 3],\n ['W', 'Q', 'B', 'f', 8],\n ['W', 'R', 'C', 'g', 0],\n ['V', 'Y', 'C', 'j', 7],\n ['X', 'Y', 'B', 'd', 5]],\n columns=['cola', 'colb', 'colc', 'tag', 'val'],\n index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])\n\n tm.assert_frame_equal(result, expected)\n\n result = left.join(right, on=['cola', 'colb', 'colc'],\n how='left', sort=True)\n\n tm.assert_frame_equal(\n result,\n expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort'))\n\n # GH7331 - maintain left frame order in left merge\n right.reset_index(inplace=True)\n right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist()\n result = merge(left, right, how='left', on=left.columns[:-1].tolist())\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n def test_left_join_index_multi_match(self):\n left = DataFrame([\n ['c', 0],\n ['b', 1],\n ['a', 2],\n ['b', 3]],\n columns=['tag', 'val'],\n index=[2, 0, 1, 3])\n\n right = DataFrame([\n ['a', 'v'],\n ['c', 'w'],\n ['c', 'x'],\n ['d', 'y'],\n ['a', 'z'],\n ['c', 'r'],\n ['e', 'q'],\n ['c', 's']],\n columns=['tag', 'char'])\n\n right.set_index('tag', inplace=True)\n result = left.join(right, on='tag', how='left')\n\n expected = DataFrame([\n ['c', 0, 'w'],\n ['c', 0, 'x'],\n ['c', 0, 'r'],\n ['c', 0, 's'],\n ['b', 1, nan],\n ['a', 2, 'v'],\n ['a', 2, 'z'],\n ['b', 3, nan]],\n columns=['tag', 'val', 'char'],\n index=[2, 2, 2, 2, 0, 1, 1, 3])\n\n tm.assert_frame_equal(result, expected)\n\n result = left.join(right, on='tag', how='left', sort=True)\n tm.assert_frame_equal(\n result, expected.sort_values('tag', kind='mergesort'))\n\n # GH7331 - maintain left frame order in left merge\n result = merge(left, right.reset_index(), how='left', on='tag')\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n def test_left_merge_na_buglet(self):\n left = DataFrame({'id': list('abcde'), 'v1': randn(5),\n 'v2': randn(5), 'dummy': list('abcde'),\n 'v3': randn(5)},\n columns=['id', 'v1', 'v2', 'dummy', 'v3'])\n right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],\n 'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})\n\n merged = merge(left, right, on='id', how='left')\n\n rdf = right.drop(['id'], axis=1)\n expected = left.join(rdf)\n tm.assert_frame_equal(merged, expected)\n\n def test_merge_na_keys(self):\n data = [[1950, \"A\", 1.5],\n [1950, \"B\", 1.5],\n [1955, \"B\", 1.5],\n [1960, \"B\", np.nan],\n [1970, \"B\", 4.],\n [1950, \"C\", 4.],\n [1960, \"C\", np.nan],\n [1965, \"C\", 3.],\n [1970, \"C\", 4.]]\n\n frame = DataFrame(data, columns=[\"year\", \"panel\", \"data\"])\n\n other_data = [[1960, 'A', np.nan],\n [1970, 'A', np.nan],\n [1955, 'A', np.nan],\n [1965, 'A', np.nan],\n [1965, 'B', np.nan],\n [1955, 'C', np.nan]]\n other = DataFrame(other_data, columns=['year', 'panel', 'data'])\n\n result = frame.merge(other, how='outer')\n\n expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')\n expected = expected.replace(-999, np.nan)\n\n tm.assert_frame_equal(result, expected)\n\n def test_join_multi_levels(self):\n\n # GH 3662\n # merge multi-levels\n household = (\n DataFrame(\n dict(household_id=[1, 2, 3],\n 
male=[0, 1, 0],\n wealth=[196087.3, 316478.7, 294750]),\n columns=['household_id', 'male', 'wealth'])\n .set_index('household_id'))\n portfolio = (\n DataFrame(\n dict(household_id=[1, 2, 2, 3, 3, 3, 4],\n asset_id=[\"nl0000301109\", \"nl0000289783\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"lu0197800237\", \"nl0000289965\",\n np.nan],\n name=[\"ABN Amro\", \"Robeco\", \"Royal Dutch Shell\",\n \"Royal Dutch Shell\",\n \"AAB Eastern Europe Equity Fund\",\n \"Postbank BioTech Fonds\", np.nan],\n share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),\n columns=['household_id', 'asset_id', 'name', 'share'])\n .set_index(['household_id', 'asset_id']))\n result = household.join(portfolio, how='inner')\n expected = (\n DataFrame(\n dict(male=[0, 1, 1, 0, 0, 0],\n wealth=[196087.3, 316478.7, 316478.7,\n 294750.0, 294750.0, 294750.0],\n name=['ABN Amro', 'Robeco', 'Royal Dutch Shell',\n 'Royal Dutch Shell',\n 'AAB Eastern Europe Equity Fund',\n 'Postbank BioTech Fonds'],\n share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25],\n household_id=[1, 2, 2, 3, 3, 3],\n asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29',\n 'gb00b03mlx29', 'lu0197800237',\n 'nl0000289965']))\n .set_index(['household_id', 'asset_id'])\n .reindex(columns=['male', 'wealth', 'name', 'share']))\n assert_frame_equal(result, expected)\n\n assert_frame_equal(result, expected)\n\n # equivalency\n result2 = (merge(household.reset_index(), portfolio.reset_index(),\n on=['household_id'], how='inner')\n .set_index(['household_id', 'asset_id']))\n assert_frame_equal(result2, expected)\n\n result = household.join(portfolio, how='outer')\n expected = (concat([\n expected,\n (DataFrame(\n dict(share=[1.00]),\n index=MultiIndex.from_tuples(\n [(4, np.nan)],\n names=['household_id', 'asset_id'])))\n ], axis=0, sort=True).reindex(columns=expected.columns))\n assert_frame_equal(result, expected)\n\n # invalid cases\n household.index.name = 'foo'\n\n def f():\n household.join(portfolio, how='inner')\n\n pytest.raises(ValueError, f)\n\n portfolio2 = portfolio.copy()\n portfolio2.index.set_names(['household_id', 'foo'])\n\n def f():\n portfolio2.join(portfolio, how='inner')\n\n pytest.raises(ValueError, f)\n\n def test_join_multi_levels2(self):\n\n # some more advanced merges\n # GH6360\n household = (\n DataFrame(\n dict(household_id=[1, 2, 2, 3, 3, 3, 4],\n asset_id=[\"nl0000301109\", \"nl0000301109\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"lu0197800237\", \"nl0000289965\",\n np.nan],\n share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),\n columns=['household_id', 'asset_id', 'share'])\n .set_index(['household_id', 'asset_id']))\n\n log_return = DataFrame(dict(\n asset_id=[\"gb00b03mlx29\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"lu0197800237\", \"lu0197800237\"],\n t=[233, 234, 235, 180, 181],\n log_return=[.09604978, -.06524096, .03532373, .03025441, .036997]\n )).set_index([\"asset_id\", \"t\"])\n\n expected = (\n DataFrame(dict(\n household_id=[2, 2, 2, 3, 3, 3, 3, 3],\n asset_id=[\"gb00b03mlx29\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"gb00b03mlx29\",\n \"lu0197800237\", \"lu0197800237\"],\n t=[233, 234, 235, 233, 234, 235, 180, 181],\n share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],\n log_return=[.09604978, -.06524096, .03532373,\n .09604978, -.06524096, .03532373,\n .03025441, .036997]\n ))\n .set_index([\"household_id\", \"asset_id\", \"t\"])\n .reindex(columns=['share', 'log_return']))\n\n def f():\n household.join(log_return, how='inner')\n\n pytest.raises(NotImplementedError, f)\n\n # this is the equivalency\n 
result = (merge(household.reset_index(), log_return.reset_index(),\n on=['asset_id'], how='inner')\n .set_index(['household_id', 'asset_id', 't']))\n assert_frame_equal(result, expected)\n\n expected = (\n DataFrame(dict(\n household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],\n asset_id=[\"nl0000301109\", \"nl0000289783\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"gb00b03mlx29\", \"gb00b03mlx29\",\n \"lu0197800237\", \"lu0197800237\",\n \"nl0000289965\", None],\n t=[None, None, 233, 234, 235, 233, 234,\n 235, 180, 181, None, None],\n share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15,\n 0.15, 0.15, 0.6, 0.6, 0.25, 1.0],\n log_return=[None, None, .09604978, -.06524096, .03532373,\n .09604978, -.06524096, .03532373,\n .03025441, .036997, None, None]\n ))\n .set_index([\"household_id\", \"asset_id\", \"t\"]))\n\n def f():\n household.join(log_return, how='outer')\n\n pytest.raises(NotImplementedError, f)\n\n @pytest.mark.parametrize(\"klass\", [None, np.asarray, Series, Index])\n def test_merge_datetime_index(self, klass):\n # see gh-19038\n df = DataFrame([1, 2, 3],\n [\"2016-01-01\", \"2017-01-01\", \"2018-01-01\"],\n columns=[\"a\"])\n df.index = pd.to_datetime(df.index)\n on_vector = df.index.year\n\n if klass is not None:\n on_vector = klass(on_vector)\n\n expected = DataFrame(\n OrderedDict([\n (\"a\", [1, 2, 3]),\n (\"key_1\", [2016, 2017, 2018]),\n ])\n )\n\n result = df.merge(df, on=[\"a\", on_vector], how=\"inner\")\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(\n OrderedDict([\n (\"key_0\", [2016, 2017, 2018]),\n (\"a_x\", [1, 2, 3]),\n (\"a_y\", [1, 2, 3]),\n ])\n )\n\n result = df.merge(df, on=[df.index.year], how=\"inner\")\n tm.assert_frame_equal(result, expected)\n\n\nclass TestMergeDtypes(object):\n\n @pytest.mark.parametrize('right_vals', [\n ['foo', 'bar'],\n Series(['foo', 'bar']).astype('category'),\n [1, 2],\n [1.0, 2.0],\n Series([1, 2], dtype='uint64'),\n Series([1, 2], dtype='int32')\n ])\n def test_different(self, right_vals):\n\n left = DataFrame({'A': ['foo', 'bar'],\n 'B': Series(['foo', 'bar']).astype('category'),\n 'C': [1, 2],\n 'D': [1.0, 2.0],\n 'E': Series([1, 2], dtype='uint64'),\n 'F': Series([1, 2], dtype='int32')})\n right = DataFrame({'A': right_vals})\n\n # GH 9780\n # We allow merging on object and categorical cols and cast\n # categorical cols to object\n if (is_categorical_dtype(right['A'].dtype) or\n is_object_dtype(right['A'].dtype)):\n result = pd.merge(left, right, on='A')\n assert is_object_dtype(result.A.dtype)\n\n # GH 9780\n # We raise for merging on object col and int/float col and\n # merging on categorical col and int/float col\n else:\n msg = (\"You are trying to merge on \"\n \"{lk_dtype} and {rk_dtype} columns. 
\"\n \"If you wish to proceed you should use \"\n \"pd.concat\".format(lk_dtype=left['A'].dtype,\n rk_dtype=right['A'].dtype))\n with tm.assert_raises_regex(ValueError, msg):\n pd.merge(left, right, on='A')\n\n @pytest.mark.parametrize('d1', [np.int64, np.int32,\n np.int16, np.int8, np.uint8])\n @pytest.mark.parametrize('d2', [np.int64, np.float64,\n np.float32, np.float16])\n def test_join_multi_dtypes(self, d1, d2):\n\n dtype1 = np.dtype(d1)\n dtype2 = np.dtype(d2)\n\n left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),\n 'k2': ['foo', 'bar'] * 12,\n 'v': np.array(np.arange(24), dtype=np.int64)})\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': np.array([5, 7], dtype=dtype2)}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n\n if dtype2.kind == 'i':\n dtype2 = np.dtype('float64')\n expected['v2'] = np.array(np.nan, dtype=dtype2)\n expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5\n expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7\n\n tm.assert_frame_equal(result, expected)\n\n result = left.join(right, on=['k1', 'k2'], sort=True)\n expected.sort_values(['k1', 'k2'], kind='mergesort', inplace=True)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('int_vals, float_vals, exp_vals', [\n ([1, 2, 3], [1.0, 2.0, 3.0], {'X': [1, 2, 3], 'Y': [1.0, 2.0, 3.0]}),\n ([1, 2, 3], [1.0, 3.0], {'X': [1, 3], 'Y': [1.0, 3.0]}),\n ([1, 2], [1.0, 2.0, 3.0], {'X': [1, 2], 'Y': [1.0, 2.0]}),\n ])\n def test_merge_on_ints_floats(self, int_vals, float_vals, exp_vals):\n # GH 16572\n # Check that float column is not cast to object if\n # merging on float and int columns\n A = DataFrame({'X': int_vals})\n B = DataFrame({'Y': float_vals})\n expected = DataFrame(exp_vals)\n\n result = A.merge(B, left_on='X', right_on='Y')\n assert_frame_equal(result, expected)\n\n result = B.merge(A, left_on='Y', right_on='X')\n assert_frame_equal(result, expected[['Y', 'X']])\n\n def test_merge_on_ints_floats_warning(self):\n # GH 16572\n # merge will produce a warning when merging on int and\n # float columns where the float values are not exactly\n # equal to their int representation\n A = DataFrame({'X': [1, 2, 3]})\n B = DataFrame({'Y': [1.1, 2.5, 3.0]})\n expected = DataFrame({'X': [3], 'Y': [3.0]})\n\n with tm.assert_produces_warning(UserWarning):\n result = A.merge(B, left_on='X', right_on='Y')\n assert_frame_equal(result, expected)\n\n with tm.assert_produces_warning(UserWarning):\n result = B.merge(A, left_on='Y', right_on='X')\n assert_frame_equal(result, expected[['Y', 'X']])\n\n # test no warning if float has NaNs\n B = DataFrame({'Y': [np.nan, np.nan, 3.0]})\n\n with tm.assert_produces_warning(None):\n result = B.merge(A, left_on='Y', right_on='X')\n assert_frame_equal(result, expected[['Y', 'X']])\n\n def test_merge_incompat_infer_boolean_object(self):\n # GH21119: bool + object bool merge OK\n df1 = DataFrame({'key': Series([True, False], dtype=object)})\n df2 = DataFrame({'key': [True, False]})\n\n expected = DataFrame({'key': [True, False]}, dtype=object)\n result = pd.merge(df1, df2, on='key')\n assert_frame_equal(result, expected)\n result = pd.merge(df2, df1, on='key')\n assert_frame_equal(result, expected)\n\n # with missing value\n df1 = DataFrame({'key': Series([True, False, np.nan], dtype=object)})\n df2 = DataFrame({'key': [True, False]})\n\n expected = DataFrame({'key': [True, False]}, dtype=object)\n result = pd.merge(df1, df2, on='key')\n 
assert_frame_equal(result, expected)\n result = pd.merge(df2, df1, on='key')\n assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('df1_vals, df2_vals', [\n ([0, 1, 2], [\"0\", \"1\", \"2\"]),\n ([0.0, 1.0, 2.0], [\"0\", \"1\", \"2\"]),\n ([0, 1, 2], [u\"0\", u\"1\", u\"2\"]),\n (pd.date_range('1/1/2011', periods=2, freq='D'), ['2011-01-01',\n '2011-01-02']),\n (pd.date_range('1/1/2011', periods=2, freq='D'), [0, 1]),\n (pd.date_range('1/1/2011', periods=2, freq='D'), [0.0, 1.0]),\n (pd.date_range('20130101', periods=3),\n pd.date_range('20130101', periods=3, tz='US/Eastern')),\n ([0, 1, 2], Series(['a', 'b', 'a']).astype('category')),\n ([0.0, 1.0, 2.0], Series(['a', 'b', 'a']).astype('category')),\n # TODO ([0, 1], pd.Series([False, True], dtype=bool)),\n ([0, 1], pd.Series([False, True], dtype=object))\n ])\n def test_merge_incompat_dtypes(self, df1_vals, df2_vals):\n # GH 9780, GH 15800\n # Raise a ValueError when a user tries to merge on\n # dtypes that are incompatible (e.g., obj and int/float)\n\n df1 = DataFrame({'A': df1_vals})\n df2 = DataFrame({'A': df2_vals})\n\n msg = (\"You are trying to merge on {lk_dtype} and \"\n \"{rk_dtype} columns. If you wish to proceed \"\n \"you should use pd.concat\".format(lk_dtype=df1['A'].dtype,\n rk_dtype=df2['A'].dtype))\n msg = re.escape(msg)\n with tm.assert_raises_regex(ValueError, msg):\n pd.merge(df1, df2, on=['A'])\n\n # Check that error still raised when swapping order of dataframes\n msg = (\"You are trying to merge on {lk_dtype} and \"\n \"{rk_dtype} columns. If you wish to proceed \"\n \"you should use pd.concat\".format(lk_dtype=df2['A'].dtype,\n rk_dtype=df1['A'].dtype))\n msg = re.escape(msg)\n with tm.assert_raises_regex(ValueError, msg):\n pd.merge(df2, df1, on=['A'])\n\n\[email protected]\ndef left():\n np.random.seed(1234)\n return DataFrame(\n {'X': Series(np.random.choice(\n ['foo', 'bar'],\n size=(10,))).astype(CDT(['foo', 'bar'])),\n 'Y': np.random.choice(['one', 'two', 'three'], size=(10,))})\n\n\[email protected]\ndef right():\n np.random.seed(1234)\n return DataFrame(\n {'X': Series(['foo', 'bar']).astype(CDT(['foo', 'bar'])),\n 'Z': [1, 2]})\n\n\nclass TestMergeCategorical(object):\n\n def test_identical(self, left):\n # merging on the same, should preserve dtypes\n merged = pd.merge(left, left, on='X')\n result = merged.dtypes.sort_index()\n expected = Series([CategoricalDtype(),\n np.dtype('O'),\n np.dtype('O')],\n index=['X', 'Y_x', 'Y_y'])\n assert_series_equal(result, expected)\n\n def test_basic(self, left, right):\n # we have matching Categorical dtypes in X\n # so should preserve the merged column\n merged = pd.merge(left, right, on='X')\n result = merged.dtypes.sort_index()\n expected = Series([CategoricalDtype(),\n np.dtype('O'),\n np.dtype('int64')],\n index=['X', 'Y', 'Z'])\n assert_series_equal(result, expected)\n\n def test_merge_categorical(self):\n # GH 9426\n\n right = DataFrame({'c': {0: 'a',\n 1: 'b',\n 2: 'c',\n 3: 'd',\n 4: 'e'},\n 'd': {0: 'null',\n 1: 'null',\n 2: 'null',\n 3: 'null',\n 4: 'null'}})\n left = DataFrame({'a': {0: 'f',\n 1: 'f',\n 2: 'f',\n 3: 'f',\n 4: 'f'},\n 'b': {0: 'g',\n 1: 'g',\n 2: 'g',\n 3: 'g',\n 4: 'g'}})\n df = pd.merge(left, right, how='left', left_on='b', right_on='c')\n\n # object-object\n expected = df.copy()\n\n # object-cat\n # note that we propagate the category\n # because we don't have any matching rows\n cright = right.copy()\n cright['d'] = cright['d'].astype('category')\n result = pd.merge(left, cright, how='left', left_on='b', right_on='c')\n 
expected['d'] = expected['d'].astype(CategoricalDtype(['null']))\n tm.assert_frame_equal(result, expected)\n\n # cat-object\n cleft = left.copy()\n cleft['b'] = cleft['b'].astype('category')\n result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')\n tm.assert_frame_equal(result, expected)\n\n # cat-cat\n cright = right.copy()\n cright['d'] = cright['d'].astype('category')\n cleft = left.copy()\n cleft['b'] = cleft['b'].astype('category')\n result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')\n tm.assert_frame_equal(result, expected)\n\n def tests_merge_categorical_unordered_equal(self):\n # GH-19551\n df1 = DataFrame({\n 'Foo': Categorical(['A', 'B', 'C'], categories=['A', 'B', 'C']),\n 'Left': ['A0', 'B0', 'C0'],\n })\n\n df2 = DataFrame({\n 'Foo': Categorical(['C', 'B', 'A'], categories=['C', 'B', 'A']),\n 'Right': ['C1', 'B1', 'A1'],\n })\n result = pd.merge(df1, df2, on=['Foo'])\n expected = DataFrame({\n 'Foo': pd.Categorical(['A', 'B', 'C']),\n 'Left': ['A0', 'B0', 'C0'],\n 'Right': ['A1', 'B1', 'C1'],\n })\n assert_frame_equal(result, expected)\n\n def test_other_columns(self, left, right):\n # non-merge columns should preserve if possible\n right = right.assign(Z=right.Z.astype('category'))\n\n merged = pd.merge(left, right, on='X')\n result = merged.dtypes.sort_index()\n expected = Series([CategoricalDtype(),\n np.dtype('O'),\n CategoricalDtype()],\n index=['X', 'Y', 'Z'])\n assert_series_equal(result, expected)\n\n # categories are preserved\n assert left.X.values.is_dtype_equal(merged.X.values)\n assert right.Z.values.is_dtype_equal(merged.Z.values)\n\n @pytest.mark.parametrize(\n 'change', [lambda x: x,\n lambda x: x.astype(CDT(['foo', 'bar', 'bah'])),\n lambda x: x.astype(CDT(ordered=True))])\n def test_dtype_on_merged_different(self, change, join_type, left, right):\n # our merging columns, X now has 2 different dtypes\n # so we must be object as a result\n\n X = change(right.X.astype('object'))\n right = right.assign(X=X)\n assert is_categorical_dtype(left.X.values)\n # assert not left.X.values.is_dtype_equal(right.X.values)\n\n merged = pd.merge(left, right, on='X', how=join_type)\n\n result = merged.dtypes.sort_index()\n expected = Series([np.dtype('O'),\n np.dtype('O'),\n np.dtype('int64')],\n index=['X', 'Y', 'Z'])\n assert_series_equal(result, expected)\n\n def test_self_join_multiple_categories(self):\n # GH 16767\n # non-duplicates should work with multiple categories\n m = 5\n df = pd.DataFrame({\n 'a': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] * m,\n 'b': ['t', 'w', 'x', 'y', 'z'] * 2 * m,\n 'c': [letter\n for each in ['m', 'n', 'u', 'p', 'o']\n for letter in [each] * 2 * m],\n 'd': [letter\n for each in ['aa', 'bb', 'cc', 'dd', 'ee',\n 'ff', 'gg', 'hh', 'ii', 'jj']\n for letter in [each] * m]})\n\n # change them all to categorical variables\n df = df.apply(lambda x: x.astype('category'))\n\n # self-join should equal ourselves\n result = pd.merge(df, df, on=list(df.columns))\n\n assert_frame_equal(result, df)\n\n def test_dtype_on_categorical_dates(self):\n # GH 16900\n # dates should not be coerced to ints\n\n df = pd.DataFrame(\n [[date(2001, 1, 1), 1.1],\n [date(2001, 1, 2), 1.3]],\n columns=['date', 'num2']\n )\n df['date'] = df['date'].astype('category')\n\n df2 = pd.DataFrame(\n [[date(2001, 1, 1), 1.3],\n [date(2001, 1, 3), 1.4]],\n columns=['date', 'num4']\n )\n df2['date'] = df2['date'].astype('category')\n\n expected_outer = pd.DataFrame([\n [pd.Timestamp('2001-01-01'), 1.1, 1.3],\n [pd.Timestamp('2001-01-02'), 
1.3, np.nan],\n [pd.Timestamp('2001-01-03'), np.nan, 1.4]],\n columns=['date', 'num2', 'num4']\n )\n result_outer = pd.merge(df, df2, how='outer', on=['date'])\n assert_frame_equal(result_outer, expected_outer)\n\n expected_inner = pd.DataFrame(\n [[pd.Timestamp('2001-01-01'), 1.1, 1.3]],\n columns=['date', 'num2', 'num4']\n )\n result_inner = pd.merge(df, df2, how='inner', on=['date'])\n assert_frame_equal(result_inner, expected_inner)\n\n @pytest.mark.parametrize('ordered', [True, False])\n @pytest.mark.parametrize('category_column,categories,expected_categories',\n [([False, True, True, False], [True, False],\n [True, False]),\n ([2, 1, 1, 2], [1, 2], [1, 2]),\n (['False', 'True', 'True', 'False'],\n ['True', 'False'], ['True', 'False'])])\n def test_merging_with_bool_or_int_cateorical_column(self, category_column,\n categories,\n expected_categories,\n ordered):\n # GH 17187\n # merging with a boolean/int categorical column\n df1 = pd.DataFrame({'id': [1, 2, 3, 4],\n 'cat': category_column})\n df1['cat'] = df1['cat'].astype(CDT(categories, ordered=ordered))\n df2 = pd.DataFrame({'id': [2, 4], 'num': [1, 9]})\n result = df1.merge(df2)\n expected = pd.DataFrame({'id': [2, 4], 'cat': expected_categories,\n 'num': [1, 9]})\n expected['cat'] = expected['cat'].astype(\n CDT(categories, ordered=ordered))\n assert_frame_equal(expected, result)\n\n\[email protected]\ndef left_df():\n return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])\n\n\[email protected]\ndef right_df():\n return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])\n\n\nclass TestMergeOnIndexes(object):\n\n @pytest.mark.parametrize(\n \"how, sort, expected\",\n [('inner', False, DataFrame({'a': [20, 10],\n 'b': [200, 100]},\n index=[2, 1])),\n ('inner', True, DataFrame({'a': [10, 20],\n 'b': [100, 200]},\n index=[1, 2])),\n ('left', False, DataFrame({'a': [20, 10, 0],\n 'b': [200, 100, np.nan]},\n index=[2, 1, 0])),\n ('left', True, DataFrame({'a': [0, 10, 20],\n 'b': [np.nan, 100, 200]},\n index=[0, 1, 2])),\n ('right', False, DataFrame({'a': [np.nan, 10, 20],\n 'b': [300, 100, 200]},\n index=[3, 1, 2])),\n ('right', True, DataFrame({'a': [10, 20, np.nan],\n 'b': [100, 200, 300]},\n index=[1, 2, 3])),\n ('outer', False, DataFrame({'a': [0, 10, 20, np.nan],\n 'b': [np.nan, 100, 200, 300]},\n index=[0, 1, 2, 3])),\n ('outer', True, DataFrame({'a': [0, 10, 20, np.nan],\n 'b': [np.nan, 100, 200, 300]},\n index=[0, 1, 2, 3]))])\n def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):\n result = pd.merge(left_df, right_df,\n left_index=True,\n right_index=True,\n how=how,\n sort=sort)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n 'index', [\n CategoricalIndex(['A', 'B'], categories=['A', 'B'], name='index_col'),\n Float64Index([1.0, 2.0], name='index_col'),\n Int64Index([1, 2], name='index_col'),\n UInt64Index([1, 2], name='index_col'),\n RangeIndex(start=0, stop=2, name='index_col'),\n DatetimeIndex([\"2018-01-01\", \"2018-01-02\"], name='index_col'),\n ], ids=lambda x: type(x).__name__)\ndef test_merge_index_types(index):\n # gh-20777\n # assert key access is consistent across index types\n left = DataFrame({\"left_data\": [1, 2]}, index=index)\n right = DataFrame({\"right_data\": [1.0, 2.0]}, index=index)\n\n result = left.merge(right, on=['index_col'])\n\n expected = DataFrame(\n OrderedDict([('left_data', [1, 2]), ('right_data', [1.0, 2.0])]),\n index=index)\n assert_frame_equal(result, expected)\n", "\"\"\"\nSparseArray data structure\n\"\"\"\nfrom __future__ import division\n# pylint: 
disable=E1101,E1103,W0231\n\nimport numpy as np\nimport warnings\n\nimport pandas as pd\nfrom pandas.core.base import PandasObject, IndexOpsMixin\n\nfrom pandas import compat\nfrom pandas.compat import range, PYPY\nfrom pandas.compat.numpy import function as nv\n\nfrom pandas.core.dtypes.generic import ABCSparseSeries\nfrom pandas.core.dtypes.common import (\n _ensure_platform_int,\n is_float, is_integer,\n is_object_dtype,\n is_integer_dtype,\n is_bool_dtype,\n is_list_like,\n is_string_dtype,\n is_scalar, is_dtype_equal)\nfrom pandas.core.dtypes.cast import (\n maybe_convert_platform, maybe_promote,\n astype_nansafe, find_common_type, infer_dtype_from_scalar,\n construct_1d_arraylike_from_scalar)\nfrom pandas.core.dtypes.missing import isna, notna, na_value_for_dtype\n\nimport pandas._libs.sparse as splib\nimport pandas._libs.lib as lib\nfrom pandas._libs.sparse import SparseIndex, BlockIndex, IntIndex\nfrom pandas._libs import index as libindex\nimport pandas.core.algorithms as algos\nimport pandas.core.ops as ops\nimport pandas.io.formats.printing as printing\nfrom pandas.util._decorators import Appender\nfrom pandas.core.indexes.base import _index_shared_docs\n\n\n_sparray_doc_kwargs = dict(klass='SparseArray')\n\n\ndef _get_fill(arr):\n # coerce fill_value to arr dtype if possible\n # int64 SparseArray can have NaN as fill_value if there is no missing\n try:\n return np.asarray(arr.fill_value, dtype=arr.dtype)\n except ValueError:\n return np.asarray(arr.fill_value)\n\n\ndef _sparse_array_op(left, right, op, name):\n if name.startswith('__'):\n # For lookups in _libs.sparse we need non-dunder op name\n name = name[2:-2]\n\n # dtype used to find corresponding sparse method\n if not is_dtype_equal(left.dtype, right.dtype):\n dtype = find_common_type([left.dtype, right.dtype])\n left = left.astype(dtype)\n right = right.astype(dtype)\n else:\n dtype = left.dtype\n\n # dtype the result must have\n result_dtype = None\n\n if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:\n with np.errstate(all='ignore'):\n result = op(left.get_values(), right.get_values())\n fill = op(_get_fill(left), _get_fill(right))\n\n if left.sp_index.ngaps == 0:\n index = left.sp_index\n else:\n index = right.sp_index\n elif left.sp_index.equals(right.sp_index):\n with np.errstate(all='ignore'):\n result = op(left.sp_values, right.sp_values)\n fill = op(_get_fill(left), _get_fill(right))\n index = left.sp_index\n else:\n if name[0] == 'r':\n left, right = right, left\n name = name[1:]\n\n if name in ('and', 'or') and dtype == 'bool':\n opname = 'sparse_{name}_uint8'.format(name=name)\n # to make template simple, cast here\n left_sp_values = left.sp_values.view(np.uint8)\n right_sp_values = right.sp_values.view(np.uint8)\n result_dtype = np.bool\n else:\n opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype)\n left_sp_values = left.sp_values\n right_sp_values = right.sp_values\n\n sparse_op = getattr(splib, opname)\n with np.errstate(all='ignore'):\n result, index, fill = sparse_op(left_sp_values, left.sp_index,\n left.fill_value, right_sp_values,\n right.sp_index, right.fill_value)\n\n if result_dtype is None:\n result_dtype = result.dtype\n\n return _wrap_result(name, result, index, fill, dtype=result_dtype)\n\n\ndef _wrap_result(name, data, sparse_index, fill_value, dtype=None):\n \"\"\" wrap op result to have correct dtype \"\"\"\n if name.startswith('__'):\n # e.g. 
__eq__ --> eq\n name = name[2:-2]\n\n if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):\n dtype = np.bool\n\n if is_bool_dtype(dtype):\n # fill_value may be np.bool_\n fill_value = bool(fill_value)\n return SparseArray(data, sparse_index=sparse_index,\n fill_value=fill_value, dtype=dtype)\n\n\nclass SparseArray(PandasObject, np.ndarray):\n \"\"\"Data structure for labeled, sparse floating point 1-D data\n\n Parameters\n ----------\n data : {array-like (1-D), Series, SparseSeries, dict}\n kind : {'block', 'integer'}\n fill_value : float\n Code for missing value. Defaults depends on dtype.\n 0 for int dtype, False for bool dtype, and NaN for other dtypes\n sparse_index : {BlockIndex, IntIndex}, optional\n Only if you have one. Mainly used internally\n\n Notes\n -----\n SparseArray objects are immutable via the typical Python means. If you\n must change values, convert to dense, make your changes, then convert back\n to sparse\n \"\"\"\n __array_priority__ = 15\n _typ = 'array'\n _subtyp = 'sparse_array'\n\n sp_index = None\n fill_value = None\n\n def __new__(cls, data, sparse_index=None, index=None, kind='integer',\n fill_value=None, dtype=None, copy=False):\n\n if index is not None:\n if data is None:\n data = np.nan\n if not is_scalar(data):\n raise Exception(\"must only pass scalars with an index \")\n dtype = infer_dtype_from_scalar(data)[0]\n data = construct_1d_arraylike_from_scalar(\n data, len(index), dtype)\n\n if isinstance(data, ABCSparseSeries):\n data = data.values\n is_sparse_array = isinstance(data, SparseArray)\n\n if dtype is not None:\n dtype = np.dtype(dtype)\n\n if is_sparse_array:\n sparse_index = data.sp_index\n values = data.sp_values\n fill_value = data.fill_value\n else:\n # array-like\n if sparse_index is None:\n if dtype is not None:\n data = np.asarray(data, dtype=dtype)\n res = make_sparse(data, kind=kind, fill_value=fill_value)\n values, sparse_index, fill_value = res\n else:\n values = _sanitize_values(data)\n if len(values) != sparse_index.npoints:\n raise AssertionError(\"Non array-like type {type} must \"\n \"have the same length as the index\"\n .format(type=type(values)))\n # Create array, do *not* copy data by default\n if copy:\n subarr = np.array(values, dtype=dtype, copy=True)\n else:\n subarr = np.asarray(values, dtype=dtype)\n # Change the class of the array to be the subclass type.\n return cls._simple_new(subarr, sparse_index, fill_value)\n\n @classmethod\n def _simple_new(cls, data, sp_index, fill_value):\n if not isinstance(sp_index, SparseIndex):\n # caller must pass SparseIndex\n raise ValueError('sp_index must be a SparseIndex')\n\n if fill_value is None:\n if sp_index.ngaps > 0:\n # has missing hole\n fill_value = np.nan\n else:\n fill_value = na_value_for_dtype(data.dtype)\n\n if (is_integer_dtype(data) and is_float(fill_value) and\n sp_index.ngaps > 0):\n # if float fill_value is being included in dense repr,\n # convert values to float\n data = data.astype(float)\n\n result = data.view(cls)\n\n if not isinstance(sp_index, SparseIndex):\n # caller must pass SparseIndex\n raise ValueError('sp_index must be a SparseIndex')\n\n result.sp_index = sp_index\n result._fill_value = fill_value\n return result\n\n @property\n def _constructor(self):\n return lambda x: SparseArray(x, fill_value=self.fill_value,\n kind=self.kind)\n\n @property\n def kind(self):\n if isinstance(self.sp_index, BlockIndex):\n return 'block'\n elif isinstance(self.sp_index, IntIndex):\n return 'integer'\n\n @Appender(IndexOpsMixin.memory_usage.__doc__)\n def 
memory_usage(self, deep=False):\n values = self.sp_values\n\n v = values.nbytes\n\n if deep and is_object_dtype(self) and not PYPY:\n v += lib.memory_usage_of_objects(values)\n\n return v\n\n def __array_wrap__(self, out_arr, context=None):\n \"\"\"\n NumPy calls this method when ufunc is applied\n\n Parameters\n ----------\n\n out_arr : ndarray\n ufunc result (note that ufunc is only applied to sp_values)\n context : tuple of 3 elements (ufunc, signature, domain)\n for example, following is a context when np.sin is applied to\n SparseArray,\n\n (<ufunc 'sin'>, (SparseArray,), 0))\n\n See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html\n \"\"\"\n if isinstance(context, tuple) and len(context) == 3:\n ufunc, args, domain = context\n # to apply ufunc only to fill_value (to avoid recursive call)\n args = [getattr(a, 'fill_value', a) for a in args]\n with np.errstate(all='ignore'):\n fill_value = ufunc(self.fill_value, *args[1:])\n else:\n fill_value = self.fill_value\n\n return self._simple_new(out_arr, sp_index=self.sp_index,\n fill_value=fill_value)\n\n def __array_finalize__(self, obj):\n \"\"\"\n Gets called after any ufunc or other array operations, necessary\n to pass on the index.\n \"\"\"\n self.sp_index = getattr(obj, 'sp_index', None)\n self._fill_value = getattr(obj, 'fill_value', None)\n\n def __reduce__(self):\n \"\"\"Necessary for making this object picklable\"\"\"\n object_state = list(np.ndarray.__reduce__(self))\n subclass_state = self.fill_value, self.sp_index\n object_state[2] = self.sp_values.__reduce__()[2]\n object_state[2] = (object_state[2], subclass_state)\n return tuple(object_state)\n\n def __setstate__(self, state):\n \"\"\"Necessary for making this object picklable\"\"\"\n nd_state, own_state = state\n np.ndarray.__setstate__(self, nd_state)\n\n fill_value, sp_index = own_state[:2]\n self.sp_index = sp_index\n self._fill_value = fill_value\n\n def __len__(self):\n try:\n return self.sp_index.length\n except:\n return 0\n\n def __unicode__(self):\n return '{self}\\nFill: {fill}\\n{index}'.format(\n self=printing.pprint_thing(self),\n fill=printing.pprint_thing(self.fill_value),\n index=printing.pprint_thing(self.sp_index))\n\n def disable(self, other):\n raise NotImplementedError('inplace binary ops not supported')\n # Inplace operators\n __iadd__ = disable\n __isub__ = disable\n __imul__ = disable\n __itruediv__ = disable\n __ifloordiv__ = disable\n __ipow__ = disable\n\n # Python 2 division operators\n if not compat.PY3:\n __idiv__ = disable\n\n @property\n def values(self):\n \"\"\"\n Dense values\n \"\"\"\n output = np.empty(len(self), dtype=self.dtype)\n int_index = self.sp_index.to_int_index()\n output.fill(self.fill_value)\n output.put(int_index.indices, self)\n return output\n\n @property\n def shape(self):\n return (len(self),)\n\n @property\n def sp_values(self):\n # caching not an option, leaks memory\n return self.view(np.ndarray)\n\n @property\n def fill_value(self):\n return self._fill_value\n\n @fill_value.setter\n def fill_value(self, value):\n if not is_scalar(value):\n raise ValueError('fill_value must be a scalar')\n # if the specified value triggers type promotion, raise ValueError\n new_dtype, fill_value = maybe_promote(self.dtype, value)\n if is_dtype_equal(self.dtype, new_dtype):\n self._fill_value = fill_value\n else:\n msg = 'unable to set fill_value {fill} to {dtype} dtype'\n raise ValueError(msg.format(fill=value, dtype=self.dtype))\n\n def get_values(self, fill=None):\n \"\"\" return a dense representation \"\"\"\n return 
self.to_dense(fill=fill)\n\n def to_dense(self, fill=None):\n \"\"\"\n Convert SparseArray to a NumPy array.\n\n Parameters\n ----------\n fill: float, default None\n .. deprecated:: 0.20.0\n This argument is not respected by this function.\n\n Returns\n -------\n arr : NumPy array\n \"\"\"\n if fill is not None:\n warnings.warn((\"The 'fill' parameter has been deprecated and \"\n \"will be removed in a future version.\"),\n FutureWarning, stacklevel=2)\n return self.values\n\n def __iter__(self):\n if np.issubdtype(self.dtype, np.floating):\n boxer = float\n elif np.issubdtype(self.dtype, np.integer):\n boxer = int\n else:\n boxer = lambda x: x\n\n for i in range(len(self)):\n r = self._get_val_at(i)\n\n # box em\n yield boxer(r)\n\n def __getitem__(self, key):\n \"\"\"\n\n \"\"\"\n\n if is_integer(key):\n return self._get_val_at(key)\n elif isinstance(key, tuple):\n data_slice = self.values[key]\n else:\n if isinstance(key, SparseArray):\n if is_bool_dtype(key):\n key = key.to_dense()\n else:\n key = np.asarray(key)\n\n if hasattr(key, '__len__') and len(self) != len(key):\n return self.take(key)\n else:\n data_slice = self.values[key]\n\n return self._constructor(data_slice)\n\n def __getslice__(self, i, j):\n if i < 0:\n i = 0\n if j < 0:\n j = 0\n slobj = slice(i, j)\n return self.__getitem__(slobj)\n\n def _get_val_at(self, loc):\n n = len(self)\n if loc < 0:\n loc += n\n\n if loc >= n or loc < 0:\n raise IndexError('Out of bounds access')\n\n sp_loc = self.sp_index.lookup(loc)\n if sp_loc == -1:\n return self.fill_value\n else:\n return libindex.get_value_at(self, sp_loc)\n\n @Appender(_index_shared_docs['take'] % _sparray_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True,\n fill_value=None, **kwargs):\n \"\"\"\n Sparse-compatible version of ndarray.take\n\n Returns\n -------\n taken : ndarray\n \"\"\"\n nv.validate_take(tuple(), kwargs)\n\n if axis:\n raise ValueError(\"axis must be 0, input was {axis}\"\n .format(axis=axis))\n\n if is_integer(indices):\n # return scalar\n return self[indices]\n\n indices = _ensure_platform_int(indices)\n n = len(self)\n if allow_fill and fill_value is not None:\n # allow -1 to indicate self.fill_value,\n # self.fill_value may not be NaN\n if (indices < -1).any():\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n raise ValueError(msg)\n elif (n <= indices).any():\n msg = 'index is out of bounds for size {size}'.format(size=n)\n raise IndexError(msg)\n else:\n if ((indices < -n) | (n <= indices)).any():\n msg = 'index is out of bounds for size {size}'.format(size=n)\n raise IndexError(msg)\n\n indices = indices.astype(np.int32)\n if not (allow_fill and fill_value is not None):\n indices = indices.copy()\n indices[indices < 0] += n\n\n locs = self.sp_index.lookup_array(indices)\n indexer = np.arange(len(locs), dtype=np.int32)\n mask = locs != -1\n if mask.any():\n indexer = indexer[mask]\n new_values = self.sp_values.take(locs[mask])\n else:\n indexer = np.empty(shape=(0, ), dtype=np.int32)\n new_values = np.empty(shape=(0, ), dtype=self.sp_values.dtype)\n\n sp_index = _make_index(len(indices), indexer, kind=self.sp_index)\n return self._simple_new(new_values, sp_index, self.fill_value)\n\n def __setitem__(self, key, value):\n # if is_integer(key):\n # self.values[key] = value\n # else:\n # raise Exception(\"SparseArray does not support setting non-scalars\n # via setitem\")\n raise TypeError(\n \"SparseArray does not support item assignment via setitem\")\n\n def __setslice__(self, i, j, 
value):\n if i < 0:\n i = 0\n if j < 0:\n j = 0\n slobj = slice(i, j) # noqa\n\n # if not is_scalar(value):\n # raise Exception(\"SparseArray does not support setting non-scalars\n # via slices\")\n\n # x = self.values\n # x[slobj] = value\n # self.values = x\n raise TypeError(\"SparseArray does not support item assignment via \"\n \"slices\")\n\n def astype(self, dtype=None, copy=True):\n dtype = np.dtype(dtype)\n sp_values = astype_nansafe(self.sp_values, dtype, copy=copy)\n try:\n if is_bool_dtype(dtype):\n # to avoid np.bool_ dtype\n fill_value = bool(self.fill_value)\n else:\n fill_value = dtype.type(self.fill_value)\n except ValueError:\n msg = 'unable to coerce current fill_value {fill} to {dtype} dtype'\n raise ValueError(msg.format(fill=self.fill_value, dtype=dtype))\n return self._simple_new(sp_values, self.sp_index,\n fill_value=fill_value)\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of the SparseArray. Only the actual sparse values need to\n be copied.\n \"\"\"\n if deep:\n values = self.sp_values.copy()\n else:\n values = self.sp_values\n return SparseArray(values, sparse_index=self.sp_index,\n dtype=self.dtype, fill_value=self.fill_value)\n\n def count(self):\n \"\"\"\n Compute sum of non-NA/null observations in SparseArray. If the\n fill_value is not NaN, the \"sparse\" locations will be included in the\n observation count.\n\n Returns\n -------\n nobs : int\n \"\"\"\n sp_values = self.sp_values\n valid_spvals = np.isfinite(sp_values).sum()\n if self._null_fill_value:\n return valid_spvals\n else:\n return valid_spvals + self.sp_index.ngaps\n\n @property\n def _null_fill_value(self):\n return isna(self.fill_value)\n\n @property\n def _valid_sp_values(self):\n sp_vals = self.sp_values\n mask = notna(sp_vals)\n return sp_vals[mask]\n\n @Appender(_index_shared_docs['fillna'] % _sparray_doc_kwargs)\n def fillna(self, value, downcast=None):\n if downcast is not None:\n raise NotImplementedError\n\n if issubclass(self.dtype.type, np.floating):\n value = float(value)\n\n new_values = np.where(isna(self.sp_values), value, self.sp_values)\n fill_value = value if self._null_fill_value else self.fill_value\n\n return self._simple_new(new_values, self.sp_index,\n fill_value=fill_value)\n\n def all(self, axis=0, *args, **kwargs):\n \"\"\"\n Tests whether all elements evaluate True\n\n Returns\n -------\n all : bool\n\n See Also\n --------\n numpy.all\n \"\"\"\n nv.validate_all(args, kwargs)\n\n values = self.sp_values\n\n if len(values) != len(self) and not np.all(self.fill_value):\n return False\n\n return values.all()\n\n def any(self, axis=0, *args, **kwargs):\n \"\"\"\n Tests whether at least one of elements evaluate True\n\n Returns\n -------\n any : bool\n\n See Also\n --------\n numpy.any\n \"\"\"\n nv.validate_any(args, kwargs)\n\n values = self.sp_values\n\n if len(values) != len(self) and np.any(self.fill_value):\n return True\n\n return values.any()\n\n def sum(self, axis=0, *args, **kwargs):\n \"\"\"\n Sum of non-NA/null values\n\n Returns\n -------\n sum : float\n \"\"\"\n nv.validate_sum(args, kwargs)\n valid_vals = self._valid_sp_values\n sp_sum = valid_vals.sum()\n if self._null_fill_value:\n return sp_sum\n else:\n nsparse = self.sp_index.ngaps\n return sp_sum + self.fill_value * nsparse\n\n def cumsum(self, axis=0, *args, **kwargs):\n \"\"\"\n Cumulative sum of non-NA/null values.\n\n When performing the cumulative summation, any non-NA/null values will\n be skipped. 
The resulting SparseArray will preserve the locations of\n NaN values, but the fill value will be `np.nan` regardless.\n\n Parameters\n ----------\n axis : int or None\n Axis over which to perform the cumulative summation. If None,\n perform cumulative summation over flattened array.\n\n Returns\n -------\n cumsum : SparseArray\n \"\"\"\n nv.validate_cumsum(args, kwargs)\n\n if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.\n raise ValueError(\"axis(={axis}) out of bounds\".format(axis=axis))\n\n if not self._null_fill_value:\n return SparseArray(self.to_dense()).cumsum()\n\n return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index,\n fill_value=self.fill_value)\n\n def mean(self, axis=0, *args, **kwargs):\n \"\"\"\n Mean of non-NA/null values\n\n Returns\n -------\n mean : float\n \"\"\"\n nv.validate_mean(args, kwargs)\n valid_vals = self._valid_sp_values\n sp_sum = valid_vals.sum()\n ct = len(valid_vals)\n\n if self._null_fill_value:\n return sp_sum / ct\n else:\n nsparse = self.sp_index.ngaps\n return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)\n\n def value_counts(self, dropna=True):\n \"\"\"\n Returns a Series containing counts of unique values.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't include counts of NaN, even if NaN is in sp_values.\n\n Returns\n -------\n counts : Series\n \"\"\"\n keys, counts = algos._value_counts_arraylike(self.sp_values,\n dropna=dropna)\n fcounts = self.sp_index.ngaps\n if fcounts > 0:\n if self._null_fill_value and dropna:\n pass\n else:\n if self._null_fill_value:\n mask = pd.isna(keys)\n else:\n mask = keys == self.fill_value\n\n if mask.any():\n counts[mask] += fcounts\n else:\n keys = np.insert(keys, 0, self.fill_value)\n counts = np.insert(counts, 0, fcounts)\n\n if not isinstance(keys, pd.Index):\n keys = pd.Index(keys)\n result = pd.Series(counts, index=keys)\n return result\n\n\ndef _maybe_to_dense(obj):\n \"\"\" try to convert to dense \"\"\"\n if hasattr(obj, 'to_dense'):\n return obj.to_dense()\n return obj\n\n\ndef _maybe_to_sparse(array):\n \"\"\" array must be SparseSeries or SparseArray \"\"\"\n if isinstance(array, ABCSparseSeries):\n array = array.values.copy()\n return array\n\n\ndef _sanitize_values(arr):\n \"\"\"\n return an ndarray for our input,\n in a platform independent manner\n \"\"\"\n\n if hasattr(arr, 'values'):\n arr = arr.values\n else:\n\n # scalar\n if is_scalar(arr):\n arr = [arr]\n\n # ndarray\n if isinstance(arr, np.ndarray):\n pass\n\n elif is_list_like(arr) and len(arr) > 0:\n arr = maybe_convert_platform(arr)\n\n else:\n arr = np.asarray(arr)\n\n return arr\n\n\ndef make_sparse(arr, kind='block', fill_value=None):\n \"\"\"\n Convert ndarray to sparse format\n\n Parameters\n ----------\n arr : ndarray\n kind : {'block', 'integer'}\n fill_value : NaN or another value\n\n Returns\n -------\n (sparse_values, index) : (ndarray, SparseIndex)\n \"\"\"\n\n arr = _sanitize_values(arr)\n\n if arr.ndim > 1:\n raise TypeError(\"expected dimension <= 1 data\")\n\n if fill_value is None:\n fill_value = na_value_for_dtype(arr.dtype)\n\n if isna(fill_value):\n mask = notna(arr)\n else:\n # For str arrays in NumPy 1.12.0, operator!= below isn't\n # element-wise but just returns False if fill_value is not str,\n # so cast to object comparison to be safe\n if is_string_dtype(arr):\n arr = arr.astype(object)\n\n if is_object_dtype(arr.dtype):\n # element-wise equality check method in numpy doesn't treat\n # each element type, eg. 
0, 0.0, and False are treated as\n # same. So we have to check the both of its type and value.\n mask = splib.make_mask_object_ndarray(arr, fill_value)\n else:\n mask = arr != fill_value\n\n length = len(arr)\n if length != mask.size:\n # the arr is a SparseArray\n indices = mask.sp_index.indices\n else:\n indices = mask.nonzero()[0].astype(np.int32)\n\n index = _make_index(length, indices, kind)\n sparsified_values = arr[mask]\n return sparsified_values, index, fill_value\n\n\ndef _make_index(length, indices, kind):\n\n if kind == 'block' or isinstance(kind, BlockIndex):\n locs, lens = splib.get_blocks(indices)\n index = BlockIndex(length, locs, lens)\n elif kind == 'integer' or isinstance(kind, IntIndex):\n index = IntIndex(length, indices)\n else: # pragma: no cover\n raise ValueError('must be block or integer type')\n return index\n\n\nops.add_special_arithmetic_methods(SparseArray)\n" ]
[ [ "pandas.CategoricalIndex", "numpy.argsort", "pandas.util.testing.assert_numpy_array_equal", "pandas.Series", "numpy.arange", "pandas.util.testing.assert_raises_regex", "pandas.Index", "pandas.option_context", "pandas.util.testing.equalContents", "numpy.errstate", "pandas.util.testing.assert_index_equal", "pandas.isna", "pandas.compat.iteritems", "pandas.core.dtypes.dtypes.CategoricalDtype", "pandas.util.testing.round_trip_pickle", "numpy.repeat", "numpy.array", "pandas.core.dtypes.common.needs_i8_conversion" ], [ "pandas.util.testing.ensure_clean", "pandas.Series", "pandas.util.testing.assert_contains_all", "pandas.util.testing.assert_produces_warning", "pandas.MultiIndex.from_tuples", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_index_equal", "pandas.util.testing.makeUnicodeIndex", "numpy.random.randn", "pandas.compat.iteritems", "pandas.compat.lzip", "numpy.ones_like", "pandas.util.testing.assert_categorical_equal", "numpy.unique", "pandas.Timestamp", "numpy.arange", "pandas.util.testing.assert_series_equal", "pandas.Index", "pandas.util.testing.makeDateIndex", "numpy.isnan", "pandas.Categorical", "pandas.util.testing.makeCategoricalIndex", "pandas.util.testing.assert_almost_equal", "pandas.util.testing.makeStringIndex", "pandas.date_range", "pandas.core.indexes.datetimes.Timestamp", "pandas.util.testing.makeTimedeltaIndex", "numpy.array", "pandas.util.testing.makeUIntIndex", "pandas.compat.isidentifier", "pandas.timedelta_range", "numpy.random.random", "pandas.period_range", "pandas.util.testing.assert_raises_regex", "numpy.compress", "pandas.util.testing.makeIntIndex", "pandas.to_timedelta", "pandas.util.testing.makeFloatIndex", "pandas.read_pickle", "pandas.io.formats.printing.pprint_thing", "pandas.util.testing.makePeriodIndex", "pandas.compat.range" ], [ "numpy.lib.utils.deprecate", "numpy.core.bool_", "numpy.core.zeros", "numpy.all", "numpy.core.numerictypes.issubdtype", "numpy.iscomplexobj", "numpy.core.result_type", "numpy.where", "numpy.lib.imag", "numpy.arange", "numpy.asanyarray", "numpy.core.isnat", "numpy.core.signbit", "numpy.power", "numpy.core.array", "numpy.core.arange", "numpy.core.isfinite", "numpy.core.empty", "numpy.lib.iscomplexobj", "numpy.int64", "numpy.log10", "numpy.errstate", "numpy.array", "numpy.lib.real", "numpy.core.isnan", "numpy.abs", "numpy.core.isinf", "numpy.int32", "numpy.core.isscalar", "numpy.int16", "numpy.compat.npy_load_module", "numpy.common_type", "numpy.core.errstate", "numpy.core.numeric.isclose" ], [ "numpy.true_divide", "numpy.can_cast", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.issubdtype", "numpy.dtype", "numpy.mean", "numpy.any", "numpy.searchsorted", "numpy.ravel_multi_index", "numpy.logical_and.reduce", "numpy.ceil", "numpy.std", "numpy.diff", "numpy.zeros", "numpy.power", "numpy.ndim", "numpy.atleast_2d", "numpy.argsort", "numpy.log2", "numpy.absolute", "numpy.isfinite", "numpy.percentile", "numpy.ones", "numpy.sort", "numpy.result_type", "numpy.bincount", "numpy.empty" ], [ "pandas.merge", "pandas.to_datetime", "pandas.Series", "pandas.RangeIndex", "pandas.util.testing.assert_produces_warning", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.dtype", "pandas.util.testing.assert_frame_equal", "numpy.random.randn", "pandas.compat.lzip", "numpy.random.randint", "numpy.arange", "pandas.util.testing.assert_series_equal", "pandas.DatetimeIndex", "pandas.Index", "pandas.Int64Index", "pandas.util.testing.rands_array", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.MultiIndex", 
"numpy.random.choice", "pandas.Categorical", "pandas.Float64Index", "pandas.UInt64Index", "numpy.timedelta64", "pandas.core.reshape.concat.concat", "pandas.DataFrame.from_dict", "numpy.errstate", "pandas.core.dtypes.dtypes.CategoricalDtype", "pandas.date_range", "numpy.array", "pandas.CategoricalIndex", "numpy.random.random", "pandas.api.types.CategoricalDtype", "numpy.random.seed", "pandas.period_range", "pandas.util.testing.assert_raises_regex", "numpy.tile", "pandas.core.reshape.merge.merge", "pandas.core.dtypes.common.is_object_dtype", "pandas.Timestamp", "pandas.compat.lrange" ], [ "pandas._libs.sparse.make_mask_object_ndarray", "pandas.Series", "numpy.asarray", "pandas.core.dtypes.common.is_dtype_equal", "numpy.issubdtype", "pandas._libs.sparse.IntIndex", "pandas.compat.numpy.function.validate_cumsum", "numpy.dtype", "pandas.core.dtypes.missing.notna", "numpy.all", "numpy.any", "pandas.compat.numpy.function.validate_any", "pandas.isna", "pandas._libs.index.get_value_at", "pandas.compat.numpy.function.validate_all", "pandas.core.ops.add_special_arithmetic_methods", "pandas.core.dtypes.cast.maybe_convert_platform", "pandas.Index", "pandas._libs.sparse.BlockIndex", "numpy.insert", "pandas.core.dtypes.common.is_string_dtype", "pandas.core.dtypes.common.is_float", "numpy.ndarray.__reduce__", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_list_like", "pandas.util._decorators.Appender", "pandas.core.dtypes.cast.infer_dtype_from_scalar", "pandas.core.dtypes.cast.maybe_promote", "pandas.compat.numpy.function.validate_mean", "numpy.errstate", "numpy.array", "pandas.compat.numpy.function.validate_sum", "pandas.core.dtypes.cast.astype_nansafe", "pandas.core.dtypes.common._ensure_platform_int", "pandas.core.dtypes.common.is_bool_dtype", "numpy.isfinite", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.common.is_integer", "pandas.core.dtypes.cast.find_common_type", "pandas.core.dtypes.missing.na_value_for_dtype", "pandas._libs.sparse.get_blocks", "pandas.core.dtypes.common.is_object_dtype", "pandas._libs.lib.memory_usage_of_objects", "pandas.core.dtypes.missing.isna", "numpy.ndarray.__setstate__", "pandas.io.formats.printing.pprint_thing", "pandas.core.algorithms._value_counts_arraylike", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.24", "0.21" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.10", "1.12", "1.13", "1.16", "1.9", "1.7", "1.15", "1.14", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hyunynim/im2avatar
[ "db7f01b79f7edf200815351b6d5821044605c0fd" ]
[ "train_color_human.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport os\nimport sys\nsys.path.append('./utils')\nsys.path.append('./models')\n\nimport dataset_human as dataset\nimport model_color as model\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('train_dir', './train_color_human',\n \"\"\"Directory where to write summaries and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('base_dir', './data/human_im2avatar', \n \"\"\"The path containing all the samples.\"\"\")\ntf.app.flags.DEFINE_string('data_list_path', './data_list', \n \"\"\"The path containing data lists.\"\"\")\n\ntf.app.flags.DEFINE_integer('train_epochs', 501, \"\"\"\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 55, \"\"\"\"\"\")\ntf.app.flags.DEFINE_integer('gpu', 1, \"\"\"\"\"\")\ntf.app.flags.DEFINE_float('learning_rate', 0.0003, \"\"\"\"\"\")\ntf.app.flags.DEFINE_float('wd', 0.00001, \"\"\"\"\"\")\ntf.app.flags.DEFINE_integer('epochs_to_save',20, \"\"\"\"\"\")\ntf.app.flags.DEFINE_integer('decay_step',20000, \"\"\"for lr\"\"\")\ntf.app.flags.DEFINE_float('decay_rate', 0.7, \"\"\"for lr\"\"\")\n\nIM_DIM = 128 \nVOL_DIM = 64 \n\nBATCH_SIZE = FLAGS.batch_size\nTRAIN_EPOCHS = FLAGS.train_epochs\nGPU_INDEX = FLAGS.gpu\nBASE_LEARNING_RATE = FLAGS.learning_rate\nDECAY_STEP = FLAGS.decay_step\nDECAY_RATE = FLAGS.decay_rate\n\nBN_INIT_DECAY = 0.5\nBN_DECAY_DECAY_RATE = 0.5\nBN_DECAY_DECAY_STEP = float(DECAY_STEP)\nBN_DECAY_CLIP = 0.99\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(GPU_INDEX)\n\nTRAIN_DIR = FLAGS.train_dir\nif not os.path.exists(TRAIN_DIR): \n os.makedirs(TRAIN_DIR)\nLOG_FOUT = open(os.path.join(TRAIN_DIR, 'log_train.txt'), 'w')\nLOG_FOUT.write(str(tf.flags._global_parser.parse_args())+'\\n')\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n\ndef get_learning_rate(batch):\n learning_rate = tf.train.exponential_decay(\n BASE_LEARNING_RATE, # Base learning rate.\n batch * BATCH_SIZE, # Current index into the dataset.\n DECAY_STEP, # Decay step.\n DECAY_RATE, # Decay rate.\n staircase=True)\n learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!\n return learning_rate \n\ndef get_bn_decay(batch):\n bn_momentum = tf.train.exponential_decay(\n BN_INIT_DECAY,\n batch*BATCH_SIZE,\n BN_DECAY_DECAY_STEP,\n BN_DECAY_DECAY_RATE,\n staircase=True)\n bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)\n return bn_decay \n\n\ndef train(dataset_):\n with tf.Graph().as_default():\n with tf.device('/gpu:'+str(GPU_INDEX)):\n is_train_pl = tf.placeholder(tf.bool)\n img_pl, vol_clr_pl, vol_flow_pl = model.placeholder_inputs(BATCH_SIZE, IM_DIM, VOL_DIM)\n\n global_step = tf.Variable(0)\n bn_decay = get_bn_decay(global_step)\n \n pred_reg_clr, pred_conf, pred_flow, pred_blended_clr = model.get_model(img_pl, is_train_pl, weight_decay=FLAGS.wd, bn_decay=bn_decay)\n loss = model.get_loss(pred_reg_clr, pred_blended_clr, vol_clr_pl, pred_flow, vol_flow_pl)\n \n learning_rate = get_learning_rate(global_step)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n saver = tf.train.Saver()\n\n config = tf.ConfigProto()\n config.gpu_options.allocator_type = 'BFC'\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n\n with tf.Session(config=config) as sess:\n model_path = os.path.join(TRAIN_DIR, \"trained_models\")\n if tf.gfile.Exists(os.path.join(model_path, \"checkpoint\")):\n ckpt = tf.train.get_checkpoint_state(model_path)\n restorer = tf.train.Saver()\n restorer.restore(sess, 
ckpt.model_checkpoint_path)\n print (\"Load parameters from checkpoint.\")\n else:\n sess.run(tf.global_variables_initializer())\n\n train_sample_size = dataset_.getTrainSampleSize()\n train_batches = train_sample_size // BATCH_SIZE\n\n for epoch in range(TRAIN_EPOCHS):\n dataset_.shuffleTrainNames()\n\n for batch_idx in range(train_batches):\n imgs, vols_flow, vols_clr = dataset_.next_flow_batch(batch_idx * BATCH_SIZE, BATCH_SIZE, vol_dim=VOL_DIM) \n feed_dict = {img_pl: imgs, vol_clr_pl: vols_clr, vol_flow_pl: vols_flow, is_train_pl: True}\n\n step = sess.run(global_step)\n _, loss_val = sess.run([train_op, loss], feed_dict=feed_dict)\n\n log_string(\"<TRAIN> Epoch {} - Batch {}: loss: {}.\".format(epoch, batch_idx, loss_val))\n\n if epoch % FLAGS.epochs_to_save == 0:\n checkpoint_path = os.path.join(model_path, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=epoch)\n\ndef main():\n train_dataset = dataset.Dataset(base_path=FLAGS.base_dir, \n data_list_path=FLAGS.data_list_path)\n train(train_dataset)\n\nif __name__ == '__main__':\n main()\n\n\n\n\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.Graph", "tensorflow.flags._global_parser.parse_args", "tensorflow.Variable", "tensorflow.maximum", "tensorflow.minimum", "tensorflow.app.flags.DEFINE_integer", "tensorflow.placeholder", "tensorflow.train.exponential_decay", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.app.flags.DEFINE_string", "tensorflow.app.flags.DEFINE_float", "tensorflow.train.AdamOptimizer", "tensorflow.train.Saver", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
vsobolmaven/python-statlib
[ "f0dc8c1a93c5536c3c4a32fa425ddd081349dccd", "f0dc8c1a93c5536c3c4a32fa425ddd081349dccd" ]
[ "statlib/pstat.py", "statlib/anova.py" ]
[ "# Copyright (c) 1999-2007 Gary Strangman; All Rights Reserved.\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n# \r\n# The above copyright notice and this permission notice shall be included in\r\n# all copies or substantial portions of the Software.\r\n# \r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\n# THE SOFTWARE.\r\n#\r\n# Comments and/or additions are welcome (send e-mail to:\r\n# [email protected]).\r\n# \r\n\"\"\"\r\npstat.py module\r\n\r\n#################################################\r\n####### Written by: Gary Strangman ###########\r\n####### Last modified: Dec 18, 2007 ###########\r\n#################################################\r\n\r\nThis module provides some useful list and array manipulation routines\r\nmodeled after those found in the |Stat package by Gary Perlman, plus a\r\nnumber of other useful list/file manipulation functions. The list-based\r\nfunctions include:\r\n\r\n abut (source,*args)\r\n simpleabut (source, addon)\r\n colex (listoflists,cnums)\r\n collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)\r\n dm (listoflists,criterion)\r\n flat (l)\r\n linexand (listoflists,columnlist,valuelist)\r\n linexor (listoflists,columnlist,valuelist)\r\n linedelimited (inlist,delimiter)\r\n lineincols (inlist,colsize) \r\n lineincustcols (inlist,colsizes)\r\n list2string (inlist)\r\n makelol(inlist)\r\n makestr(x)\r\n printcc (lst,extra=2)\r\n printincols (listoflists,colsize)\r\n pl (listoflists)\r\n printl(listoflists)\r\n replace (lst,oldval,newval)\r\n recode (inlist,listmap,cols='all')\r\n remap (listoflists,criterion)\r\n roundlist (inlist,num_digits_to_round_floats_to)\r\n sortby(listoflists,sortcols)\r\n unique (inlist)\r\n duplicates(inlist)\r\n writedelimited (listoflists, delimiter, file, writetype='w')\r\n\r\nSome of these functions have alternate versions which are defined only if\r\nNumeric (NumPy) can be imported. These functions are generally named as\r\nabove, with an 'a' prefix.\r\n\r\n aabut (source, *args)\r\n acolex (a,indices,axis=1)\r\n acollapse (a,keepcols,collapsecols,sterr=0,ns=0)\r\n adm (a,criterion)\r\n alinexand (a,columnlist,valuelist)\r\n alinexor (a,columnlist,valuelist)\r\n areplace (a,oldval,newval)\r\n arecode (a,listmap,col='all')\r\n arowcompare (row1, row2)\r\n arowsame (row1, row2)\r\n asortrows(a,axis=0)\r\n aunique(inarray)\r\n aduplicates(inarray)\r\n\r\nCurrently, the code is all but completely un-optimized. In many cases, the\r\narray versions of functions amount simply to aliases to built-in array\r\nfunctions/methods. Their inclusion here is for function name consistency.\r\n\"\"\"\r\n\r\n## CHANGE LOG:\r\n## ==========\r\n## 07-11-26 ... 
edited to work with numpy\r\n## 01-11-15 ... changed list2string() to accept a delimiter\r\n## 01-06-29 ... converted exec()'s to eval()'s to make compatible with Py2.1\r\n## 01-05-31 ... added duplicates() and aduplicates() functions\r\n## 00-12-28 ... license made GPL, docstring and import requirements\r\n## 99-11-01 ... changed version to 0.3\r\n## 99-08-30 ... removed get, getstrings, put, aget, aput (into io.py)\r\n## 03/27/99 ... added areplace function, made replace fcn recursive\r\n## 12/31/98 ... added writefc function for ouput to fixed column sizes\r\n## 12/07/98 ... fixed import problem (failed on collapse() fcn)\r\n## added __version__ variable (now 0.2)\r\n## 12/05/98 ... updated doc-strings\r\n## added features to collapse() function\r\n## added flat() function for lists\r\n## fixed a broken asortrows() \r\n## 11/16/98 ... fixed minor bug in aput for 1D arrays\r\n##\r\n## 11/08/98 ... fixed aput to output large arrays correctly\r\n\r\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom . import stats # required 3rd party module\r\nimport string, copy\r\nfrom types import *\r\nfrom six.moves import map\nfrom six.moves import range\n\r\n__version__ = 0.4\r\n\r\n###=========================== LIST FUNCTIONS ==========================\r\n###\r\n### Here are the list functions, DEFINED FOR ALL SYSTEMS.\r\n### Array functions (for NumPy-enabled computers) appear below.\r\n###\r\n\r\ndef abut (source,*args):\r\n \"\"\"\r\nLike the |Stat abut command. It concatenates two lists side-by-side\r\nand returns the result. '2D' lists are also accomodated for either argument\r\n(source or addon). CAUTION: If one list is shorter, it will be repeated\r\nuntil it is as long as the longest list. If this behavior is not desired,\r\nuse pstat.simpleabut().\r\n\r\nUsage: abut(source, args) where args=any # of lists\r\nReturns: a list of lists as long as the LONGEST list past, source on the\r\n 'left', lists in <args> attached consecutively on the 'right'\r\n\"\"\"\r\n\r\n if type(source) not in [list,tuple]:\r\n source = [source]\r\n for addon in args:\r\n if type(addon) not in [list,tuple]:\r\n addon = [addon]\r\n if len(addon) < len(source): # is source list longer?\r\n if len(source) % len(addon) == 0: # are they integer multiples?\r\n repeats = len(source)/len(addon) # repeat addon n times\r\n origadd = copy.deepcopy(addon)\r\n for i in range(repeats-1):\r\n addon = addon + origadd\r\n else:\r\n repeats = len(source)/len(addon)+1 # repeat addon x times,\r\n origadd = copy.deepcopy(addon) # x is NOT an integer\r\n for i in range(repeats-1):\r\n addon = addon + origadd\r\n addon = addon[0:len(source)]\r\n elif len(source) < len(addon): # is addon list longer?\r\n if len(addon) % len(source) == 0: # are they integer multiples?\r\n repeats = len(addon)/len(source) # repeat source n times\r\n origsour = copy.deepcopy(source)\r\n for i in range(repeats-1):\r\n source = source + origsour\r\n else:\r\n repeats = len(addon)/len(source)+1 # repeat source x times,\r\n origsour = copy.deepcopy(source) # x is NOT an integer\r\n for i in range(repeats-1):\r\n source = source + origsour\r\n source = source[0:len(addon)]\r\n\r\n source = simpleabut(source,addon)\r\n return source\r\n\r\n\r\ndef simpleabut (source, addon):\r\n \"\"\"\r\nConcatenates two lists as columns and returns the result. '2D' lists\r\nare also accomodated for either argument (source or addon). This DOES NOT\r\nrepeat either list to make the 2 lists of equal length. 
Beware of list pairs\r\nwith different lengths ... the resulting list will be the length of the\r\nFIRST list passed.\r\n\r\nUsage: simpleabut(source,addon) where source, addon=list (or list-of-lists)\r\nReturns: a list of lists as long as source, with source on the 'left' and\r\n addon on the 'right'\r\n\"\"\"\r\n if type(source) not in [list,tuple]:\r\n source = [source]\r\n if type(addon) not in [list,tuple]:\r\n addon = [addon]\r\n minlen = min(len(source),len(addon))\r\n list_ = copy.deepcopy(source) # start abut process\r\n if type(source[0]) not in [list_,tuple]:\r\n if type(addon[0]) not in [list_,tuple]:\r\n for i in range(minlen):\r\n list_[i] = [source[i]] + [addon[i]] # source/addon = column\r\n else:\r\n for i in range(minlen):\r\n list_[i] = [source[i]] + addon[i] # addon=list_-of-list_s\r\n else:\r\n if type(addon[0]) not in [list_,tuple]:\r\n for i in range(minlen):\r\n list_[i] = source[i] + [addon[i]] # source=list_-of-list_s\r\n else:\r\n for i in range(minlen):\r\n list_[i] = source[i] + addon[i] # source/addon = list_-of-list_s\r\n source = list_\r\n return source\r\n\r\n\r\ndef colex (listoflists,cnums):\r\n \"\"\"\r\nExtracts from listoflists the columns specified in the list 'cnums'\r\n(cnums can be an integer, a sequence of integers, or a string-expression that\r\ncorresponds to a slice operation on the variable x ... e.g., 'x[3:]' will colex\r\ncolumns 3 onward from the listoflists).\r\n\r\nUsage: colex (listoflists,cnums)\r\nReturns: a list-of-lists corresponding to the columns from listoflists\r\n specified by cnums, in the order the column numbers appear in cnums\r\n\"\"\"\r\n global index\r\n column = 0\r\n if type(cnums) in [list,tuple]: # if multiple columns to get\r\n index = cnums[0]\r\n column = [x[index] for x in listoflists]\r\n for col in cnums[1:]:\r\n index = col\r\n column = abut(column,[x[index] for x in listoflists])\r\n elif type(cnums) == str: # if an 'x[3:]' type expr.\r\n evalstring = 'map(lambda x: x'+cnums+', listoflists)'\r\n column = eval(evalstring)\r\n else: # else it's just 1 col to get\r\n index = cnums\r\n column = [x[index] for x in listoflists]\r\n return column\r\n\r\n\r\ndef collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):\r\n \"\"\"\r\nAverages data in collapsecol, keeping all unique items in keepcols\r\n(using unique, which keeps unique LISTS of column numbers), retaining the\r\nunique sets of values in keepcols, the mean for each. 
Setting fcn1\r\nand/or fcn2 to point to a function rather than None (e.g., stats.sterr, len)\r\nwill append those results (e.g., the sterr, N) after each calculated mean.\r\ncfcn is the collapse function to apply (defaults to mean, defined here in the\r\npstat module to avoid circular imports with stats.py, but harmonicmean or\r\nothers could be passed).\r\n\r\nUsage: collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)\r\nReturns: a list of lists with all unique permutations of entries appearing in\r\n columns (\"conditions\") specified by keepcols, abutted with the result of\r\n cfcn (if cfcn=None, defaults to the mean) of each column specified by\r\n collapsecols.\r\n\"\"\"\r\n def collmean (inlist):\r\n s = 0\r\n for item in inlist:\r\n s = s + item\r\n return s/float(len(inlist))\r\n\r\n if type(keepcols) not in [list,tuple]:\r\n keepcols = [keepcols]\r\n if type(collapsecols) not in [list,tuple]:\r\n collapsecols = [collapsecols]\r\n if cfcn == None:\r\n cfcn = collmean\r\n if keepcols == []:\r\n means = [0]*len(collapsecols)\r\n for i in range(len(collapsecols)):\r\n avgcol = colex(listoflists,collapsecols[i])\r\n means[i] = cfcn(avgcol)\r\n if fcn1:\r\n try:\r\n test = fcn1(avgcol)\r\n except:\r\n test = 'N/A'\r\n means[i] = [means[i], test]\r\n if fcn2:\r\n try:\r\n test = fcn2(avgcol)\r\n except:\r\n test = 'N/A'\r\n try:\r\n means[i] = means[i] + [len(avgcol)]\r\n except TypeError:\r\n means[i] = [means[i],len(avgcol)]\r\n return means\r\n else:\r\n values = colex(listoflists,keepcols)\r\n uniques = unique(values)\r\n uniques.sort()\r\n newlist = []\r\n if type(keepcols) not in [list,tuple]: keepcols = [keepcols]\r\n for item in uniques:\r\n if type(item) not in [list,tuple]: item =[item]\r\n tmprows = linexand(listoflists,keepcols,item)\r\n for col in collapsecols:\r\n avgcol = colex(tmprows,col)\r\n item.append(cfcn(avgcol))\r\n if fcn1 != None:\r\n try:\r\n test = fcn1(avgcol)\r\n except:\r\n test = 'N/A'\r\n item.append(test)\r\n if fcn2 != None:\r\n try:\r\n test = fcn2(avgcol)\r\n except:\r\n test = 'N/A'\r\n item.append(test)\r\n newlist.append(item)\r\n return newlist\r\n\r\n\r\ndef dm (listoflists,criterion):\r\n \"\"\"\r\nReturns rows from the passed list of lists that meet the criteria in\r\nthe passed criterion expression (a string as a function of x; e.g., 'x[3]>=9'\r\nwill return all rows where the 4th column>=9 and \"x[2]=='N'\" will return rows\r\nwith column 2 equal to the string 'N').\r\n\r\nUsage: dm (listoflists, criterion)\r\nReturns: rows from listoflists that meet the specified criterion.\r\n\"\"\"\r\n function = 'filter(lambda x: '+criterion+',listoflists)'\r\n lines = eval(function)\r\n return lines\r\n\r\n\r\ndef flat(l):\r\n \"\"\"\r\nReturns the flattened version of a '2D' list. 
List-correlate to the a.ravel()()\r\nmethod of NumPy arrays.\r\n\r\nUsage: flat(l)\r\n\"\"\"\r\n newl = []\r\n for i in range(len(l)):\r\n for j in range(len(l[i])):\r\n newl.append(l[i][j])\r\n return newl\r\n\r\n\r\ndef linexand (listoflists,columnlist,valuelist):\r\n \"\"\"\r\nReturns the rows of a list of lists where col (from columnlist) = val\r\n(from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).\r\nlen(columnlist) must equal len(valuelist).\r\n\r\nUsage: linexand (listoflists,columnlist,valuelist)\r\nReturns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i\r\n\"\"\"\r\n if type(columnlist) not in [list,tuple]:\r\n columnlist = [columnlist]\r\n if type(valuelist) not in [list,tuple]:\r\n valuelist = [valuelist]\r\n criterion = ''\r\n for i in range(len(columnlist)):\r\n if type(valuelist[i])==str:\r\n critval = '\\'' + valuelist[i] + '\\''\r\n else:\r\n critval = str(valuelist[i])\r\n criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'\r\n criterion = criterion[0:-3] # remove the \"and\" after the last crit\r\n function = 'filter(lambda x: '+criterion+',listoflists)'\r\n lines = eval(function)\r\n return lines\r\n\r\n\r\ndef linexor (listoflists,columnlist,valuelist):\r\n \"\"\"\r\nReturns the rows of a list of lists where col (from columnlist) = val\r\n(from valuelist) for ANY pair of values (colunmlist[i],valuelist[i[).\r\nOne value is required for each column in columnlist. If only one value\r\nexists for columnlist but multiple values appear in valuelist, the\r\nvaluelist values are all assumed to pertain to the same column.\r\n\r\nUsage: linexor (listoflists,columnlist,valuelist)\r\nReturns: the rows of listoflists where columnlist[i]=valuelist[i] for ANY i\r\n\"\"\"\r\n if type(columnlist) not in [list,tuple]:\r\n columnlist = [columnlist]\r\n if type(valuelist) not in [list,tuple]:\r\n valuelist = [valuelist]\r\n criterion = ''\r\n if len(columnlist) == 1 and len(valuelist) > 1:\r\n columnlist = columnlist*len(valuelist)\r\n for i in range(len(columnlist)): # build an exec string\r\n if type(valuelist[i])==str:\r\n critval = '\\'' + valuelist[i] + '\\''\r\n else:\r\n critval = str(valuelist[i])\r\n criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'\r\n criterion = criterion[0:-2] # remove the \"or\" after the last crit\r\n function = 'filter(lambda x: '+criterion+',listoflists)'\r\n lines = eval(function)\r\n return lines\r\n\r\n\r\ndef linedelimited (inlist,delimiter):\r\n \"\"\"\r\nReturns a string composed of elements in inlist, with each element\r\nseparated by 'delimiter.' Used by function writedelimited. 
Use '\\t'\r\nfor tab-delimiting.\r\n\r\nUsage: linedelimited (inlist,delimiter)\r\n\"\"\"\r\n outstr = ''\r\n for item in inlist:\r\n if type(item) != str:\r\n item = str(item)\r\n outstr = outstr + item + delimiter\r\n outstr = outstr[0:-1]\r\n return outstr\r\n\r\n\r\ndef lineincols (inlist,colsize):\r\n \"\"\"\r\nReturns a string composed of elements in inlist, with each element\r\nright-aligned in columns of (fixed) colsize.\r\n\r\nUsage: lineincols (inlist,colsize) where colsize is an integer\r\n\"\"\"\r\n outstr = ''\r\n for item in inlist:\r\n if type(item) != str:\r\n item = str(item)\r\n size = len(item)\r\n if size <= colsize:\r\n for i in range(colsize-size):\r\n outstr = outstr + ' '\r\n outstr = outstr + item\r\n else:\r\n outstr = outstr + item[0:colsize+1]\r\n return outstr\r\n\r\n\r\ndef lineincustcols (inlist,colsizes):\r\n \"\"\"\r\nReturns a string composed of elements in inlist, with each element\r\nright-aligned in a column of width specified by a sequence colsizes. The\r\nlength of colsizes must be greater than or equal to the number of columns\r\nin inlist.\r\n\r\nUsage: lineincustcols (inlist,colsizes)\r\nReturns: formatted string created from inlist\r\n\"\"\"\r\n outstr = ''\r\n for i in range(len(inlist)):\r\n if type(inlist[i]) != str:\r\n item = str(inlist[i])\r\n else:\r\n item = inlist[i]\r\n size = len(item)\r\n if size <= colsizes[i]:\r\n for j in range(colsizes[i]-size):\r\n outstr = outstr + ' '\r\n outstr = outstr + item\r\n else:\r\n outstr = outstr + item[0:colsizes[i]+1]\r\n return outstr\r\n\r\n\r\ndef list2string (inlist,delimit=' '):\r\n \"\"\"\r\nConverts a 1D list to a single long string for file output, using\r\nthe string.join function.\r\n\r\nUsage: list2string (inlist,delimit=' ')\r\nReturns: the string created from inlist\r\n\"\"\"\r\n stringlist = list(map(makestr,inlist))\r\n return string.join(stringlist,delimit)\r\n\r\n\r\ndef makelol(inlist):\r\n \"\"\"\r\nConverts a 1D list to a 2D list (i.e., a list-of-lists). 
Useful when you\r\nwant to use put() to write a 1D list one item per line in the file.\r\n\r\nUsage: makelol(inlist)\r\nReturns: if l = [1,2,'hi'] then returns [[1],[2],['hi']] etc.\r\n\"\"\"\r\n x = []\r\n for item in inlist:\r\n x.append([item])\r\n return x\r\n\r\n\r\ndef makestr (x):\r\n if type(x) != str:\r\n x = str(x)\r\n return x\r\n\r\n\r\ndef printcc (lst,extra=2):\r\n \"\"\"\r\nPrints a list of lists in columns, customized by the max size of items\r\nwithin the columns (max size of items in col, plus 'extra' number of spaces).\r\nUse 'dashes' or '\\\\n' in the list-of-lists to print dashes or blank lines,\r\nrespectively.\r\n\r\nUsage: printcc (lst,extra=2)\r\nReturns: None\r\n\"\"\"\r\n if type(lst[0]) not in [list,tuple]:\r\n lst = [lst]\r\n rowstokill = []\r\n list2print = copy.deepcopy(lst)\r\n for i in range(len(lst)):\r\n if lst[i] == ['\\n'] or lst[i]=='\\n' or lst[i]=='dashes' or lst[i]=='' or lst[i]==['']:\r\n rowstokill = rowstokill + [i]\r\n rowstokill.reverse() # delete blank rows from the end\r\n for row in rowstokill:\r\n del list2print[row]\r\n maxsize = [0]*len(list2print[0])\r\n for col in range(len(list2print[0])):\r\n items = colex(list2print,col)\r\n items = list(map(makestr,items))\r\n maxsize[col] = max(list(map(len,items))) + extra\r\n for row in lst:\r\n if row == ['\\n'] or row == '\\n' or row == '' or row == ['']:\r\n print()\r\n elif row == ['dashes'] or row == 'dashes':\r\n dashes = [0]*len(maxsize)\r\n for j in range(len(maxsize)):\r\n dashes[j] = '-'*(maxsize[j]-2)\r\n print(lineincustcols(dashes,maxsize))\r\n else:\r\n print(lineincustcols(row,maxsize))\r\n return None\r\n\r\n\r\ndef printincols (listoflists,colsize):\r\n \"\"\"\r\nPrints a list of lists in columns of (fixed) colsize width, where\r\ncolsize is an integer.\r\n\r\nUsage: printincols (listoflists,colsize)\r\nReturns: None\r\n\"\"\"\r\n for row in listoflists:\r\n print(lineincols(row,colsize))\r\n return None\r\n\r\n\r\ndef pl (listoflists):\r\n \"\"\"\r\nPrints a list of lists, 1 list (row) at a time.\r\n\r\nUsage: pl(listoflists)\r\nReturns: None\r\n\"\"\"\r\n for row in listoflists:\r\n if row[-1] == '\\n':\r\n print(row, end=' ')\r\n else:\r\n print(row)\r\n return None\r\n\r\n\r\ndef printl(listoflists):\r\n \"\"\"Alias for pl.\"\"\"\r\n pl(listoflists)\r\n return\r\n\r\n\r\ndef replace (inlst,oldval,newval):\r\n \"\"\"\r\nReplaces all occurrences of 'oldval' with 'newval', recursively.\r\n\r\nUsage: replace (inlst,oldval,newval)\r\n\"\"\"\r\n lst = inlst*1\r\n for i in range(len(lst)):\r\n if type(lst[i]) not in [list,tuple]:\r\n if lst[i]==oldval: lst[i]=newval\r\n else:\r\n lst[i] = replace(lst[i],oldval,newval)\r\n return lst\r\n\r\n\r\ndef recode (inlist,listmap,cols=None):\r\n \"\"\"\r\nChanges the values in a list to a new set of values (useful when\r\nyou need to recode data from (e.g.) strings to numbers. 
cols defaults\r\nto None (meaning all columns are recoded).\r\n\r\nUsage: recode (inlist,listmap,cols=None) cols=recode cols, listmap=2D list\r\nReturns: inlist with the appropriate values replaced with new ones\r\n\"\"\"\r\n lst = copy.deepcopy(inlist)\r\n if cols != None:\r\n if type(cols) not in [list,tuple]:\r\n cols = [cols]\r\n for col in cols:\r\n for row in range(len(lst)):\r\n try:\r\n idx = colex(listmap,0).index(lst[row][col])\r\n lst[row][col] = listmap[idx][1]\r\n except ValueError:\r\n pass\r\n else:\r\n for row in range(len(lst)):\r\n for col in range(len(lst)):\r\n try:\r\n idx = colex(listmap,0).index(lst[row][col])\r\n lst[row][col] = listmap[idx][1]\r\n except ValueError:\r\n pass\r\n return lst\r\n\r\n\r\ndef remap (listoflists,criterion):\r\n \"\"\"\r\nRemaps values in a given column of a 2D list (listoflists). This requires\r\na criterion as a function of 'x' so that the result of the following is\r\nreturned ... map(lambda x: 'criterion',listoflists). \r\n\r\nUsage: remap(listoflists,criterion) criterion=string\r\nReturns: remapped version of listoflists\r\n\"\"\"\r\n function = 'map(lambda x: '+criterion+',listoflists)'\r\n lines = eval(function)\r\n return lines\r\n\r\n\r\ndef roundlist (inlist,digits):\r\n \"\"\"\r\nGoes through each element in a 1D or 2D inlist, and applies the following\r\nfunction to all elements of float ... round(element,digits).\r\n\r\nUsage: roundlist(inlist,digits)\r\nReturns: list with rounded floats\r\n\"\"\"\r\n if type(inlist[0]) in [int, float]:\r\n inlist = [inlist]\r\n l = inlist*1\r\n for i in range(len(l)):\r\n for j in range(len(l[i])):\r\n if type(l[i][j])==float:\r\n l[i][j] = round(l[i][j],digits)\r\n return l\r\n\r\n\r\ndef sortby(listoflists,sortcols):\r\n \"\"\"\r\nSorts a list of lists on the column(s) specified in the sequence\r\nsortcols.\r\n\r\nUsage: sortby(listoflists,sortcols)\r\nReturns: sorted list, unchanged column ordering\r\n\"\"\"\r\n newlist = abut(colex(listoflists,sortcols),listoflists)\r\n newlist.sort()\r\n try:\r\n numcols = len(sortcols)\r\n except TypeError:\r\n numcols = 1\r\n crit = '[' + str(numcols) + ':]'\r\n newlist = colex(newlist,crit)\r\n return newlist\r\n\r\n\r\ndef unique (inlist):\r\n \"\"\"\r\nReturns all unique items in the passed list. 
If the a list-of-lists\r\nis passed, unique LISTS are found (i.e., items in the first dimension are\r\ncompared).\r\n\r\nUsage: unique (inlist)\r\nReturns: the unique elements (or rows) in inlist\r\n\"\"\"\r\n uniques = []\r\n for item in inlist:\r\n if item not in uniques:\r\n uniques.append(item)\r\n return uniques\r\n\r\ndef duplicates(inlist):\r\n \"\"\"\r\nReturns duplicate items in the FIRST dimension of the passed list.\r\n\r\nUsage: duplicates (inlist)\r\n\"\"\"\r\n dups = []\r\n for i in range(len(inlist)):\r\n if inlist[i] in inlist[i+1:]:\r\n dups.append(inlist[i])\r\n return dups\r\n\r\n\r\ndef nonrepeats(inlist):\r\n \"\"\"\r\nReturns items that are NOT duplicated in the first dim of the passed list.\r\n\r\nUsage: nonrepeats (inlist)\r\n\"\"\"\r\n nonrepeats = []\r\n for i in range(len(inlist)):\r\n if inlist.count(inlist[i]) == 1:\r\n nonrepeats.append(inlist[i])\r\n return nonrepeats\r\n\r\n\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n\r\ntry: # DEFINE THESE *ONLY* IF numpy IS AVAILABLE\r\n import numpy as N\r\n \r\n def aabut (source, *args):\r\n \"\"\"\r\n Like the |Stat abut command. It concatenates two arrays column-wise\r\n and returns the result. CAUTION: If one array is shorter, it will be\r\n repeated until it is as long as the other.\r\n \r\n Usage: aabut (source, args) where args=any # of arrays\r\n Returns: an array as long as the LONGEST array past, source appearing on the\r\n 'left', arrays in <args> attached on the 'right'.\r\n \"\"\"\r\n if len(source.shape)==1:\r\n width = 1\r\n source = N.resize(source,[source.shape[0],width])\r\n else:\r\n width = source.shape[1]\r\n for addon in args:\r\n if len(addon.shape)==1:\r\n width = 1\r\n addon = N.resize(addon,[source.shape[0],width])\r\n else:\r\n width = source.shape[1]\r\n if len(addon) < len(source):\r\n addon = N.resize(addon,[source.shape[0],addon.shape[1]])\r\n elif len(source) < len(addon):\r\n source = N.resize(source,[addon.shape[0],source.shape[1]])\r\n source = N.concatenate((source,addon),1)\r\n return source\r\n \r\n \r\n def acolex (a,indices,axis=1):\r\n \"\"\"\r\n Extracts specified indices (a list) from passed array, along passed\r\n axis (column extraction is default). 
BEWARE: A 1D array is presumed to be a\r\n column-array (and that the whole array will be returned as a column).\r\n \r\n Usage: acolex (a,indices,axis=1)\r\n Returns: the columns of a specified by indices\r\n \"\"\"\r\n if type(indices) not in [list,tuple,N.ndarray]:\r\n indices = [indices]\r\n if len(N.shape(a)) == 1:\r\n cols = N.resize(a,[a.shape[0],1])\r\n else:\r\n cols = N.take(a,indices,axis)\r\n return cols\r\n \r\n \r\n def acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):\r\n \"\"\"\r\n Averages data in collapsecol, keeping all unique items in keepcols\r\n (using unique, which keeps unique LISTS of column numbers), retaining\r\n the unique sets of values in keepcols, the mean for each. If stderror or\r\n N of the mean are desired, set either or both parameters to 1.\r\n \r\n Usage: acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)\r\n Returns: unique 'conditions' specified by the contents of columns specified\r\n by keepcols, abutted with the mean(s) of column(s) specified by\r\n collapsecols\r\n \"\"\"\r\n def acollmean (inarray):\r\n return N.sum(N.ravel(inarray))\r\n \r\n if type(keepcols) not in [list,tuple,N.ndarray]:\r\n keepcols = [keepcols]\r\n if type(collapsecols) not in [list,tuple,N.ndarray]:\r\n collapsecols = [collapsecols]\r\n \r\n if cfcn == None:\r\n cfcn = acollmean\r\n if keepcols == []:\r\n avgcol = acolex(a,collapsecols)\r\n means = N.sum(avgcol)/float(len(avgcol))\r\n if fcn1!=None:\r\n try:\r\n test = fcn1(avgcol)\r\n except:\r\n test = N.array(['N/A']*len(means))\r\n means = aabut(means,test)\r\n if fcn2!=None:\r\n try:\r\n test = fcn2(avgcol)\r\n except:\r\n test = N.array(['N/A']*len(means))\r\n means = aabut(means,test)\r\n return means\r\n else:\r\n if type(keepcols) not in [list,tuple,N.ndarray]:\r\n keepcols = [keepcols]\r\n values = colex(a,keepcols) # so that \"item\" can be appended (below)\r\n uniques = unique(values) # get a LIST, so .sort keeps rows intact\r\n uniques.sort()\r\n newlist = []\r\n for item in uniques:\r\n if type(item) not in [list,tuple,N.ndarray]:\r\n item =[item]\r\n tmprows = alinexand(a,keepcols,item)\r\n for col in collapsecols:\r\n avgcol = acolex(tmprows,col)\r\n item.append(acollmean(avgcol))\r\n if fcn1!=None:\r\n try:\r\n test = fcn1(avgcol)\r\n except:\r\n test = 'N/A'\r\n item.append(test)\r\n if fcn2!=None:\r\n try:\r\n test = fcn2(avgcol)\r\n except:\r\n test = 'N/A'\r\n item.append(test)\r\n newlist.append(item)\r\n try:\r\n new_a = N.array(newlist)\r\n except TypeError:\r\n new_a = N.array(newlist,'O')\r\n return new_a\r\n \r\n \r\n def adm (a,criterion):\r\n \"\"\"\r\n Returns rows from the passed list of lists that meet the criteria in\r\n the passed criterion expression (a string as a function of x).\r\n \r\n Usage: adm (a,criterion) where criterion is like 'x[2]==37'\r\n \"\"\"\r\n function = 'filter(lambda x: '+criterion+',a)'\r\n lines = eval(function)\r\n try:\r\n lines = N.array(lines)\r\n except:\r\n lines = N.array(lines,dtype='O')\r\n return lines\r\n \r\n \r\n def isstring(x):\r\n if type(x)==str:\r\n return 1\r\n else:\r\n return 0\r\n \r\n \r\n def alinexand (a,columnlist,valuelist):\r\n \"\"\"\r\n Returns the rows of an array where col (from columnlist) = val\r\n (from valuelist). 
One value is required for each column in columnlist.\r\n \r\n Usage: alinexand (a,columnlist,valuelist)\r\n Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i\r\n \"\"\"\r\n if type(columnlist) not in [list,tuple,N.ndarray]:\r\n columnlist = [columnlist]\r\n if type(valuelist) not in [list,tuple,N.ndarray]:\r\n valuelist = [valuelist]\r\n criterion = ''\r\n for i in range(len(columnlist)):\r\n if type(valuelist[i])==str:\r\n critval = '\\'' + valuelist[i] + '\\''\r\n else:\r\n critval = str(valuelist[i])\r\n criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'\r\n criterion = criterion[0:-3] # remove the \"and\" after the last crit\r\n return adm(a,criterion)\r\n \r\n \r\n def alinexor (a,columnlist,valuelist):\r\n \"\"\"\r\n Returns the rows of an array where col (from columnlist) = val (from\r\n valuelist). One value is required for each column in columnlist.\r\n The exception is if either columnlist or valuelist has only 1 value,\r\n in which case that item will be expanded to match the length of the\r\n other list.\r\n \r\n Usage: alinexor (a,columnlist,valuelist)\r\n Returns: the rows of a where columnlist[i]=valuelist[i] for ANY i\r\n \"\"\"\r\n if type(columnlist) not in [list,tuple,N.ndarray]:\r\n columnlist = [columnlist]\r\n if type(valuelist) not in [list,tuple,N.ndarray]:\r\n valuelist = [valuelist]\r\n criterion = ''\r\n if len(columnlist) == 1 and len(valuelist) > 1:\r\n columnlist = columnlist*len(valuelist)\r\n elif len(valuelist) == 1 and len(columnlist) > 1:\r\n valuelist = valuelist*len(columnlist)\r\n for i in range(len(columnlist)):\r\n if type(valuelist[i])==str:\r\n critval = '\\'' + valuelist[i] + '\\''\r\n else:\r\n critval = str(valuelist[i])\r\n criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'\r\n criterion = criterion[0:-2] # remove the \"or\" after the last crit\r\n return adm(a,criterion)\r\n \r\n \r\n def areplace (a,oldval,newval):\r\n \"\"\"\r\n Replaces all occurrences of oldval with newval in array a.\r\n \r\n Usage: areplace(a,oldval,newval)\r\n \"\"\"\r\n return N.where(a==oldval,newval,a)\r\n \r\n \r\n def arecode (a,listmap,col='all'):\r\n \"\"\"\r\n Remaps the values in an array to a new set of values (useful when\r\n you need to recode data from (e.g.) strings to numbers as most stats\r\n packages require. Can work on SINGLE columns, or 'all' columns at once.\r\n @@@BROKEN 2007-11-26\r\n \r\n Usage: arecode (a,listmap,col='all')\r\n Returns: a version of array a where listmap[i][0] = (instead) listmap[i][1]\r\n \"\"\"\r\n ashape = a.shape\r\n if col == 'all':\r\n work = a.ravel()\r\n else:\r\n work = acolex(a,col)\r\n work = work.ravel()\r\n for pair in listmap:\r\n if type(pair[1]) == str or work.dtype.char=='O' or a.dtype.char=='O':\r\n work = N.array(work,dtype='O')\r\n a = N.array(a,dtype='O')\r\n for i in range(len(work)):\r\n if work[i]==pair[0]:\r\n work[i] = pair[1]\r\n if col == 'all':\r\n return N.reshape(work,ashape)\r\n else:\r\n return N.concatenate([a[:,0:col],work[:,N.newaxis],a[:,col+1:]],1)\r\n else: # must be a non-Object type array and replacement\r\n work = N.where(work==pair[0],pair[1],work)\r\n return N.concatenate([a[:,0:col],work[:,N.newaxis],a[:,col+1:]],1)\r\n \r\n \r\n def arowcompare(row1, row2):\r\n \"\"\"\r\n Compares two rows from an array, regardless of whether it is an\r\n array of numbers or of python objects (which requires the cmp function).\r\n @@@PURPOSE? 
2007-11-26\r\n \r\n Usage: arowcompare(row1,row2)\r\n Returns: an array of equal length containing 1s where the two rows had\r\n identical elements and 0 otherwise\r\n \"\"\"\r\n return \r\n if row1.dtype.char=='O' or row2.dtype=='O':\r\n cmpvect = N.logical_not(abs(N.array(list(map(cmp,row1,row2))))) # cmp fcn gives -1,0,1\r\n else:\r\n cmpvect = N.equal(row1,row2)\r\n return cmpvect\r\n \r\n \r\n def arowsame(row1, row2):\r\n \"\"\"\r\n Compares two rows from an array, regardless of whether it is an\r\n array of numbers or of python objects (which requires the cmp function).\r\n \r\n Usage: arowsame(row1,row2)\r\n Returns: 1 if the two rows are identical, 0 otherwise.\r\n \"\"\"\r\n cmpval = N.alltrue(arowcompare(row1,row2))\r\n return cmpval\r\n \r\n \r\n def asortrows(a,axis=0):\r\n \"\"\"\r\n Sorts an array \"by rows\". This differs from the Numeric.sort() function,\r\n which sorts elements WITHIN the given axis. Instead, this function keeps\r\n the elements along the given axis intact, but shifts them 'up or down'\r\n relative to one another.\r\n \r\n Usage: asortrows(a,axis=0)\r\n Returns: sorted version of a\r\n \"\"\"\r\n return N.sort(a,axis=axis,kind='mergesort')\r\n \r\n \r\n def aunique(inarray):\r\n \"\"\"\r\n Returns unique items in the FIRST dimension of the passed array. Only\r\n works on arrays NOT including string items.\r\n \r\n Usage: aunique (inarray)\r\n \"\"\"\r\n uniques = N.array([inarray[0]])\r\n if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY\r\n for item in inarray[1:]:\r\n if N.add.reduce(N.equal(uniques,item).ravel()) == 0:\r\n try:\r\n uniques = N.concatenate([uniques,N.array[N.newaxis,:]])\r\n except TypeError:\r\n uniques = N.concatenate([uniques,N.array([item])])\r\n else: # IT MUST BE A 2+D ARRAY\r\n if inarray.dtype.char != 'O': # not an Object array\r\n for item in inarray[1:]:\r\n if not N.sum(N.alltrue(N.equal(uniques,item),1)):\r\n try:\r\n uniques = N.concatenate( [uniques,item[N.newaxis,:]] )\r\n except TypeError: # the item to add isn't a list\r\n uniques = N.concatenate([uniques,N.array([item])])\r\n else:\r\n pass # this item is already in the uniques array\r\n else: # must be an Object array, alltrue/equal functions don't work\r\n for item in inarray[1:]:\r\n newflag = 1\r\n for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=>\r\n test = N.sum(abs(N.array(list(map(cmp,item,unq)))))\r\n if test == 0: # if item identical to any 1 row in uniques\r\n newflag = 0 # then not a novel item to add\r\n break\r\n if newflag == 1:\r\n try:\r\n uniques = N.concatenate( [uniques,item[N.newaxis,:]] )\r\n except TypeError: # the item to add isn't a list\r\n uniques = N.concatenate([uniques,N.array([item])])\r\n return uniques\r\n \r\n \r\n def aduplicates(inarray):\r\n \"\"\"\r\n Returns duplicate items in the FIRST dimension of the passed array. 
Only\r\n works on arrays NOT including string items.\r\n \r\n Usage: aunique (inarray)\r\n \"\"\"\r\n inarray = N.array(inarray)\r\n if len(inarray.shape) == 1: # IF IT'S A 1D ARRAY\r\n dups = []\r\n inarray = inarray.tolist()\r\n for i in range(len(inarray)):\r\n if inarray[i] in inarray[i+1:]:\r\n dups.append(inarray[i])\r\n dups = aunique(dups)\r\n else: # IT MUST BE A 2+D ARRAY\r\n dups = []\r\n aslist = inarray.tolist()\r\n for i in range(len(aslist)):\r\n if aslist[i] in aslist[i+1:]:\r\n dups.append(aslist[i])\r\n dups = unique(dups)\r\n dups = N.array(dups)\r\n return dups\r\n \r\nexcept ImportError: # IF NUMERIC ISN'T AVAILABLE, SKIP ALL arrayfuncs\r\n pass\r\n", "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom six.moves import map\nfrom six.moves import range\nif 1:\n import numpy as N\n from statlib import pstat, stats\n from .pstat import *\n from .stats import *\n from numpy import linalg as LA\n import operator, math\n\n def aanova(data,effects=['A','B','C','D','E','F','G','H','I','J','K']):\n \"\"\"\n Prints the results of single-variable between- and within-subject ANOVA\n designs. The function can only handle univariate ANOVAs with a single\n random factor. The random factor is coded in column 0 of the input\n list/array (see below) and the measured variable is coded in the last\n column of the input list/array. The following were used as references\n when writing the code:\n\n Maxwell, SE, Delaney HD (1990) Designing Experiments and Analyzing\n Data, Wadsworth: Belmont, CA.\n Lindman, HR (1992) Analysis of Variance in Experimental Design,\n Springer-Verlag: New York.\n\n TO DO: Increase Current Max Of 10 Levels Per W/I-Subject Factor\n Consolidate Between-Subj Analyses For Between And Within/Between\n Front-end for different input data-array shapes/organization\n Axe mess of 'global' statements (particularly for Drestrict fcns)\n\n Usage: anova(data, data = |Stat format\n effects=['A','B','C','D','E','F','G','H','I','J','K'])\n\n Note: |Stat format is as follows ... one datum per row, first element of\n row is the subject identifier, followed by all within/between subject\n variable designators, and the measured data point as the last element in the\n row. Thus, [1, 'short', 'drugY', 2, 14.7] represents subject 1 when measured\n in the short / drugY / 2 condition, and subject 1 gave a measured value of\n 14.7 in this combination of conditions. Thus, all input lists are '2D'\n lists-of-lists.\n \"\"\"\n global alluniqueslist, Nlevels, Nfactors, Nsubjects, Nblevels, Nallsources\n global Bscols, Bbetweens, SSlist, SSsources, DM, DN, Bwonly_sources, D\n global Bwithins, alleffects, alleffsources\n outputlist = []\n SSbtw = []\n SSbtwsources = []\n SSwb = []\n SSwbsources = []\n alleffects = []\n alleffsources = []\n SSlist = []\n SSsources = []\n\n print()\n variables = 1 # this function only handles one measured variable\n\n if type(data)!=type([]):\n data = data.tolist()\n\n## Create a list of all unique values in each column, and a list of these Ns\n alluniqueslist = [0]*(len(data[0])-variables) # all cols but data cols\n Nlevels = [0]*(len(data[0])-variables) # (as above)\n for column in range(len(Nlevels)):\n alluniqueslist[column] = pstat.unique(pstat.colex(data,column))\n Nlevels[column] = len(alluniqueslist[column])\n\n Ncells = N.multiply.reduce(Nlevels[1:]) # total num cells (w/i AND btw)\n Nfactors = len(Nlevels[1:]) # total num factors\n Nallsources = 2**(Nfactors+1) # total no. 
possible sources (factor-combos)\n Nsubjects = len(alluniqueslist[0]) # total # subj in study (# of diff. subj numbers in column 0)\n\n## Within-subj factors defined as those where there are fewer subj than\n## scores in the first level of a factor (quick and dirty; findwithin() below)\n Bwithins = findwithin(data) # binary w/i subj factors (excl. col 0)\n Bbetweens = ~Bwithins & (Nallsources-1) - 1\n\n Wcolumns = makelist(Bwithins,Nfactors+1) # get list of cols of w/i factors\n Wscols = [0] + Wcolumns # w/i subj columns INCL col 0\n Bscols = makelist(Bbetweens+1,Nfactors+1) #list of btw-subj cols,INCL col 0\n Nwifactors = len(Wscols) - 1 # WAS len(Wcolumns)\n Nwlevels = N.take(N.array(Nlevels),Wscols) # no.lvls for each w/i subj fact\n Nbtwfactors = len(Bscols) - 1 # WASNfactors - Nwifactors + 1\n Nblevels = N.take(N.array(Nlevels),Bscols)\n\n Nwsources = 2**Nwifactors - 1 # num within-subject factor-combos\n Nbsources = Nallsources - Nwsources\n\n #\n # CALC M-VARIABLE (LIST) and Marray/Narray VARIABLES (ARRAY OF CELL MNS/NS)\n #\n # Eliminate replications for the same subject in same condition as well as\n # within-subject repetitions, keep as list\n M = pstat.collapse(data,Bscols,-1,None,None,mean)\n # Create an arrays of Nblevels shape (excl. subj dim)\n Marray = N.zeros(Nblevels[1:],'f')\n Narray = N.zeros(Nblevels[1:],'f')\n # Fill arrays by looping through all scores in the (collapsed) M\n for row in M:\n idx = []\n for i in range(len(row[:-1])):\n idx.append(alluniqueslist[Bscols[i]].index(row[i]))\n idx = idx[1:]\n Marray[idx] = Marray[idx] + row[-1]\n Narray[idx] = Narray[idx] + 1\n Marray = Marray / Narray\n\n #\n # CREATE DATA ARRAY, DA, FROM ORIGINAL INPUT DATA\n # (this is an unbelievably bad, wasteful data structure, but it makes lots\n # of tasks much easier; should nevertheless be fixed someday)\n\n # This limits the within-subject level count to 10!\n coefflist =[[[1]],\n [[-1,1]],\n [[-1,0,1],[1,-2,1]],\n [[-3,-1,1,3],[1,-1,-1,1],[-1,3,-3,1]],\n [[-2,-1,0,1,2],[2,-1,-2,-1,2],[-1,2,0,-2,1],[1,-4,6,-4,1]],\n [[-5,-3,-1,1,3,5],[5,-1,-4,-4,-1,5],[-5,7,4,-4,-7,5],\n [1,-3,2,2,-3,1],[-1,5,-10,10,-5,1]],\n [[-3,-2,-1,0,1,2,3],[5,0,-3,-4,-3,0,5],[-1,1,1,0,-1,-1,1],\n [3,-7,1,6,1,-7,3],[-1,4,-5,0,5,-4,1],[1,-6,15,-20,15,-6,1]],\n [[-7,-5,-3,-1,1,3,5,7],[7,1,-3,-5,-5,-3,1,7],\n [-7,5,7,3,-3,-7,-5,7],[7,-13,-3,9,9,-3,-13,7],\n [-7,23,-17,-15,15,17,-23,7],[1,-5,9,-5,-5,9,-5,1],\n [-1,7,-21,35,-35,21,-7,1]],\n [[-4,-3,-2,-1,0,1,2,3,4],[28,7,-8,-17,-20,-17,-8,7,28],\n [-14,7,13,9,0,-9,-13,-7,14],[14,-21,-11,9,18,9,-11,-21,14],\n [-4,11,-4,-9,0,9,4,-11,4],[4,-17,22,1,-20,1,22,-17,4],\n [-1,6,-14,14,0,-14,14,-6,1],[1,-8,28,-56,70,-56,28,-8,1]],\n [[-9,-7,-5,-3,-1,1,3,5,7,9],[6,2,-1,-3,-4,-4,-3,-1,2,6],\n [-42,14,35,31,12,-12,-31,-35,-14,42],\n [18,-22,-17,3,18,18,3,-17,-22,18],\n [-6,14,-1,-11,-6,6,11,1,-14,6],[3,-11,10,6,-8,-8,6,10,-11,3],\n [9,-47,86,-42,-56,56,42,-86,47,-9],\n [1,-7,20,-28,14,14,-28,20,-7,1],\n [-1,9,-36,84,-126,126,-84,36,-9,1]]]\n\n dindex = 0\n # Prepare a list to be filled with arrays of D-variables, array per within-\n # subject combo (i.e., for 2 w/i subj factors E and F ... 
E, F, ExF)\n NDs = [0]* Nwsources\n for source in range(Nwsources):\n if subset(source,Bwithins):\n NDs[dindex] = numlevels(source,Nlevels)\n dindex = dindex + 1\n\n # Collapse multiple repetitions on the same subject and same condition\n cdata = pstat.collapse(data,list(range(Nfactors+1)),-1,None,None,mean)\n\n # Find a value that's not a data score with which to fill the array DA\n dummyval = -1\n datavals = pstat.colex(data,-1)\n while dummyval in datavals: # find a value that's not a data score\n dummyval = dummyval - 1\n DA = N.ones(Nlevels,'f')*dummyval # create plenty of data-slots to fill\n\n if len(Bscols) == 1: # ie., if no btw-subj factors\n # 1 (below) needed because we need 2D array even w/ only 1 group of subjects\n subjslots = N.ones((Nsubjects,1))\n else: # create array to hold 1s (subj present) and 0s (subj absent)\n subjslots = N.zeros(Nblevels)\n for i in range(len(data)): # for every datapoint given as input\n idx = []\n for j in range(Nfactors+1): # get n-D bin idx for this datapoint\n new = alluniqueslist[j].index(data[i][j])\n idx.append(new)\n DA[idx] = data[i][-1] # put this data point in proper place in DA\n btwidx = N.take(idx,N.array(Bscols))\n subjslots[btwidx] = 1\n # DONE CREATING DATA ARRAY, DA ... #dims = numfactors+1, dim 0=subjects\n # dim -1=measured values, dummyval = values used to fill empty slots in DA\n\n # PREPARE FOR MAIN LOOP\n dcount = -1 # prepare for pre-increment of D-variable pointer\n Bwsources = [] # binary #s, each=source containing w/i subj factors\n Bwonly_sources = [] # binary #s, each=source of w/i-subj-ONLY factors\n D = N.zeros(Nwsources,N.PyObject) # one slot for each Dx,2**Nwifactors\n DM = [0] *Nwsources # Holds arrays of cell-means\n DN = [0] *Nwsources # Holds arrays of cell-ns\n\n # BEGIN MAIN LOOP!!!!!\n # BEGIN MAIN LOOP!!!!!\n # BEGIN MAIN LOOP!!!!!\n for source in range(3,Nallsources,2): # all sources that incl. subjects\n if ((source-1) & Bwithins) != 0: # 1 or more w/i subj sources?\n Bwsources.append(source-1) # add it to a list\n #\n # WITHIN-SUBJECT-ONLY TERM? IF SO ... NEED TO CALCULATE NEW D-VARIABLE\n # (per Maxwell & Delaney pp.622-4)\n if subset((source-1),Bwithins):\n # Keep track of which D-var set we're working with (De, Df, Def, etc.)\n dcount = dcount + 1\n Bwonly_sources.append(source-1) #add source, minus subj,to list\n dwsc = 1.0 * DA # get COPY of w/i-subj data array\n # Find all non-source columns, note ~source alone (below) -> negative number\n Bnonsource = (Nallsources-1) & ~source\n Bwscols = makebin(Wscols) # make a binary version of Wscols\n # Figure out which cols from the ORIGINAL (input) data matrix are both non-\n # source and also within-subj vars (excluding subjects col)\n Bwithinnonsource = Bnonsource & Bwscols\n\n # Next, make a list of the above. The list is a list of dimensions in DA\n # because DA has the same number of dimensions as there are factors\n # (including subjects), but with extra dummyval='-1' values the original\n # data array (assuming between-subj vars exist)\n Lwithinnonsource = makelist(Bwithinnonsource,Nfactors+1)\n\n # Collapse all non-source, w/i subj dims, FROM THE END (otherwise the\n # dim-numbers change as you collapse). 
THIS WORKS BECAUSE WE'RE\n # COLLAPSING ACROSS W/I SUBJECT DIMENSIONS, WHICH WILL ALL HAVE THE\n # SAME SUBJ IN THE SAME ARRAY LOCATIONS (i.e., dummyvals will still exist\n # but should remain the same value through the amean() function\n for i in range(len(Lwithinnonsource)-1,-1,-1):\n dwsc = amean(dwsc,Lwithinnonsource[i])\n mns = dwsc\n\n # NOW, ACTUALLY COMPUTE THE D-VARIABLE ENTRIES FROM DA\n # CREATE LIST OF COEFF-COMBINATIONS TO DO (len=e-1, f-1, (e-1)*(f-1), etc...)\n #\n # Figure out which cols are both source and within-subjects, including col 0\n Bwithinsource = source & Bwscols\n # Make a list of within-subj cols, incl subjects col (0)\n Lwithinsourcecol = makelist(Bwithinsource, Nfactors+1)\n # Make a list of cols that are source within-subj OR btw-subj\n Lsourceandbtws = makelist(source | Bbetweens, Nfactors+1)\n if Lwithinnonsource != []:\n Lwithinsourcecol = list(map(Lsourceandbtws.index,Lwithinsourcecol))\n # Now indxlist should hold a list of indices into the list of possible\n # coefficients, one row per combo of coefficient. Next line PRESERVES dummyval\n dvarshape = N.array(N.take(mns.shape,Lwithinsourcecol[1:])) -1\n idxarray = N.indices(dvarshape)\n newshape = N.array([idxarray.shape[0],\n N.multiply.reduce(idxarray.shape[1:])])\n indxlist = N.swapaxes(N.reshape(idxarray,newshape),0,1)\n\n # The following is what makes the D-vars 2D. It takes an n-dim array\n # and retains the first (num of factors) dim while making the 2nd dim\n # equal to the total number of source within-subject cells.\n\n #\n # CREATE ALL D-VARIABLES FOR THIS COMBINATION OF FACTORS\n #\n for i in range(len(indxlist)):\n #\n # FILL UP COEFFMATRIX (OF SHAPE = MNS) WITH CORRECT COEFFS FOR 1 D-VAR\n #\n coeffmatrix = N.ones(mns.shape,N.Float) # fewer dims than DA (!!)\n # Make a list of dim #s that are both in source AND w/i subj fact, incl subj\n Wsourcecol = makelist(Bwscols&source,Nfactors+1)\n # Fill coeffmatrix with a complete set of coeffs (1 per w/i-source factor)\n for wfactor in range(len(Lwithinsourcecol[1:])):\n #put correct coeff. 
axis as first axis, or \"swap it up\"\n coeffmatrix = N.swapaxes(coeffmatrix,0,\n Lwithinsourcecol[wfactor+1])\n # Find appropriate ROW of (static) coefflist we need\n nlevels = coeffmatrix.shape[0]\n # Get the next coeff in that row\n try:\n nextcoeff = coefflist[nlevels-1][indxlist[i,wfactor]]\n except IndexError:\n raise IndexError(\"anova() can only handle up to 10 levels on a within-subject factors\")\n for j in range(nlevels):\n coeffmatrix[j] = coeffmatrix[j] * nextcoeff[j]\n # Swap it back to where it came from\n coeffmatrix = N.swapaxes(coeffmatrix,0,\n Lwithinsourcecol[wfactor+1])\n\n # CALCULATE D VARIABLE\n scratch = coeffmatrix * mns\n # Collapse all dimensions EXCEPT subjects dim (dim 0)\n for j in range(len(coeffmatrix.shape[1:])):\n scratch = N.add.reduce(scratch,1)\n if len(scratch.shape) == 1:\n scratch.shape = list(scratch.shape)+[1]\n try:\n # Tack this column onto existing ones\n tmp = D[dcount].shape\n D[dcount] = pstat.aabut(D[dcount],scratch)\n except AttributeError: # i.e., D[dcount]=integer/float\n # If this is the first, plug it in\n D[dcount] = scratch\n\n\n # Big long thing to create DMarray (list of DM variables) for this source\n variables = D[dcount].shape[1] # Num variables for this source\n tidx = list(range(1,len(subjslots.shape))) + [0] # [0] = Ss dim\n tsubjslots = N.transpose(subjslots,tidx) # put Ss in last dim\n DMarray = N.zeros(list(tsubjslots.shape[0:-1]) +\n [variables],'f') # btw-subj dims, then vars\n DNarray = N.zeros(list(tsubjslots.shape[0:-1]) +\n [variables],'f') # btw-subj dims, then vars\n idx = [0] *len(tsubjslots.shape[0:-1])\n idx[0] = -1\n loopcap = N.array(tsubjslots.shape[0:-1]) -1\n while incr(idx,loopcap) != -1:\n DNarray[idx] = float(asum(tsubjslots[idx]))\n thismean = (N.add.reduce(tsubjslots[idx] * # 1=subj dim\n N.transpose(D[dcount]),1) /\n DNarray[idx])\n thismean = N.array(thismean,N.PyObject)\n DMarray[idx] = thismean\n DM[dcount] = DMarray\n DN[dcount] = DNarray\n\n #\n # DONE CREATING M AND D VARIABLES ... TIME FOR SOME SS WORK\n # DONE CREATING M AND D VARIABLES ... TIME FOR SOME SS WORK\n #\n if Bscols[1:] != []:\n BNs = pstat.colex([Nlevels],Bscols[1:])\n else:\n BNs = [1]\n #\n # FIGURE OUT WHICH VARS TO RESTRICT, see p.680 (Maxwell&Delaney)\n #\n # BETWEEN-SUBJECTS VARIABLES ONLY, use M variable for analysis\n #\n if ((source-1) & Bwithins) == 0: # btw-subjects vars only?\n sourcecols = makelist(source-1,Nfactors+1)\n\n # Determine cols (from input list) required for n-way interaction\n Lsource = makelist((Nallsources-1)&Bbetweens,Nfactors+1)\n # NOW convert this list of between-subject column numbers to a list of\n # DIMENSIONS in M, since M has fewer dims than the original data array\n # (assuming within-subj vars exist); Bscols has list of between-subj cols\n # from input list, the indices of which correspond to that var's loc'n in M\n btwcols = list(map(Bscols.index,Lsource))\n # Obviously-needed loop to get cell means is embedded in the collapse fcn, -1\n # represents last (measured-variable) column, None=std, 1=retain Ns\n\n hn = aharmonicmean(Narray,-1) # -1=unravel first\n\n # CALCULATE SSw ... 
SUBTRACT APPROPRIATE CELL MEAN FROM EACH SUBJ SCORE\n SSw = 0.0\n idxlist = pstat.unique(pstat.colex(M,btwcols))\n for row in M:\n idx = []\n for i in range(len(row[:-1])):\n idx.append(alluniqueslist[Bscols[i]].index(row[i]))\n idx = idx[1:] # Strop off Ss col/dim\n newval = row[-1] - Marray[idx]\n SSw = SSw + (newval)**2\n\n # Determine which cols from input are required for this source\n Lsource = makelist(source-1,Nfactors+1)\n # NOW convert this list of between-subject column numbers to a list of\n # DIMENSIONS in M, since M has fewer dims than the original data array\n # (assuming within-subj vars exist); Bscols has list of between-subj cols\n # from input list, the indices of which correspond to that var's loc'n in M\n btwsourcecols = (N.array(list(map(Bscols.index,Lsource)))-1).tolist()\n\n # Average Marray and get harmonic means of Narray OVER NON-SOURCE DIMS\n Bbtwnonsourcedims = ~source & Bbetweens\n Lbtwnonsourcedims = makelist(Bbtwnonsourcedims,Nfactors+1)\n btwnonsourcedims = (N.array(list(map(Bscols.index,Lbtwnonsourcedims)))-1).tolist()\n\n ## Average Marray over non-source dimensions (1=keep squashed dims)\n sourceMarray = amean(Marray,btwnonsourcedims,1)\n\n ## Calculate harmonic means for each level in source\n sourceNarray = aharmonicmean(Narray,btwnonsourcedims,1)\n\n ## Calc grand average (ga), used for ALL effects\n ga = asum((sourceMarray*sourceNarray)/\n asum(sourceNarray))\n ga = N.reshape(ga,N.ones(len(Marray.shape)))\n\n ## If GRAND interaction, use harmonic mean of ALL cell Ns\n if source == Nallsources-1:\n sourceNarray = aharmonicmean(Narray)\n\n ## Calc all SUBSOURCES to be subtracted from sourceMarray (M&D p.320)\n sub_effects = 1.0 * ga # start with grand mean\n for subsource in range(3,source,2):\n ## Make a list of the non-subsource dimensions\n if subset(subsource-1,source-1):\n sub_effects = (sub_effects +\n alleffects[alleffsources.index(subsource)])\n ## Calc this effect (a(j)'s, b(k)'s, ab(j,k)'s, whatever)\n effect = sourceMarray - sub_effects\n\n ## Save it so you don't have to calculate it again next time\n alleffects.append(effect)\n alleffsources.append(source)\n\n ## Calc and save sums of squares for this source\n SS = asum((effect**2 *sourceNarray) *\n N.multiply.reduce(N.take(Marray.shape,btwnonsourcedims)))\n ## Save it so you don't have to calculate it again next time\n SSlist.append(SS)\n SSsources.append(source)\n\n collapsed = pstat.collapse(M,btwcols,-1,None,len,mean)\n # Obviously needed for-loop to get source cell-means embedded in collapse fcns\n contrastmns = pstat.collapse(collapsed,btwsourcecols,-2,sterr,len,mean)\n # Collapse again, this time SUMMING instead of averaging (to get cell Ns)\n contrastns = pstat.collapse(collapsed,btwsourcecols,-1,None,None,\n N.sum)\n # Collapse again, this time calculating harmonicmeans (for hns)\n contrasthns = pstat.collapse(collapsed,btwsourcecols,-1,None,None,\n harmonicmean)\n # CALCULATE *BTW-SUBJ* dfnum, dfden\n sourceNs = pstat.colex([Nlevels],makelist(source-1,Nfactors+1))\n dfnum = N.multiply.reduce(N.ravel(N.array(sourceNs)-1))\n dfden = Nsubjects - N.multiply.reduce(N.ravel(BNs))\n\n # CALCULATE MS, MSw, F AND PROB FOR ALL-BETWEEN-SUBJ SOURCES ONLY\n MS = SS / dfnum\n MSw = SSw / dfden\n if MSw != 0:\n f = MS / MSw\n else:\n f = 0 # i.e., absolutely NO error in the full model\n\n if f >= 0:\n prob = fprob(dfnum, dfden, f)\n else:\n prob = 1.0\n # Now this falls thru to output stage\n\n #\n # SOME WITHIN-SUBJECTS FACTORS TO DEAL WITH ... 
use appropriate D variable\n #\n else: # Source has some w/i subj factors\n # FIGURE OUT WHICH D-VAR TO USE BASED ON WHICH W/I-SUBJ FACTORS ARE IN SOURCE\n # Determine which w/i-subj factors are in this source\n sourcewithins = (source-1) & Bwithins\n # Use D-var that was created for that w/i subj combo (the position of that\n # source within Bwsources determines the index of that D-var in D)\n workD = D[Bwonly_sources.index(sourcewithins)]\n\n # CALCULATE Er, Ef\n ## Set up workD and subjslots for upcoming calcs\n if len(workD.shape)==1:\n workD = workD[:,N.NewAxis]\n if len(subjslots.shape)==1:\n subjslots = subjslots[:,N.NewAxis]\n\n ## Calculate full-model sums of squares\n ef = Dfull_model(workD,subjslots) # Uses cell-means model\n\n #\n # **ONLY** WITHIN-SUBJECT VARIABLES TO CONSIDER\n #\n if subset((source-1),Bwithins):\n # restrict grand mean, as per M&D p.680\n er = Drestrict_mean(workD,subjslots)\n #\n # **BOTH** WITHIN- AND BETWEEN-SUBJECTS VARIABLES TO CONSIDER\n #\n else:\n er = Drestrict_source(workD,subjslots,source) + ef\n SSw = LA.determinant(ef)\n SS = LA.determinant(er) - SSw\n\n # CALCULATE *W/I-SUBJ* dfnum, dfden\n sourceNs = pstat.colex([Nlevels],makelist(source,Nfactors+1))\n # Calculation of dfnum is straightforward regardless\n dfnum = N.multiply.reduce(N.ravel(N.array(sourceNs)-1)[1:])\n # If only within-subject factors are involved, dfden is straightforward\n if subset(source-1,Bwithins):\n dfden = Nsubjects -N.multiply.reduce(N.ravel(BNs))-dfnum +1\n MS = SS / dfnum\n MSw = SSw / dfden\n if MSw != 0:\n f = MS / MSw\n else:\n f = 0 # i.e., absolutely NO error in full model\n\n if f >= 0:\n prob = fprob(dfnum, dfden, f)\n else:\n prob = 1.0\n\n # If combined within-between source, must use Rao's approximation for dfden\n # from Tatsuoka, MM (1988) Multivariate Analysis (2nd Ed), MacMillan: NY p93\n else: # it's a within-between combo source\n try:\n p = workD.shape[1]\n except IndexError:\n p = 1\n k = N.multiply.reduce(N.ravel(BNs))\n m = Nsubjects -1 -(p+k)/2.0\n d_en = float(p**2 + (k-1)**2 - 5)\n if d_en == 0.0:\n s = 1.0\n else:\n s = math.sqrt(((p*(k-1))**2-4) / d_en)\n dfden = m*s - dfnum/2.0 + 1\n\n # Given a within-between combined source, Wilk's Lambda is appropriate\n if LA.determinant(er) != 0:\n lmbda = LA.determinant(ef) / LA.determinant(er)\n W = math.pow(lmbda,(1.0/s))\n f = ((1.0-W)/W) * (dfden/dfnum)\n else:\n f = 0 # i.e., absolutely NO error in restricted model\n\n if f >= 0:\n prob = fprob(dfnum,dfden,f)\n else:\n prob = 1.0\n\n #\n # CREATE STRING-LIST FOR RESULTS FROM THIS PARTICULAR SOURCE\n #\n suffix = '' # for *s after the p-value\n if prob < 0.001: suffix = '***'\n elif prob < 0.01: suffix = '**'\n elif prob < 0.05: suffix = '*'\n adjsourcecols = N.array(makelist(source-1,Nfactors+1)) -1\n thiseffect = ''\n for col in adjsourcecols:\n if len(adjsourcecols) > 1:\n thiseffect = thiseffect + effects[col][0]\n else:\n thiseffect = thiseffect + (effects[col])\n outputlist = (outputlist\n # These terms are for the numerator of the current effect/source\n + [[thiseffect, round4(SS),dfnum,\n round4(SS/float(dfnum)),round4(f),\n round4(prob),suffix]]\n # These terms are for the denominator for the current effect/source\n + [[thiseffect+'/w', round4(SSw),dfden,\n round4(SSw/float(dfden)),'','','']]\n + [['\\n']])\n\n #\n # PRINT OUT ALL MEANS AND Ns FOR THIS SOURCE (i.e., this combo of factors)\n #\n Lsource = makelist(source-1,Nfactors+1)\n collapsed = pstat.collapse(cdata,Lsource,-1,sterr,len,mean)\n\n # First, get the list of level-combos for 
source cells\n prefixcols = list(range(len(collapsed[0][:-3])))\n outlist = pstat.colex(collapsed,prefixcols)\n # Start w/ factor names (A,B,C, or ones input to anova())\n eff = []\n for col in Lsource:\n eff.append(effects[col-1])\n # Add in the mean and N labels for printout\n for item in ['MEAN','STERR','N']:\n eff.append(item)\n # To the list of level-combos, abut the corresp. means and Ns\n outlist = pstat.abut(outlist,\n list(map(round4,pstat.colex(collapsed,-3))),\n list(map(round4,pstat.colex(collapsed,-2))),\n list(map(round4,pstat.colex(collapsed,-1))))\n outlist = [eff] + outlist # add titles to the top of the list\n pstat.printcc(outlist) # print it in customized columns\n print()\n\n\n###\n### OUTPUT FINAL RESULTS (ALL SOURCES TOGETHER)\n### Note: All 3 types of source-calcs fall through to here\n###\n print()\n title = [['FACTORS: ','RANDOM'] + effects[:Nfactors]]\n title = title + [['LEVELS: ']+Nlevels]\n facttypes = ['BETWEEN']*Nfactors\n for i in range(len(Wscols[1:])):\n facttypes[Wscols[i+1]-1] = 'WITHIN'\n title = title + [['TYPE: ','RANDOM']+facttypes]\n pstat.printcc(title)\n print()\n\n title = [['Effect','SS','DF','MS','F','p','sig']] + ['dashes']\n outputlist = title + outputlist\n pstat.printcc(outputlist)\n return\n\n\n def Dfull_model(workd,subjslots):\n \"\"\"\n RESTRICTS NOTHING (i.e., FULL MODEL CALCULATION). Subtracts D-variable\n cell-mean for each between-subj group and then calculates the SS array.\n \"\"\"\n workd = subtr_cellmeans(workd,subjslots)\n sserr = multivar_SScalc(workd)\n return sserr\n\n\n def Drestrict_mean(workd,subjslots):\n \"\"\"\n RESTRICTS GRAND MEAN. Subtracts D-variable cell-mean for each between-\n subj group, and then adds back each D-variable's grand mean.\n \"\"\"\n # subtract D-variable cell-mean for each (btw-subj) group\n errors = subtr_cellmeans(workd,subjslots)\n\n # add back in appropriate grand mean from individual scores\n grandDmeans = amean(workd,0,1)\n errors = errors + N.transpose(grandDmeans) # errors has reversed dims!!\n # SS for mean-restricted model is calculated below. Note: already put\n # subj as last dim because later code expects this code here to leave\n # workd that way\n sserr = multivar_SScalc(errors)\n return sserr\n\n\n def Drestrict_source(workd,subjslots,source):\n \"\"\"\n Calculates error for a given model on array workd. Subjslots is an\n array of 1s and 0s corresponding to whether or not the subject is a\n member of that between-subjects variable combo. source is the code\n for the type of model to calculate. 
source=-1 means no restriction;\n source=0 means to restrict workd's grand mean; source>0 means to\n restrict the columns of the main data array, DA, specified (in binary)\n by the source-value.\n\n Usage: Derrorcalc(workd,subjslots,source) source:-1=nothing, 0=mean\n Returns: SS array for multivariate F calculation\n \"\"\"\n###\n### RESTRICT COLUMNS/DIMENSIONS SPECIFIED IN source (BINARY)\n### (i.e., is the value of source not equal to 0 or -1?)\n###\n if source > 0:\n sourcewithins = (source-1) & Bwithins\n sourcebetweens = (source-1) & Bbetweens\n dindex = Bwonly_sources.index(sourcewithins)\n all_cellmeans = N.transpose(DM[dindex],[-1]+list(range(0,len(DM[dindex].shape)-1)))\n all_cellns = N.transpose(DN[dindex],[-1]+list(range(0,len(DN[dindex].shape)-1)))\n hn = aharmonicmean(all_cellns)\n\n levels = D[dindex].shape[1] # GENERAL, 'cause each workd is always 2D\n SSm = N.zeros((levels,levels),'f') #called RCm=SCm in Lindman,p.317-8\n tworkd = N.transpose(D[dindex])\n\n ## Calculate SSw, within-subj variance (Lindman approach)\n RSw = N.zeros((levels,levels),'f')\n RSinter = N.zeros((levels,levels),N.PyObject)\n for i in range(levels):\n for j in range(i,levels):\n RSw[i,j] = RSw[j,i] = N.sum(tworkd[i]*tworkd[j])\n cross = all_cellmeans[i] * all_cellmeans[j]\n multfirst = asum(cross*all_cellns[i])\n RSinter[i,j] = RSinter[j,i] = N.asarray(multfirst)\n SSm[i,j] = SSm[j,i] = (amean(all_cellmeans[i]) *\n amean(all_cellmeans[j]) *\n len(all_cellmeans[i]) *hn)\n SSw = RSw - RSinter\n\n### HERE BEGINS THE MAXWELL & DELANEY APPROACH TO CALCULATING SS\n Lsource = makelist(sourcebetweens,Nfactors+1)\n btwsourcecols = (N.array(list(map(Bscols.index,Lsource)))-1).tolist()\n Bbtwnonsourcedims = ~source & Bbetweens\n Lbtwnonsourcedims = makelist(Bbtwnonsourcedims,Nfactors+1)\n btwnonsourcedims = (N.array(list(map(Bscols.index,Lbtwnonsourcedims)))-1).tolist()\n\n ## Average Marray over non-source dimensions\n sourceDMarray = DM[dindex] *1.0\n for dim in btwnonsourcedims: # collapse all non-source dims\n if dim == len(DM[dindex].shape)-1:\n raise ValueError(\"Crashing ... 
shouldn't ever collapse ACROSS variables\")\n sourceDMarray = amean(sourceDMarray,dim,1)\n\n ## Calculate harmonic means for each level in source\n sourceDNarray = aharmonicmean(DN[dindex],btwnonsourcedims,1)\n\n ## Calc grand average (ga), used for ALL effects\n variableNs = asum(sourceDNarray,\n list(range(len(sourceDMarray.shape)-1)))\n ga = asum((sourceDMarray*sourceDNarray) /\n variableNs,\n list(range(len(sourceDMarray.shape)-1)),1)\n\n ## If GRAND interaction, use harmonic mean of ALL cell Ns\n if source == Nallsources-1:\n sourceDNarray = aharmonicmean(DN[dindex],\n list(range(len(sourceDMarray.shape)-1)))\n\n ## Calc all SUBSOURCES to be subtracted from sourceMarray (M&D p.320)\n sub_effects = ga *1.0 # start with grand mean\n for subsource in range(3,source-2,2):\n ## Make a list of the non-subsource dimensions\n subsourcebtw = (subsource-1) & Bbetweens\n if (propersubset(subsource-1,source-1) and\n (subsource-1)&Bwithins == (source-1)&Bwithins and\n (subsource-1) != (source-1)&Bwithins):\n sub_effects = (sub_effects +\n alleffects[alleffsources.index(subsource)])\n\n ## Calc this effect (a(j)'s, b(k)'s, ab(j,k)'s, whatever)\n effect = sourceDMarray - sub_effects\n\n ## Save it so you don't have to calculate it again next time\n alleffects.append(effect)\n alleffsources.append(source)\n\n ## Calc and save sums of squares for this source\n SS = N.zeros((levels,levels),'f')\n SS = asum((effect**2 *sourceDNarray) *\n N.multiply.reduce(N.take(DM[dindex].shape,btwnonsourcedims)),\n list(range(len(sourceDMarray.shape)-1)))\n ## Save it so you don't have to calculate it again next time\n SSlist.append(SS)\n SSsources.append(source)\n\n return SS\n\n\n def multivar_SScalc(workd):\n###\n### DO SS CALCS ON THE OUTPUT FROM THE SOURCE=0 AND SOURCE=-1 CASES\n###\n # this section expects workd to have subj. in LAST dimension!!!!!!\n if len(workd.shape) == 1:\n levels = 1\n else:\n levels = workd.shape[0] # works because workd is always 2D\n\n sserr = N.zeros((levels,levels),'f')\n for i in range(levels):\n for j in range(i,levels):\n ssval = N.add.reduce(workd[i]*workd[j])\n sserr[i,j] = ssval\n sserr[j,i] = ssval\n return sserr\n\n\n def subtr_cellmeans(workd,subjslots):\n \"\"\"\n Subtract all cell means when within-subjects factors are present ...\n i.e., calculate full-model using a D-variable.\n \"\"\"\n # Get a list of all dims that are source and between-subj\n sourcedims = makelist(Bbetweens,Nfactors+1)\n\n # Now, fix this list by mapping the dims from the original source\n # to dims for a between-subjects variable (namely, subjslots)\n transidx = list(range(len(subjslots.shape)))[1:] + [0] # put subj dim at end\n tsubjslots = N.transpose(subjslots,transidx) # get all Ss for this idx\n tworkd = N.transpose(workd) # swap subj. 
and variable dims\n errors = 1.0 * tworkd\n\n if len(sourcedims) == 0:\n idx = [-1]\n loopcap = [0]\n if len(sourcedims) != 0:\n btwsourcedims = list(map(Bscols.index,sourcedims))\n idx = [0] * len(btwsourcedims)\n idx[0] = -1 # compensate for pre-increment of 1st slot in incr()\n\n # Get a list of the maximum values each factor can handle\n loopcap = N.take(N.array(Nlevels),sourcedims)-1\n\n### WHILE STILL MORE GROUPS, CALCULATE GROUP MEAN FOR EACH D-VAR\n while incr(idx,loopcap) != -1: # loop through source btw level-combos\n mask = tsubjslots[idx]\n thisgroup = tworkd*mask[N.NewAxis,:]\n groupmns = amean(N.compress(mask,thisgroup),1)\n\n### THEN SUBTRACT THEM FROM APPROPRIATE SUBJECTS\n errors = errors - N.multiply.outer(groupmns,mask)\n return errors\n\n\n def F_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):\n \"\"\"\n Calculation of Wilks lambda F-statistic for multivarite data, per\n Maxwell & Delaney p.657.\n\n Usage: F_value_wilks_lambda(ER,EF,dfnum,dfden,a,b)\n \"\"\"\n if type(ER) in [int, float]:\n ER = N.array([[ER]])\n if type(EF) in [int, float]:\n EF = N.array([[EF]])\n lmbda = LA.determinant(EF) / LA.determinant(ER)\n if (a-1)**2 + (b-1)**2 == 5:\n q = 1\n else:\n q = math.sqrt( ((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 -5) )\n n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)\n d_en = lmbda**(1.0/q) / (m*q - 0.5*(a-1)*(b-1) + 1)\n return n_um / d_en\n\n def member(factor,source):\n return (1 << factor) & source != 0\n\n def setsize(source):\n size = 0\n for bit in source:\n if bit == 1:\n size = size + 1\n return size\n\n def subset (a,b):\n return (a&b)==a\n\n def propersubset (a,b):\n sub = ((a&b)==a)\n if a==b:\n sub = 0\n return sub\n\n def numlevels(source,Nlevels):\n for i in range(30): # find the biggest i such that 2**i >= source\n if 1<<i >= source:\n break\n levelcount = 1\n for j in range(i): # loop up through each bit\n if subset(1<<j,source):\n levelcount = levelcount * Nlevels[j] - 1\n return levelcount\n\n def numbitson(a):\n numon = 0\n while a>0:\n numon = numon + a%2\n a = a>>1\n return numon\n\n def makebin(sourcelist):\n outbin = 0\n for item in sourcelist:\n outbin = outbin + 2**item\n return outbin\n\n def makelist(source,ncols):\n levellist = []\n for j in range(ncols):\n if subset(1<<j,source):\n levellist.append(j)\n return levellist\n\n def round4(num):\n try:\n return round(num,4)\n except:\n return 'N/A'\n" ]
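A minimal usage sketch for a few of the list helpers documented above, assuming the module is importable as "from statlib import pstat" (the same import the stats file in this record uses) and that six is installed, since this copy pulls map/range from six.moves. Helpers that still carry Python-2 idioms in this copy are deliberately skipped: list2string calls the removed string.join, the string forms of colex, dm, linexand and linexor build map/filter expressions via eval (which return iterators, not lists, under Python 3), and abut computes its repeat count with true division, so unequal-length inputs fail until that is changed to //.

# Usage sketch (assumptions: statlib is importable as above; six is installed).
from statlib import pstat

rows = [['s1', 'short', 14.7],
        ['s2', 'long', 12.3],
        ['s3', 'short', 14.7]]

print(pstat.colex(rows, 0))                       # ['s1', 's2', 's3']  -- single column
print(pstat.colex(rows, [1, 2]))                  # [['short', 14.7], ['long', 12.3], ['short', 14.7]]
print(pstat.abut(['s1', 's2', 's3'], [1, 2, 3]))  # [['s1', 1], ['s2', 2], ['s3', 3]] (equal-length lists)
print(pstat.unique(pstat.colex(rows, 1)))         # ['short', 'long']
print(pstat.duplicates(pstat.colex(rows, 2)))     # [14.7]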
[ [ "numpy.resize", "numpy.take", "numpy.reshape", "numpy.sort", "numpy.concatenate", "numpy.shape", "numpy.equal", "numpy.ravel", "numpy.array", "numpy.where", "numpy.sum" ], [ "numpy.multiply.reduce", "numpy.swapaxes", "numpy.linalg.determinant", "numpy.take", "numpy.reshape", "numpy.asarray", "numpy.add.reduce", "numpy.indices", "numpy.compress", "numpy.ones", "numpy.multiply.outer", "numpy.transpose", "numpy.ravel", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nbl97/nni
[ "1530339d3e964a5ea95a0afde1775ec9167cdcc0", "1530339d3e964a5ea95a0afde1775ec9167cdcc0" ]
[ "nni/retiarii/nn/pytorch/cell.py", "examples/model_compress/pruning/simple_pruning_torch.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport copy\nimport warnings\nfrom typing import Callable, Dict, List, Union, Optional, Tuple, Sequence, cast\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal\n\nimport torch\nimport torch.nn as nn\n\nfrom .api import ChosenInputs, LayerChoice, InputChoice\nfrom .nn import ModuleList # pylint: disable=no-name-in-module\nfrom .mutation_utils import generate_new_label\n\n\nclass _ListIdentity(nn.Identity):\n # workaround for torchscript\n def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:\n return x\n\n\nclass _DefaultPostprocessor(nn.Module):\n # this is also a workaround for torchscript\n\n def forward(self, this_cell: torch.Tensor, prev_cell: List[torch.Tensor]) -> torch.Tensor:\n return this_cell\n\n\n_cell_op_factory_type = Callable[[int, int, Optional[int]], nn.Module]\n\n\nclass Cell(nn.Module):\n \"\"\"\n Cell structure that is popularly used in NAS literature.\n\n Find the details in:\n\n * `Neural Architecture Search with Reinforcement Learning <https://arxiv.org/abs/1611.01578>`__.\n * `Learning Transferable Architectures for Scalable Image Recognition <https://arxiv.org/abs/1707.07012>`__.\n * `DARTS: Differentiable Architecture Search <https://arxiv.org/abs/1806.09055>`__\n\n `On Network Design Spaces for Visual Recognition <https://arxiv.org/abs/1905.13214>`__\n is a good summary of how this structure works in practice.\n\n A cell consists of multiple \"nodes\". Each node is a sum of multiple operators. Each operator is chosen from\n ``op_candidates``, and takes one input from previous nodes and predecessors. Predecessor means the input of cell.\n The output of cell is the concatenation of some of the nodes in the cell (by default all the nodes).\n\n Two examples of searched cells are illustrated in the figure below.\n In these two cells, ``op_candidates`` are series of convolutions and pooling operations.\n ``num_nodes_per_node`` is set to 2. ``num_nodes`` is set to 5. ``merge_op`` is ``loose_end``.\n Assuming nodes are enumerated from bottom to top, left to right,\n ``output_node_indices`` for the normal cell is ``[2, 3, 4, 5, 6]``.\n For the reduction cell, it's ``[4, 5, 6]``.\n Please take a look at this\n `review article <https://sh-tsang.medium.com/review-nasnet-neural-architecture-search-network-image-classification-23139ea0425d>`__\n if you are interested in details.\n\n .. image:: ../../../img/nasnet_cell.png\n :width: 900\n :align: center\n\n Here is a glossary table, which could help better understand the terms used above:\n\n .. list-table::\n :widths: 25 75\n :header-rows: 1\n\n * - Name\n - Brief Description\n * - Cell\n - A cell consists of ``num_nodes`` nodes.\n * - Node\n - A node is the **sum** of ``num_ops_per_node`` operators.\n * - Operator\n - Each operator is independently chosen from a list of user-specified candidate operators.\n * - Operator's input\n - Each operator has one input, chosen from previous nodes as well as predecessors.\n * - Predecessors\n - Input of cell. A cell can have multiple predecessors. Predecessors are sent to *preprocessor* for preprocessing.\n * - Cell's output\n - Output of cell. Usually concatenation of some nodes (possibly all nodes) in the cell. Cell's output,\n along with predecessors, are sent to *postprocessor* for postprocessing.\n * - Preprocessor\n - Extra preprocessing to predecessors. 
Usually used in shape alignment (e.g., predecessors have different shapes).\n By default, do nothing.\n * - Postprocessor\n - Extra postprocessing for cell's output. Usually used to chain cells with multiple Predecessors\n (e.g., the next cell wants to have the outputs of both this cell and previous cell as its input).\n By default, directly use this cell's output.\n\n .. tip::\n\n It's highly recommended to make the candidate operators have an output of the same shape as input.\n This is because, there can be dynamic connections within cell. If there's shape change within operations,\n the input shape of the subsequent operation becomes unknown.\n In addition, the final concatenation could have shape mismatch issues.\n\n Parameters\n ----------\n op_candidates : list of module or function, or dict\n A list of modules to choose from, or a function that accepts current index and optionally its input index, and returns a module.\n For example, (2, 3, 0) means the 3rd op in the 2nd node, accepts the 0th node as input.\n The index are enumerated for all nodes including predecessors from 0.\n When first created, the input index is ``None``, meaning unknown.\n Note that in graph execution engine, support of function in ``op_candidates`` is limited.\n num_nodes : int\n Number of nodes in the cell.\n num_ops_per_node: int\n Number of operators in each node. The output of each node is the sum of all operators in the node. Default: 1.\n num_predecessors : int\n Number of inputs of the cell. The input to forward should be a list of tensors. Default: 1.\n merge_op : \"all\", or \"loose_end\"\n If \"all\", all the nodes (except predecessors) will be concatenated as the cell's output, in which case, ``output_node_indices``\n will be ``list(range(num_predecessors, num_predecessors + num_nodes))``.\n If \"loose_end\", only the nodes that have never been used as other nodes' inputs will be concatenated to the output.\n Predecessors are not considered when calculating unused nodes.\n Details can be found in `NDS paper <https://arxiv.org/abs/1905.13214>`__. Default: all.\n preprocessor : callable\n Override this if some extra transformation on cell's input is intended.\n It should be a callable (``nn.Module`` is also acceptable) that takes a list of tensors which are predecessors,\n and outputs a list of tensors, with the same length as input.\n By default, it does nothing to the input.\n postprocessor : callable\n Override this if customization on the output of the cell is intended.\n It should be a callable that takes the output of this cell, and a list which are predecessors.\n Its return type should be either one tensor, or a tuple of tensors.\n The return value of postprocessor is the return value of the cell's forward.\n By default, it returns only the output of the current cell.\n concat_dim : int\n The result will be a concatenation of several nodes on this dim. Default: 1.\n label : str\n Identifier of the cell. 
Cell sharing the same label will semantically share the same choice.\n\n Examples\n --------\n Choose between conv2d and maxpool2d.\n The cell have 4 nodes, 1 op per node, and 2 predecessors.\n\n >>> cell = nn.Cell([nn.Conv2d(32, 32, 3, padding=1), nn.MaxPool2d(3, padding=1)], 4, 1, 2)\n\n In forward:\n\n >>> cell([input1, input2])\n\n The \"list bracket\" can be omitted:\n\n >>> cell(only_input) # only one input\n >>> cell(tensor1, tensor2, tensor3) # multiple inputs\n\n Use ``merge_op`` to specify how to construct the output.\n The output will then have dynamic shape, depending on which input has been used in the cell.\n\n >>> cell = nn.Cell([nn.Conv2d(32, 32, 3), nn.MaxPool2d(3)], 4, 1, 2, merge_op='loose_end')\n >>> cell_out_channels = len(cell.output_node_indices) * 32\n\n The op candidates can be callable that accepts node index in cell, op index in node, and input index.\n\n >>> cell = nn.Cell([\n ... lambda node_index, op_index, input_index: nn.Conv2d(32, 32, 3, stride=2 if input_index < 1 else 1),\n ... ], 4, 1, 2)\n\n Predecessor example: ::\n\n class Preprocessor:\n def __init__(self):\n self.conv1 = nn.Conv2d(16, 32, 1)\n self.conv2 = nn.Conv2d(64, 32, 1)\n\n def forward(self, x):\n return [self.conv1(x[0]), self.conv2(x[1])]\n\n cell = nn.Cell([nn.Conv2d(32, 32, 3), nn.MaxPool2d(3)], 4, 1, 2, preprocessor=Preprocessor())\n cell([torch.randn(1, 16, 48, 48), torch.randn(1, 64, 48, 48)]) # the two inputs will be sent to conv1 and conv2 respectively\n\n Warnings\n --------\n :class:`Cell` is not supported in :ref:`graph-based execution engine <graph-based-execution-engine>`.\n\n Attributes\n ----------\n output_node_indices : list of int\n An attribute that contains indices of the nodes concatenated to the output (a list of integers).\n\n When the cell is first instantiated in the base model, or when ``merge_op`` is ``all``,\n ``output_node_indices`` must be ``range(num_predecessors, num_predecessors + num_nodes)``.\n\n When ``merge_op`` is ``loose_end``, ``output_node_indices`` is useful to compute the shape of this cell's output,\n because the output shape depends on the connection in the cell, and which nodes are \"loose ends\" depends on mutation.\n \"\"\"\n\n def __init__(self,\n op_candidates: Union[\n Callable[[], List[nn.Module]],\n List[nn.Module],\n List[_cell_op_factory_type],\n Dict[str, nn.Module],\n Dict[str, _cell_op_factory_type]\n ],\n num_nodes: int,\n num_ops_per_node: int = 1,\n num_predecessors: int = 1,\n merge_op: Literal['all', 'loose_end'] = 'all',\n preprocessor: Optional[Callable[[List[torch.Tensor]], List[torch.Tensor]]] = None,\n postprocessor: Optional[Callable[[torch.Tensor, List[torch.Tensor]],\n Union[Tuple[torch.Tensor, ...], torch.Tensor]]] = None,\n concat_dim: int = 1,\n *,\n label: Optional[str] = None):\n super().__init__()\n self._label = generate_new_label(label)\n\n # modules are created in \"natural\" order\n # first create preprocessor\n self.preprocessor = preprocessor or _ListIdentity()\n # then create intermediate ops\n self.ops = ModuleList()\n self.inputs = ModuleList()\n # finally postprocessor\n self.postprocessor = postprocessor or _DefaultPostprocessor()\n\n self.num_nodes = num_nodes\n self.num_ops_per_node = num_ops_per_node\n self.num_predecessors = num_predecessors\n assert merge_op in ['all', 'loose_end']\n self.merge_op = merge_op\n self.output_node_indices = list(range(num_predecessors, num_predecessors + num_nodes))\n\n self.concat_dim = concat_dim\n\n # fill-in the missing modules\n 
self._create_modules(op_candidates)\n\n def _create_modules(self, op_candidates):\n for i in range(self.num_predecessors, self.num_nodes + self.num_predecessors):\n self.ops.append(ModuleList())\n self.inputs.append(ModuleList())\n for k in range(self.num_ops_per_node):\n inp = InputChoice(i, 1, label=f'{self.label}/input_{i}_{k}')\n chosen = None\n\n if isinstance(inp, ChosenInputs):\n # now we are in the fixed mode\n # the length of chosen should be 1\n chosen = inp.chosen[0]\n if self.merge_op == 'loose_end' and chosen in self.output_node_indices:\n # remove it from concat indices\n self.output_node_indices.remove(chosen)\n\n # this is needed because op_candidates can be very complex\n # the type annoation and docs for details\n ops = self._convert_op_candidates(op_candidates, i, k, chosen)\n\n # though it's layer choice and input choice here, in fixed mode, the chosen module will be created.\n cast(ModuleList, self.ops[-1]).append(LayerChoice(ops, label=f'{self.label}/op_{i}_{k}'))\n cast(ModuleList, self.inputs[-1]).append(inp)\n\n @property\n def label(self):\n return self._label\n\n def forward(self, *inputs: Union[List[torch.Tensor], torch.Tensor]) -> Union[Tuple[torch.Tensor, ...], torch.Tensor]:\n \"\"\"Forward propagation of cell.\n\n Parameters\n ----------\n inputs\n Can be a list of tensors, or several tensors.\n The length should be equal to ``num_predecessors``.\n\n Returns\n -------\n Tuple[torch.Tensor] | torch.Tensor\n The return type depends on the output of ``postprocessor``.\n By default, it's the output of ``merge_op``, which is a contenation (on ``concat_dim``)\n of some of (possibly all) the nodes' outputs in the cell.\n \"\"\"\n processed_inputs: List[torch.Tensor]\n if len(inputs) == 1 and isinstance(inputs[0], list):\n processed_inputs = list(inputs[0]) # shallow copy\n else:\n processed_inputs = cast(List[torch.Tensor], list(inputs))\n assert len(processed_inputs) == self.num_predecessors, 'The number of inputs must be equal to `num_predecessors`.'\n states: List[torch.Tensor] = self.preprocessor(processed_inputs)\n for ops, inps in zip(\n cast(Sequence[Sequence[LayerChoice]], self.ops),\n cast(Sequence[Sequence[InputChoice]], self.inputs)\n ):\n current_state = []\n for op, inp in zip(ops, inps):\n current_state.append(op(inp(states)))\n current_state = torch.sum(torch.stack(current_state), 0)\n states.append(current_state)\n if self.merge_op == 'all':\n # a special case for graph engine\n this_cell = torch.cat(states[self.num_predecessors:], self.concat_dim)\n else:\n this_cell = torch.cat([states[k] for k in self.output_node_indices], self.concat_dim)\n return self.postprocessor(this_cell, processed_inputs)\n\n @staticmethod\n def _convert_op_candidates(op_candidates, node_index, op_index, chosen) -> Union[Dict[str, nn.Module], List[nn.Module]]:\n # convert the complex type into the type that is acceptable to LayerChoice\n def convert_single_op(op):\n if isinstance(op, nn.Module):\n return copy.deepcopy(op)\n elif callable(op):\n # FIXME: I don't know how to check whether we are in graph engine.\n return op(node_index, op_index, chosen)\n else:\n raise TypeError(f'Unrecognized type {type(op)} for op {op}')\n\n if isinstance(op_candidates, list):\n return [convert_single_op(op) for op in op_candidates]\n elif isinstance(op_candidates, dict):\n return {key: convert_single_op(op) for key, op in op_candidates.items()}\n elif callable(op_candidates):\n warnings.warn(f'Directly passing a callable into Cell is deprecated. 
Please consider migrating to list or dict.',\n DeprecationWarning)\n return op_candidates()\n else:\n raise TypeError(f'Unrecognized type {type(op_candidates)} for {op_candidates}')\n", "import sys\nfrom tqdm import tqdm\n\nimport torch\nfrom torchvision import datasets, transforms\n\nfrom nni.compression.pytorch.pruning import L1NormPruner\nfrom nni.compression.pytorch.speedup import ModelSpeedup\n\nfrom pathlib import Path\nsys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))\nfrom cifar10.vgg import VGG\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nnormalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n\ntrain_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ]), download=True),\n batch_size=128, shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=128, shuffle=False)\ncriterion = torch.nn.CrossEntropyLoss()\n\ndef trainer(model, optimizer, criterion, epoch):\n model.train()\n for data, target in tqdm(iterable=train_loader, desc='Epoch {}'.format(epoch)):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n\ndef evaluator(model):\n model.eval()\n correct = 0\n with torch.no_grad():\n for data, target in tqdm(iterable=test_loader, desc='Test'):\n data, target = data.to(device), target.to(device)\n output = model(data)\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n acc = 100 * correct / len(test_loader.dataset)\n print('Accuracy: {}%\\n'.format(acc))\n return acc\n\n\nif __name__ == '__main__':\n model = VGG().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n criterion = torch.nn.CrossEntropyLoss()\n\n print('\\nPre-train the model:')\n for i in range(5):\n trainer(model, optimizer, criterion, i)\n evaluator(model)\n\n config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]\n pruner = L1NormPruner(model, config_list)\n _, masks = pruner.compress()\n\n print('\\nThe accuracy with masks:')\n evaluator(model)\n\n pruner._unwrap_model()\n ModelSpeedup(model, dummy_input=torch.rand(10, 3, 32, 32).to(device), masks_file=masks).speedup_model()\n\n print('\\nThe accuracy after speedup:')\n evaluator(model)\n\n # Need a new optimizer due to the modules in model will be replaced during speedup.\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n print('\\nFinetune the model after speedup:')\n for i in range(5):\n trainer(model, optimizer, criterion, i)\n evaluator(model)\n" ]
[ [ "torch.stack", "torch.cat" ], [ "torch.nn.CrossEntropyLoss", "torch.no_grad", "torch.rand", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LBJ-Wade/MultiLens
[ "0d88d734b07c178725f926b0a055c6084b91f6d7" ]
[ "MultiLens/MultiLens.py" ]
[ "#! /usr/bin/env python\n\n# Copyright (C) 2016 ETH Zurich, Institute for Astronomy\n\n# System imports\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\n\n# External modules\nimport numpy as np\nimport copy\n\n# MultiLens imports\nfrom MultiLens.analytic_lens import AnalyticLens\nfrom MultiLens.Cosmo.cosmo import CosmoProp\n\nclass MultiLens(object):\n \"\"\"\n this class aims to compute the lensing quantities of multi-plane lenses with full ray-tracing and approximation methods\n \"\"\"\n\n def __init__(self):\n self.analyticLens = AnalyticLens()\n self.cosmo = CosmoProp()\n\n def full_ray_tracing(self, lensAssembly, z_source, x_array, y_array, observer_frame=True):\n \"\"\"\n full ray-tracing routine (eqn 10,11 in Birrer in prep), implemented with equation 12 in a recursive way\n (!assuming flat cosmology!)\n :param object_list: list of sources with specified physical deflection angles (sorted by redshift)\n :param z_source: redshift of the source\n :param x_array: x-coords of the rays\n :param y_array: y-coords of the rays\n :return: deflections delta x_coords, delta y_coords such that x_source = x - delta x_source\n \"\"\"\n if observer_frame:\n self._full_ray_tracing_observer(lensAssembly)\n object_list = lensAssembly.object_array\n alpha_x_tot = copy.deepcopy(x_array)\n alpha_y_tot = copy.deepcopy(y_array)\n x_k = np.zeros_like(alpha_x_tot)\n y_k = np.zeros_like(alpha_x_tot)\n z_last = 0\n for lensObject in object_list:\n z = lensObject.redshift\n if z < z_source:\n T_k_last = self.cosmo.T_xy(z_last, z)\n x_k += alpha_x_tot*T_k_last\n y_k += alpha_y_tot*T_k_last\n x_k_phys, y_k_phys = x_k/(1+z), y_k/(1+z)\n alpha_x, alpha_y = lensObject.deflection(x_k_phys, y_k_phys)\n alpha_x_tot -= alpha_x\n alpha_y_tot -= alpha_y\n z_last = z\n else:\n pass\n T_k_last = self.cosmo.T_xy(z_last, z_source)\n D_s = self.cosmo.D_xy(0, z_source)\n x_k += alpha_x_tot*T_k_last\n y_k += alpha_y_tot*T_k_last\n x_s_phys, y_s_phys = x_k/(1+z_source), y_k/(1+z_source)\n beta_sx = x_s_phys / D_s\n beta_sy = y_s_phys / D_s\n return beta_sx, beta_sy\n\n def _full_ray_tracing_observer(self, lensAssembly):\n \"\"\"\n computes the real positions of the lens objects given the position in the observer frame\n :param lensAssembly:\n :return:\n \"\"\"\n object_list = lensAssembly.object_array\n alpha_x_tot, alpha_y_tot = lensAssembly.get_visible_positions()\n x_k = np.zeros_like(alpha_x_tot)\n y_k = np.zeros_like(alpha_y_tot)\n z_last = 0\n i = 0\n for lensObject in object_list:\n z = lensObject.redshift\n T_k_last = self.cosmo.T_xy(z_last, z)\n x_k += alpha_x_tot*T_k_last\n y_k += alpha_y_tot*T_k_last\n x_k_phys, y_k_phys = x_k/(1+z), y_k/(1+z)\n lensObject.update_position(x_k_phys[i], y_k_phys[i]) # update position of the i'th lens according to the deflection\n alpha_x, alpha_y = lensObject.deflection(x_k_phys, y_k_phys)\n alpha_x_tot -= alpha_x\n alpha_y_tot -= alpha_y\n z_last = z\n i += 1\n return 0\n\n def combined_ray_tracing(self, lensAssembly, z_source, x_array, y_array, observer_frame=True):\n \"\"\"\n ray-tracing routine with Born approximation for the objects specified (eqn 17 in Birrer in prep)\n :param object_list: list of sources with specified physical deflection angles (sorted by redshift)\n :param z_source: redshift of the source\n :param x_array: x-coords of the rays\n :param y_array: y-coords of the rays\n :return: deflections delta x_coords, delta y_coords such that x_source = x - delta x_source\n \"\"\"\n if observer_frame:\n 
self._combined_ray_tracing_observer(lensAssembly, z_source)\n else:\n lensAssembly.reset_observer_frame()\n object_list = lensAssembly.object_array\n mainLens = lensAssembly.main_deflector()\n z_d = mainLens.redshift\n beta_dx = x_array.copy()\n beta_dy = y_array.copy()\n beta_sx = x_array.copy()\n beta_sy = y_array.copy()\n alpha_x_foreground = np.zeros_like(x_array)\n alpha_y_foreground = np.zeros_like(y_array)\n alpha_dx, alpha_dy = 0, 0\n Ds = self.cosmo.D_xy(0, z_source)\n Dd = self.cosmo.D_xy(0, z_d)\n i = 0\n for lensObject in object_list:\n z = lensObject.redshift\n if z < z_d:\n D_k = self.cosmo.D_xy(0, z)\n D_ks = self.cosmo.D_xy(z, z_source)\n D_kd = self.cosmo.D_xy(z, z_d)\n alpha_x, alpha_y = lensObject.deflection(D_k*x_array, D_k*y_array)\n alpha_x_foreground += alpha_x\n alpha_y_foreground += alpha_y\n beta_sx -= D_ks/Ds*alpha_x\n beta_sy -= D_ks/Ds*alpha_y\n beta_dx -= D_kd/Dd*alpha_x\n beta_dy -= D_kd/Dd*alpha_y\n elif lensObject.main is True:\n D_ds = self.cosmo.D_xy(z_d, z_source)\n alpha_dx, alpha_dy = lensObject.deflection(Dd*beta_dx, Dd*beta_dy)\n beta_sx -= D_ds/Ds*alpha_dx\n beta_sy -= D_ds/Ds*alpha_dy\n elif z >= z_d:\n D_k = self.cosmo.D_xy(0, z)\n D_ks = self.cosmo.D_xy(z, z_source)\n D_kd = self.cosmo.D_xy(z_d, z)\n beta_x = beta_dx - D_kd/D_k*(alpha_dx + alpha_x_foreground) # equation 16 in Birrer in prep\n beta_y = beta_dy - D_kd/D_k*(alpha_dy + alpha_y_foreground) # equation 16 in Birrer in prep\n alpha_x, alpha_y = lensObject.deflection(D_k*beta_x, D_k*beta_y)\n beta_sx -= D_ks/Ds*alpha_x\n beta_sy -= D_ks/Ds*alpha_y\n i += 1\n return beta_sx, beta_sy\n\n def _combined_ray_tracing_observer(self, lensAssembly, z_source):\n \"\"\"\n computes the real position of the lensing objects given observer frame coordinates\n :param lensAssembly:\n :return:\n \"\"\"\n object_list = lensAssembly.object_array\n mainLens = lensAssembly.main_deflector()\n z_d = mainLens.redshift\n x_array, y_array = lensAssembly.get_visible_positions()\n beta_dx = x_array.copy()\n beta_dy = y_array.copy()\n beta_sx = x_array.copy()\n beta_sy = y_array.copy()\n alpha_x_foreground = 0\n alpha_y_foreground = 0\n alpha_dx, alpha_dy = 0, 0\n Ds = self.cosmo.D_xy(0, z_source)\n Dd = self.cosmo.D_xy(0, z_d)\n i = 0\n for lensObject in object_list:\n z = lensObject.redshift\n if z < z_d:\n D_k = self.cosmo.D_xy(0, z)\n D_ks = self.cosmo.D_xy(z, z_source)\n D_kd = self.cosmo.D_xy(z, z_d)\n lensObject.update_position(D_k*x_array[i], D_k*y_array[i])\n alpha_x, alpha_y = lensObject.deflection(D_k*x_array, D_k*y_array)\n alpha_x_foreground += alpha_x\n alpha_y_foreground += alpha_y\n beta_sx -= D_ks/Ds*alpha_x\n beta_sy -= D_ks/Ds*alpha_y\n beta_dx -= D_kd/Dd*alpha_x\n beta_dy -= D_kd/Dd*alpha_y\n elif lensObject.main is True:\n D_ds = self.cosmo.D_xy(z_d, z_source)\n lensObject.update_position(Dd*x_array[i], Dd*y_array[i])\n alpha_dx, alpha_dy = lensObject.deflection(Dd*beta_dx, Dd*beta_dy)\n alpha_dx *= D_ds/Ds\n alpha_dy *= D_ds/Ds\n beta_sx -= alpha_dx\n beta_sy -= alpha_dy\n elif z >= z_d:\n D_k = self.cosmo.D_xy(0, z)\n D_kd = self.cosmo.D_xy(z_d, z)\n beta_x = beta_dx - D_kd/D_k*(alpha_dx + alpha_x_foreground) # equation 16 in Birrer in prep\n beta_y = beta_dy - D_kd/D_k*(alpha_dy + alpha_y_foreground) # equation 16 in Birrer in prep\n lensObject.update_position(D_k*beta_x[i], D_k*beta_y[i])\n i += 1\n return 0\n\n def born_ray_tracing(self, lensAssembly, z_source, x_array, y_array):\n \"\"\"\n routine with Born approximation for all objects (eqn 14 in Birrer in prep)\n :param object_list: list 
of sources with specified physical deflection angles (sorted by redshift)\n :param z_source: redshift of the source\n :param x_array: x-coords of the rays\n :param y_array: y-coords of the rays\n :return: deflections delta x_coords, delta y_coords such that x_source = x - delta x_source\n \"\"\"\n lensAssembly.reset_observer_frame()\n object_list = lensAssembly.object_array\n beta_sx = copy.deepcopy(y_array)\n beta_sy = copy.deepcopy(x_array)\n Ds = self.cosmo.D_xy(0, z_source)\n for lensObject in object_list:\n z = lensObject.redshift\n if z < z_source:\n D_k = self.cosmo.D_xy(0, z)\n D_ks = self.cosmo.D_xy(z, z_source)\n delta_x, delta_y = lensObject.deflection(D_k*x_array, D_k*y_array)\n beta_sx -= delta_x*D_ks/Ds\n beta_sy -= delta_y*D_ks/Ds\n return beta_sx, beta_sy\n\n def analytic_mapping(self, lensAssembly, z_source, x_array, y_array, LOS_corrected=True, observer_frame=True):\n \"\"\"\n computes equation 29 in Birrer in prep with analytic terms for the LOS structure\n :param object_list:\n :param z_source:\n :param x_array:\n :param y_array:\n :return:\n \"\"\"\n if observer_frame:\n self._full_ray_tracing_observer(lensAssembly)\n else:\n lensAssembly.reset_observer_frame()\n object_list = lensAssembly.object_array\n mainLens = lensAssembly.main_deflector()\n z_d = mainLens.redshift\n D_ds = self.cosmo.D_xy(z_d, z_source)\n Ds = self.cosmo.D_xy(0, z_source)\n Dd = self.cosmo.D_xy(0, z_d)\n gamma_A = self.analyticLens.shear_lens(object_list, z_d)\n gamma_B = self.analyticLens.shear_foreground(object_list, z_lens=z_d, z_source=z_source)\n if LOS_corrected is True:\n gamma_C = self.analyticLens.shear_background_first_order(object_list, z_d, z_source)\n else:\n gamma_C = self.analyticLens.shear_background_zero(object_list, z_d, z_source)\n gamma_BC = gamma_B + gamma_C\n x_lens = gamma_A[0][0]*x_array + gamma_A[0][1]*y_array + x_array\n y_lens = gamma_A[1][0]*x_array + gamma_A[1][1]*y_array + y_array\n shear_x = gamma_BC[0][0]*x_array + gamma_BC[0][1]*y_array\n shear_y = gamma_BC[1][0]*x_array + gamma_BC[1][1]*y_array\n\n alpha_x, alpha_y = mainLens.deflection(Dd*x_lens, Dd*y_lens)\n beta_sx = x_array - D_ds/Ds * alpha_x + shear_x\n beta_sy = y_array - D_ds/Ds * alpha_y + shear_y\n return beta_sx, beta_sy\n\n def analytic_matrices(self, lensAssembly, z_source, LOS_corrected=True, observer_frame=True):\n \"\"\"\n computes equation 29 in Birrer in prep with analytic terms for the LOS structure\n :param object_list:\n :param z_source:\n :param x_array:\n :param y_array:\n :return:\n \"\"\"\n if observer_frame:\n self._full_ray_tracing_observer(lensAssembly)\n else:\n lensAssembly.reset_observer_frame()\n object_list = lensAssembly.object_array\n mainLens = lensAssembly.main_deflector()\n z_d = mainLens.redshift\n gamma_A = self.analyticLens.shear_lens(object_list, z_d)\n gamma_B = self.analyticLens.shear_foreground(object_list, z_lens=z_d, z_source=z_source)\n if LOS_corrected is True:\n gamma_C = self.analyticLens.shear_background_first_order(object_list, z_d, z_source)\n else:\n gamma_C = self.analyticLens.shear_background_zero(object_list, z_d, z_source)\n gamma_BC = gamma_B + gamma_C\n\n return gamma_A, gamma_BC" ]
[ [ "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kizzhang/langevinSimulation
[ "98e0c3f33e4c9d6ecb11972e5fbe9388626274f8" ]
[ "abp_modified.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 19 12:08:47 2021\r\n\r\n@author: Kaneki\r\n\"\"\"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 18 17:28:36 2021\r\n\r\n@author: Kaneki\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\n\r\ndef Periodicity(pos,l):\r\n if pos >= -l and pos <= l :\r\n return pos\r\n elif pos < -l:\r\n return pos + 2*l\r\n elif pos > l:\r\n return pos - 2*l\r\n\r\ndef ABP_move(t, dt, N, crit_coll_num, l):\r\n coll_num = np.zeros((N, int(t/dt)))\r\n for i in range(0, int(t/dt) - 1): # time evolution\r\n # Collision\r\n for p1 in range(0,N):\r\n for p2 in range(p1,N):\r\n if p1 == p2:\r\n continue\r\n \r\n # Collision criteria\r\n r = np.sqrt((x[p1,i] - x[p2,i]) ** 2 + (y[p1,i] - y[p2,i]) ** 2)\r\n if r > 2.1 * a:\r\n continue\r\n else:\r\n coll_num[p1,i] += 1\r\n coll_num[p2,i] += 1\r\n \r\n \r\n for dum in range(len(coll_num)):\r\n if coll_num[dum, i] >= crit_coll_num:\r\n theta[dum,i] = theta[dum,i] + np.random.uniform(0,2*np.pi) # a random angle to avoid coll \r\n dx = v * np.cos(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()\r\n dy = v * np.sin(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()\r\n \r\n \r\n x_new = x[dum,i] + dx\r\n y_new = y[dum,i] + dy\r\n \r\n theta[dum,i+1] = theta[dum,i] + np.sqrt(2*Dr*dt) * np.random.randn()\r\n\r\n # Periodic boundary condition on x \r\n x[dum,i+1] = Periodicity(x_new, l)\r\n \r\n # Periodic boundary condition on y \r\n y[dum,i+1] = Periodicity(y_new, l)\r\n \r\n # x position if there is no jump\r\n x_nojump[dum,i+1] = x_nojump[dum,i] + dx \r\n \r\n # y position if there is no jump\r\n y_nojump[dum,i+1] = y_nojump[dum,i] + dy\r\n \r\n else:\r\n dx = v * np.cos(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()\r\n dy = v * np.sin(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()\r\n \r\n x_new = x[dum,i] + dx\r\n y_new = y[dum,i] + dy\r\n \r\n theta[dum,i+1] = theta[dum,i] + np.sqrt(2*Dr*dt) * np.random.randn() \r\n \r\n # Periodic boundary condition on x \r\n x[dum,i+1] = Periodicity(x_new, l)\r\n \r\n # Periodic boundary condition on x \r\n y[dum,i+1] = Periodicity(y_new,l)\r\n \r\n # x position if there is no jump\r\n x_nojump[dum,i+1] = x_nojump[dum,i] + dx \r\n \r\n # y position if there is no jump\r\n y_nojump[dum,i+1] = y_nojump[dum,i] + dy\r\n \r\n print(\"Time Step: \", i)\r\n return x, y, theta, coll_num\r\n\r\n# CONSTANTS\r\n\r\nv = 3.12e-5 # swimming speed of B. Subtilis [m/s]\r\nk = 1.38e-23 # Boltzmann constant [m^2kg/s^2K]\r\nT = 293 # Room temperature [K]\r\neta = 1e-3 # viscosity of water [Pa s]\r\na = 2e-6 # spherical cell radius [m]\r\nDr = k*T/(8*np.pi*eta*a**3) # rotational diffusion coefficient of B. Subtilis\r\nDt = k*T/(6*np.pi*eta*a) # translation diffusion coefficient of B. 
Subtilis\r\n\r\n# ADJUSTABLE PARAMETERS\r\n\r\nt = 10 # time over which motion is observed [s]\r\ndt = 0.01 # time step between recorded positions\r\nN = 900 # number of cells \r\ncrit_coll_num = 1 # number of collisions a bacetrium will walk away\r\nl = 0.5 * 1e-4 # box width\r\npsi = N * np.pi * a**2 / (2*l)**2 # packing fraction\r\n\r\n\r\n# INITIAL CONDITIONS\r\n\r\ntheta = np.zeros((N,int(t/dt))) # initial swimming orientation [radians]\r\nx = np.zeros((N,int(t/dt))) # initial x position [m]\r\ny = np.zeros((N,int(t/dt))) # initial y position [m]\r\nx_nojump = np.zeros((N,int(t/dt))) # x position without jump\r\ny_nojump = np.zeros((N,int(t/dt))) # y position without jump\r\n\r\n# Initializing x y theta\r\nfor n in range(N): \r\n # x positions\r\n x[n,0] = np.random.uniform(-l,l)\r\n x_nojump[n,0] = x[n,0]\r\n \r\n # y positions\r\n y[n,0] = np.random.uniform(-l,l)\r\n y_nojump[n,0] = y[n,0]\r\n \r\n theta[n,0] = np.random.uniform(-2*np.pi, 2*np.pi)\r\n \r\n \r\nx,y,_,col_num = ABP_move(t,dt,N,crit_coll_num,l)\r\nprint(\"Packing Fraction = \", psi)\r\n\r\n'''\r\nimport pandas as pd\r\ndf_x = pd.DataFrame(x)\r\ndf_y = pd.DataFrame(y)\r\ndf_x_non_p = pd.DataFrame(x_nojump)\r\ndf_y_non_p = pd.DataFrame(y_nojump)\r\n\r\n\r\ndf_x.to_csv('x_p.dat')\r\ndf_y.to_csv('y_p.dat')\r\ndf_x_non_p.to_csv('x_nonp.dat')\r\ndf_y_non_p.to_csv('y_nonp.dat')\r\n'''\r\n\r\n# MAIN SCRIPT\r\n\r\nfig = plt.figure(dpi = 141)\r\nax = plt.axes(xlim=(-1*l, 1*l), ylim=(-1*l, 1*l))\r\nax.set_aspect(1)\r\nfig.canvas.draw()\r\n\r\ns = (ax.get_window_extent().width * 72./fig.dpi * a / l)**2\r\n\r\nscat = ax.scatter([], [], s)\r\nscat1 = ax.scatter([], [], s) \r\n\r\ndef animation(frame):\r\n data = np.hstack((x[:,frame, np.newaxis], y[:,frame, np.newaxis]))\r\n scat.set_offsets(data)\r\n return scat,\r\n \r\ndef animation_non_Periodic(frame):\r\n data1 = np.hstack((x_nojump[:,frame, np.newaxis], y_nojump[:,frame, np.newaxis]))\r\n scat1.set_offsets(data1)\r\n return scat1,\r\n\r\ndef animation_with_trajectory(frame):\r\n ax.cla()\r\n for i in range(N):\r\n ax.plot(x[i,:frame], y[i,:frame], linestyle = '-', color = 'blue')\r\n ax.plot(x[i,frame], y[i,frame], 'ro')\r\n \r\n ax.set_xlim(-l,l)\r\n ax.set_ylim(-l,l)\r\n\r\nani = FuncAnimation(fig, animation, frames=range(int(t/dt)),\\\r\n interval = 10, repeat=False)\r\n\r\nani.save(\"movie2.mp4\", fps = 40)\r\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "numpy.cos", "numpy.sin", "matplotlib.pyplot.axes", "numpy.random.randn", "numpy.random.uniform", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cathyzhyi/mlir-npcomp
[ "79a3f639bfb449ba1409ff6dff161badf5a11b44", "79a3f639bfb449ba1409ff6dff161badf5a11b44" ]
[ "frontends/pytorch/test/ivalue_import/list.py", "frontends/pytorch/test/ivalue_import/submodules-select.py" ]
[ "# -*- Python -*-\n# This file is licensed under a pytorch-style license\n# See frontends/pytorch/LICENSE for license information.\n\nimport typing\n\nimport torch\nimport torch_mlir\n\n# RUN: %PYTHON %s | npcomp-opt | FileCheck %s\n\nmb = torch_mlir.ModuleBuilder()\n\nclass TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.l = [1, 2]\n# CHECK: torch.class_type @[[CLASSTYPE:.*]] {\n# TODO: Don't lose element type.\n# CHECK: torch.attr \"l\" : !basicpy.ListType\n# CHECK: }\n# CHECK: %[[N1:.*]] = basicpy.numeric_constant 1 : i64\n# CHECK: %[[N2:.*]] = basicpy.numeric_constant 2 : i64\n# CHECK: %[[LIST:.*]] = basicpy.build_list %[[N1]], %[[N2]] : (i64, i64) -> !basicpy.ListType\n# CHECK: torch.nn_module {\n# CHECK: torch.slot \"l\", %[[LIST]] : !basicpy.ListType\n# CHECK: } : !torch.nn.Module<\"[[CLASSTYPE]]\">\n\n\ntest_module = TestModule()\nrecursivescriptmodule = torch.jit.script(test_module)\n# TODO: Automatically handle unpacking Python class RecursiveScriptModule into the underlying ScriptModule.\nmb.import_module(recursivescriptmodule._c)\nmb.module.operation.print()\n", "# -*- Python -*-\n# This file is licensed under a pytorch-style license\n# See frontends/pytorch/LICENSE for license information.\n\nimport typing\n\nimport torch\nimport torch_mlir\n\n# RUN: %PYTHON %s | npcomp-opt | FileCheck %s\n\nmb = torch_mlir.ModuleBuilder()\n\nclass Submodule(torch.nn.Module):\n def __init__(self, n):\n super().__init__()\n self.n = n\n def forward(self):\n return self.n\n\nclass TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.s1 = Submodule(1)\n self.s2 = Submodule(2)\n\n # CHECK-LABEL: func private @{{.*}}TestModule.forward\n def forward(self, b: bool):\n # Modules with the same class can be selected between.\n # CHECK: %[[MOD:.*]] = scf.if\n s = self.s1 if b else self.s2\n # CHECK: %[[N:.*]] = torch.prim.CallMethod %[[MOD]][\"forward\"] ()\n # CHECK: return %[[N]]\n return s.forward()\n\n\ntest_module = TestModule()\nrecursivescriptmodule = torch.jit.script(test_module)\n# TODO: Automatically handle unpacking Python class RecursiveScriptModule into the underlying ScriptModule.\nmb.import_module(recursivescriptmodule._c)\nmb.module.operation.print()\n" ]
[ [ "torch.jit.script" ], [ "torch.jit.script" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LeoLugoF/TDSpectrum
[ "bec2a86294ab563db889e8a4b1f9c9d7c4599871" ]
[ "TDSpectrum.py" ]
[ "import matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport platform\r\nfrom math import exp\r\n\r\n#########################################################################\r\n# TDSpectrum #\r\n# Made by: Leonardo Israel Lugo Fuentes (LeoLugoF) # \r\n# Date: 30/May/2019 #\r\n# Requirements: Mathplotlib, numpy. #\r\n# #\r\n# Description: #\r\n# This program reads gaussian output files with .log or .out termation. #\r\n# It reads the oscilator forces and wave lenght for each excitation, #\r\n# and shows a UV-Vis spectrum, but the data can also be saved. #\r\n# Data is stored with name of the old file + \".txt\". #\r\n# How the calculations are done can be consulted at the gaussian page: #\r\n# https://gaussian.com/uvvisplot/ #\r\n# Note: Y values are in absorbance e(L mol-1 cm-1) #\r\n# #\r\n# Command line:[python3] [*.py] [*.log/*.out] [Sigma] [MinWave] ... #\r\n# [MaxWave] [NONE/-s] #\r\n# Arguments: #\r\n# [Sigma] = Value of sigma; Gaussian recommends 0.4 (eV). #\r\n# [MinWave] = Minimum Wavelenght (nm) #\r\n# [MaxWave] = Maximum Wavelenght (nm) #\r\n# [-s] = Saves only the data; doesn't show the graph #\r\n# [NONE] = Shows the graph only without saving data #\r\n# #\r\n# Examples: #\r\n# Example 1: python TDSpectrum.py YourFile.log 0.4 300 500 #\r\n# Example 2: python TDSpectrum.py YourFile.out 0.2 200 600 -s #\r\n# #\r\n# The first example will show only the UV-Vis plot. #\r\n# The second example will save only the data without showing the plot. #\r\n#########################################################################\r\n\r\nOSplit = \"\\\\\"\r\nif platform.system() == \"Linux\":\r\n OSplit = \"/\"\r\n\r\nclass Global:\r\n \"\"\"Global variables; Stores information.\"\"\"\r\n WaveLenghts = np.array([])\r\n Forces = np.array([])\r\n XValues = np.array([])\r\n YValues = np.array([])\r\n ShowPlot = True\r\n\r\ndef ReadFile(FilePath):\r\n \"\"\"Reads the output file and stores the information.\"\"\"\r\n fstream = open(FilePath,\"r\")\r\n lines = fstream.readlines()\r\n fstream.close()\r\n for line in lines:\r\n if \"Excited State\" in line and \"<S**2>=\" in line:\r\n i = 0\r\n for subsentence in line.split(\" \"):\r\n if(len(subsentence) > 1):\r\n if i == 6:\r\n # This element always corresponds to the Wavelenght (nm)\r\n Global.WaveLenghts = np.append(Global.WaveLenghts,float(subsentence))\r\n i += 1\r\n elif i == 8:\r\n # This element always corresponds to the oscilator force (F)\r\n Global.Forces = np.append(Global.Forces,float(subsentence.split(\"=\")[1]))\r\n break\r\n else:\r\n i += 1 \r\n return\r\n\r\ndef DoCalcs(Sigma,MinWave,MaxWave):\r\n \"\"\"Calculates the Y values from the MinWave and MaxWave giving with the sigma value.\"\"\"\r\n CMm1 = Sigma*(10**7)*0.000806556\r\n NMm1 = 0.000806556*Sigma\r\n Global.XValues = np.arange(MinWave,MaxWave,1)\r\n Global.YValues = np.zeros(len(Global.XValues))\r\n Matrix = np.zeros((len(Global.XValues),len(Global.WaveLenghts)))\r\n #Row number\r\n i = 0\r\n for row in Matrix:\r\n #Column Number\r\n j = 0\r\n for cell in row:\r\n Constant = 130629740*(Global.Forces[j]/CMm1)\r\n Matrix[i,j] = Constant*exp(-((((1/Global.XValues[i])-(1/Global.WaveLenghts[j]))/NMm1)**2)) \r\n j += 1 \r\n i += 1\r\n #Sum columns\r\n i = 0\r\n for Row in Matrix:\r\n Summatory = 0\r\n for Cell in Row:\r\n Summatory += Cell\r\n Global.YValues[i] = Summatory\r\n i += 1\r\n return\r\n\r\ndef ShowGraph():\r\n \"\"\"Shows the plot,\"\"\"\r\n fig, ax = plt.subplots()\r\n ax.plot(Global.XValues,Global.YValues)\r\n 
plt.xlabel(\"λ(nm)\")\r\n plt.ylabel(\"e(L mol-1 cm-1)\")\r\n plt.show()\r\n\r\ndef SaveFile():\r\n \"\"\"Stores the x and y data into a text file.\"\"\"\r\n SaveFilePath = FilePath.split(\".\")[0] + \".txt\"\r\n f = open(SaveFilePath,\"a\")\r\n i = 0\r\n for XValue in Global.XValues:\r\n f.write(str(XValue) + \"\\t\" + str(Global.YValues[i]) + \"\\n\")\r\n i += 1\r\n f.close()\r\n\r\n\r\nFilePath = \"\"\r\ni = 0\r\n#Reads the extra comment arguments giving\r\nfor arg in sys.argv:\r\n if \".out\" in arg or \".log\" in arg or \".OUT\" in arg or \".LOG\" in arg:\r\n FilePath = os.getcwd() + OSplit + arg\r\n elif \"-s\" in arg:\r\n Global.ShowPlot = False\r\n else:\r\n try:\r\n Number = float(arg)\r\n if i == 0:\r\n Sigma = Number\r\n if i == 1:\r\n MinWave = Number\r\n if i == 2:\r\n MaxWave = Number\r\n i += 1\r\n except:\r\n pass\r\n \r\n#If no comment arguments are giving it will ask for it.\r\nif FilePath == \"\":\r\n FilePath = input(\"Please Insert the file path: \")\r\n ReadFile(FilePath)\r\n Sigma = input(\"Sigma Value: \")\r\n MinWave = input(\"Min WaveLenght (nm): \")\r\n MaxWave = input(\"Max WaveLenght (nm): \")\r\n\r\nReadFile(FilePath)\r\nif(len(Global.WaveLenghts) == 0):\r\n print(\"No excited states found.\")\r\nelse:\r\n DoCalcs(float(Sigma),float(MinWave),float(MaxWave))\r\n if Global.ShowPlot is True:\r\n ShowGraph()\r\n else:\r\n SaveFile()\r\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anyboby/ConstrainedMBPO
[ "036f4ffefc464e676a287c35c92cc5c0b8925fcf", "036f4ffefc464e676a287c35c92cc5c0b8925fcf", "036f4ffefc464e676a287c35c92cc5c0b8925fcf" ]
[ "softlearning/samplers/model_sampler.py", "mbpo/models/nn.py", "softlearning/policies/safe_utils/logx.py" ]
[ "from collections import defaultdict\nfrom collections import deque, OrderedDict\nfrom itertools import islice\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nfrom softlearning.samplers.cpo_sampler import CpoSampler\nfrom softlearning.policies.safe_utils.logx import EpochLogger\nfrom softlearning.policies.safe_utils.mpi_tools import mpi_sum\n\nfrom .base_sampler import BaseSampler\nACTION_PROCESS_ENVS = [\n 'Safexp-PointGoal2',\n ]\nEPS = 1e-8\n\nclass ModelSampler(CpoSampler):\n def __init__(self,\n max_path_length,\n batch_size=1000,\n store_last_n_paths = 10,\n cares_about_cost = False,\n max_uncertainty_c = 3,\n max_uncertainty_r = 3,\n rollout_mode = False,\n logger = None):\n self._max_path_length = max_path_length\n self._path_length = np.zeros(batch_size)\n\n self.cares_about_cost = cares_about_cost\n self.rollout_mode = rollout_mode\n\n if logger:\n self.logger = logger\n else: \n self.logger = EpochLogger()\n\n self._store_last_n_paths = store_last_n_paths\n self._last_n_paths = deque(maxlen=store_last_n_paths)\n\n self._current_path = defaultdict(list)\n self._last_path_return = 0\n self._max_path_return = -np.inf\n self._current_observation = None\n self._last_action = None\n self._max_uncertainty_c = max_uncertainty_c\n self._max_uncertainty_rew = max_uncertainty_r,\n\n self._total_samples = 0\n self._n_episodes = 0\n self._total_Vs = 0\n self._total_CVs = 0\n self._total_rew = 0\n self._total_rew_var = 0\n self._total_cost = 0\n self._total_cost_var = 0\n self._total_dyn_ep_var = 0\n self._path_dyn_var = 0\n self._total_dkl = 0\n self._max_dkl = 0\n self._dyn_dkl_path = 0\n self._total_mean_var = 0\n\n self.env = None\n self.policy = None\n self.pool = None\n\n def initialize(self, env, policy, pool):\n self.env = env\n self.policy = policy\n self.pool = pool\n self.ensemble_size = env.num_networks\n\n def set_debug_buf(self, pool):\n self.pool_debug = pool\n\n def set_policy(self, policy):\n self.policy = policy\n\n def set_logger(self, logger):\n \"\"\"\n provide a logger (Sampler creates it's own logger by default, \n but you might want to share a logger between algo, samplers, etc.)\n \n automatically shares logger with agent\n Args: \n logger : instance of EpochLogger\n \"\"\" \n self.logger = logger \n\n def terminate(self):\n self.env.close()\n\n def get_diagnostics(self):\n diagnostics = OrderedDict({'pool-size': self.pool.size})\n mean_rollout_length = self._total_samples / (self.batch_size+EPS)\n\n ensemble_rew_var_perstep = self._total_rew_var/(self._total_samples+EPS)\n ensemble_cost_var_perstep = self._total_cost_var/(self._total_samples+EPS)\n ensemble_dyn_var_perstep = self._total_dyn_ep_var/(self._total_samples+EPS)\n\n if len(self._path_cost.shape)>1:\n cost_sum = np.sum(np.mean(self._path_cost, axis=0))\n else:\n cost_sum = np.sum(self._path_cost)\n \n if len(self._path_return.shape)>1:\n ret_sum = np.sum(np.mean(self._path_return, axis=0))\n else:\n ret_sum = np.sum(self._path_return)\n\n ensemble_cost_rate = cost_sum/(self._total_samples+EPS)\n ensemble_rew_rate = ret_sum/(self._total_samples+EPS)\n\n vals_mean = self._total_Vs / (self._total_samples+EPS)\n\n cval_mean = self._total_CVs / (self._total_samples+EPS)\n\n dyn_Dkl = self._total_dkl / (self._total_samples+EPS)\n mean_var = self._total_mean_var/ (self._total_samples+EPS)\n diagnostics.update({\n 'msampler/samples_added': self._total_samples,\n 'msampler/rollout_H_max': self._n_episodes,\n 'msampler/rollout_H_mean': mean_rollout_length,\n 'msampler/rew_var_perstep': 
ensemble_rew_var_perstep,\n 'msampler/cost_var_perstep' : ensemble_cost_var_perstep,\n 'msampler/dyn_var_perstep' : ensemble_dyn_var_perstep,\n 'msampler/cost_rate' : ensemble_cost_rate,\n 'msampler/rew_rate' : ensemble_rew_rate,\n 'msampler/v_mean':vals_mean,\n 'msampler/cv_mean':cval_mean,\n 'msampler/ens_DKL': dyn_Dkl,\n 'msampler/ens_mean_var': mean_var,\n 'msampler/max_path_return': self._max_path_return,\n 'msampler/max_dkl': self._max_dkl,\n })\n\n return diagnostics\n\n def __getstate__(self):\n state = {\n key: value for key, value in self.__dict__.items()\n if key not in ('env', 'policy', 'pool')\n }\n\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n self.env = None\n self.policy = None\n self.pool = None\n\n def clear_last_n_paths(self):\n self._last_n_paths.clear()\n\n def compute_dynamics_dkl(self, obs_batch, depth=1):\n for _ in range(depth):\n get_action_outs = self.policy.get_action_outs(obs_batch, factored=True, inc_var=True)\n a = get_action_outs['pi']\n next_obs, _, terminal, info = self.env.step(obs_batch, a)\n dyn_dkl_mean = info.get('ensemble_dkl_mean', 0)\n \n n_paths = next_obs.shape[0]\n self._total_dkl += dyn_dkl_mean*n_paths\n self._total_samples += n_paths\n\n obs_batch = next_obs[np.squeeze(~terminal)]\n\n dkl_mean = self.dyn_dkl\n dkl_path_mean = dkl_mean*depth\n\n return dkl_path_mean\n \n def set_rollout_dkl(self, dkl):\n self.dkl_lim = dkl\n\n def set_max_path_length(self, path_length):\n self._max_path_length = path_length\n\n def get_last_n_paths(self, n=None):\n if n is None:\n n = self._store_last_n_paths\n\n last_n_paths = tuple(islice(self._last_n_paths, None, n))\n\n return last_n_paths\n\n @property\n def dyn_dkl(self):\n return self._total_dkl / (self._total_samples+EPS)\n\n def batch_ready(self):\n return self.pool.size >= self.pool.max_size\n\n def _process_observations(self,\n observation,\n action,\n reward,\n cost,\n terminal,\n next_observation,\n info):\n\n processed_observation = {\n 'observations': observation,\n 'actions': action,\n 'rewards': reward,\n 'cost' : cost,\n 'terminals': terminal,\n 'next_observations': next_observation,\n 'infos': info,\n }\n\n return processed_observation\n\n def reset(self, observations):\n self.batch_size = observations.shape[0]\n\n self._starting_uncertainty = np.zeros(self.batch_size)\n\n if self.rollout_mode == 'iv_gae':\n self._current_observation = np.tile(observations[None], (self.ensemble_size, 1, 1))\n else:\n self._current_observation = observations\n\n self.policy.reset() #does nohing for cpo policy atm\n self.pool.reset(self.batch_size, self.env.dyn_target_var)\n\n self._path_length = np.zeros(self.batch_size)\n if self.rollout_mode=='iv_gae':\n self._path_return = np.zeros(shape=(self.ensemble_size, self.batch_size))\n self._path_cost = np.zeros(shape=(self.ensemble_size, self.batch_size))\n else:\n self._path_return = np.zeros(shape=(self.batch_size))\n self._path_cost = np.zeros(shape=(self.batch_size))\n\n self._path_return_var = np.zeros(self.batch_size)\n self._path_cost_var = np.zeros(self.batch_size)\n self._path_dyn_var = np.zeros(self.batch_size)\n self._dyn_dkl_path = np.zeros(self.batch_size)\n\n self.model_inds = np.random.randint(self.ensemble_size)\n self.v_inds = self.policy.random_v_inds(self.batch_size)\n self.vc_inds = self.policy.random_vc_inds(self.batch_size)\n self.batch_inds = np.arange(0, self.batch_size)\n\n self._total_samples = 0\n self._n_episodes = 0\n self._total_Vs = 0\n self._total_CVs = 0\n self._total_cost = 0\n 
self._total_cost_var = 0\n self._total_rew = 0\n self._total_rew_var = 0\n self._total_dyn_ep_var = 0\n self._total_dkl = 0\n self._max_dkl = 0\n self._total_mean_var = 0\n self._max_path_return = 0\n\n def sample(self, max_samples=None):\n assert self.pool.has_room #pool full! empty before sampling.\n assert self._current_observation is not None # reset before sampling !\n assert self.pool.alive_paths.any() # reset before sampling !\n\n self._n_episodes += 1\n alive_paths = self.pool.alive_paths\n current_obs = self._current_observation\n\n # Get outputs from policy\n get_action_outs = self.policy.get_action_outs(current_obs, factored=True, inc_var=True)\n \n a = get_action_outs['pi']\n logp_t = get_action_outs['logp_pi']\n pi_info_t = get_action_outs['pi_info']\n\n ##### @anyboby temporary\n ### unpack ensemble outputs, if gaussian\n if self.rollout_mode=='iv_gae':\n v_t = get_action_outs['v']\n vc_t = get_action_outs.get('vc', 0)\n\n v_var = get_action_outs.get('v_var', 0)\n vc_var = get_action_outs.get('vc_var', 0) \n else:\n v_t = get_action_outs['v'][self.v_inds,self.batch_inds]\n vc_t = get_action_outs['vc'][self.vc_inds,self.batch_inds]\n\n v_var = np.mean(get_action_outs.get('v_var', 0), axis=0)\n vc_var = np.mean(get_action_outs.get('vc_var', 0), axis=0)\n #####\n\n ## ____________________________________________ ##\n ## Step ##\n ## ____________________________________________ ##\n\n next_obs, reward, terminal, info = self.env.step(current_obs, a)\n\n reward = np.squeeze(reward, axis=-1)\n c = np.squeeze(info.get('cost', np.zeros(reward.shape)))\n terminal = np.squeeze(terminal, axis=-1)\n dyn_dkl_mean = info.get('ensemble_dkl_mean', 0)\n dyn_dkl_path = info.get('ensemble_dkl_path', 0)\n dyn_ep_var = info.get('ensemble_ep_var', np.zeros(shape=reward.shape[1:]))\n ens_mean_var = info.get('ensemble_mean_var', 0)\n\n if self._n_episodes == 1:\n self._starting_uncertainty = np.mean(dyn_ep_var, axis=-1)\n self._starting_uncertainty_dkl = dyn_dkl_path\n ## ____________________________________________ ##\n ## Check Uncertainty f. 
each Trajectory ##\n ## ____________________________________________ ##\n\n ### check if too uncertain before storing info of the taken step \n ### (so we don't take a \"bad step\" by appending values of next state)\n if self.rollout_mode=='uncertainty':\n next_dkl = self._dyn_dkl_path[self.pool.alive_paths]+dyn_dkl_path\n too_uncertain_paths = next_dkl>=self.dkl_lim\n else:\n too_uncertain_paths = np.zeros(shape=self.pool.alive_paths.sum(), dtype=np.bool)\n \n ### early terminate paths if max_samples is given\n if max_samples and not self.rollout_mode=='iv_gae':\n n = self._total_samples + alive_paths.sum() - too_uncertain_paths.sum()\n n = max(n-max_samples, 0)\n early_term = np.zeros_like(too_uncertain_paths[~too_uncertain_paths], dtype=np.bool)\n early_term[:n] = True\n too_uncertain_paths[~too_uncertain_paths] = early_term\n\n ### finish too uncertain paths before storing info of the taken step into buffer\n # remaining_paths refers to the paths we have finished and has the same shape \n # as our terminal mask (too_uncertain_mask)\n # alive_paths refers to all original paths and therefore has shape batch_size\n remaining_paths = self._finish_paths(too_uncertain_paths, append_vals=True, append_cvals=True)\n alive_paths = self.pool.alive_paths\n if not alive_paths.any():\n info['alive_ratio'] = 0\n return next_obs, reward, terminal, info\n\n ## ____________________________________________ ##\n ## Store Info of the remaining paths ##\n ## ____________________________________________ ##\n\n if self.rollout_mode=='iv_gae':\n current_obs = current_obs[:,remaining_paths]\n a = a[:,remaining_paths]\n next_obs = next_obs[:,remaining_paths]\n reward = reward[:,remaining_paths]\n v_t = v_t[:,remaining_paths]\n v_var = v_var[:,remaining_paths]\n\n c = c[:,remaining_paths]\n vc_t = vc_t[:, remaining_paths]\n vc_var = vc_var[:, remaining_paths]\n terminal = terminal[:,remaining_paths]\n\n logp_t = logp_t[:,remaining_paths]\n pi_info_t = {k:v[:,remaining_paths] for k,v in pi_info_t.items()}\n else:\n current_obs = current_obs[remaining_paths]\n a = a[remaining_paths]\n next_obs = next_obs[remaining_paths]\n reward = reward[remaining_paths]\n \n v_t = v_t[remaining_paths]\n v_var = v_var[remaining_paths]\n self.v_inds = self.v_inds[remaining_paths]\n\n c = c[remaining_paths]\n vc_t = vc_t[remaining_paths]\n vc_var = vc_var[remaining_paths]\n self.vc_inds = self.vc_inds[remaining_paths]\n self.batch_inds = np.arange(0,np.sum(remaining_paths))\n terminal = terminal[remaining_paths]\n dyn_dkl_path = dyn_dkl_path[remaining_paths]\n logp_t = logp_t[remaining_paths]\n pi_info_t = {k:v[remaining_paths] for k,v in pi_info_t.items()}\n\n dyn_ep_var = dyn_ep_var[remaining_paths]\n\n #### update some sampler infos\n self._total_samples += alive_paths.sum()\n\n if self.rollout_mode=='iv_gae':\n self._total_cost += c[self.model_inds].sum()\n self._total_rew += reward[self.model_inds].sum()\n self._path_return[:,alive_paths] += reward\n self._path_cost[:,alive_paths] += c\n\n else:\n self._total_cost += c.sum()\n self._total_rew += reward.sum()\n self._path_return[alive_paths] += reward\n self._path_cost[alive_paths] += c\n\n self._path_length[alive_paths] += 1\n self._path_dyn_var[alive_paths] += np.mean(dyn_ep_var, axis=-1)\n self._total_dyn_ep_var += dyn_ep_var.sum()\n\n if self.rollout_mode=='iv_gae':\n self._total_Vs += v_t[self.model_inds].sum()\n self._total_CVs += vc_t[self.model_inds].sum()\n else:\n self._total_Vs += v_t.sum()\n self._total_CVs += vc_t.sum()\n\n self._total_dkl += 
dyn_dkl_mean*alive_paths.sum()\n self._total_mean_var =+ ens_mean_var*alive_paths.sum()\n\n self._max_dkl = max(self._max_dkl, np.max(dyn_dkl_path))\n self._dyn_dkl_path[alive_paths] += dyn_dkl_path\n self._max_path_return = max(self._max_path_return, np.max(self._path_return))\n\n #### only store one trajectory in buffer \n self.pool.store_multiple(current_obs,\n a,\n next_obs,\n reward,\n v_t,\n v_var,\n c,\n vc_t,\n vc_var,\n np.mean(dyn_ep_var, axis=-1),\n logp_t,\n pi_info_t,\n terminal)\n\n #### terminate mature termination due to path length\n ## update obs before finishing paths (_finish_paths() uses current obs)\n self._current_observation = next_obs\n\n path_end_mask = (self._path_length >= self._max_path_length-1)[alive_paths] \n remaining_paths = self._finish_paths(term_mask=path_end_mask, append_vals=True, append_cvals=True)\n if not remaining_paths.any():\n info['alive_ratio'] = 0\n return next_obs, reward, terminal, info\n\n ## update remaining paths and obs\n if self.rollout_mode=='iv_gae':\n self._current_observation = self._current_observation[:,remaining_paths]\n prem_term_mask = np.any(terminal[:,remaining_paths], axis=0) ##@anyboby maybe check later, if terminal per model should be possible\n else:\n self._current_observation = self._current_observation[remaining_paths]\n self.v_inds = self.v_inds[remaining_paths]\n self.vc_inds = self.vc_inds[remaining_paths]\n self.batch_inds = np.arange(0,np.sum(remaining_paths))\n\n prem_term_mask = terminal\n \n #### terminate real termination due to env end\n remaining_paths = self._finish_paths(term_mask=prem_term_mask, append_vals=False, append_cvals=True)\n if not remaining_paths.any():\n info['alive_ratio'] = 0\n return next_obs, reward, terminal, info\n\n ### update alive paths\n alive_paths = self.pool.alive_paths\n if self.rollout_mode=='iv_gae':\n self._current_observation = self._current_observation[:,remaining_paths]\n else:\n self._current_observation = self._current_observation[remaining_paths]\n self.v_inds = self.v_inds[remaining_paths]\n self.vc_inds = self.vc_inds[remaining_paths]\n self.batch_inds = np.arange(0,np.sum(remaining_paths))\n\n alive_ratio = sum(alive_paths)/self.batch_size\n info['alive_ratio'] = alive_ratio\n\n return next_obs, reward, terminal, info\n\n def compute_td_losses(self, obs):\n # Get outputs from policy\n get_action_outs = self.policy.get_action_outs(obs, factored=True, inc_var=True)\n a = get_action_outs['pi']\n v = get_action_outs['v']\n vc = get_action_outs.get('vc', 0) # Agent may not use cost value func\n next_obs, reward, terminal, info = self.env.step(obs, a)\n reward = np.squeeze(reward)\n c = np.squeeze(info.get('cost', 0))\n\n nv = self.policy.get_v(next_obs, factored=True)\n nvc = self.policy.get_vc(next_obs, factored=True)\n \n td = np.repeat(reward[None], v.shape[0], axis=0) + self.policy.gamma*nv - v\n tdc = np.repeat(c[None], v.shape[0], axis=0) + self.policy.cost_gamma*nvc - vc\n \n ep_td = np.mean(np.var(td, axis=0))\n ep_tdc = np.mean(np.var(tdc, axis=0))\n return ep_td, ep_tdc\n\n\n def _finish_paths(self, term_mask, append_vals=False, append_cvals=False):\n \"\"\"\n terminates paths that are indicated in term_mask. Append_vals should be set to \n True/False to indicate, whether values of the current states of those paths should \n be appended (Note: Premature termination due to environment term should not \n include appended values, while Mature termination upon path length excertion should \n include appended values)\n\n Warning! 
throws error if trying to terminate an already terminated path. \n\n Args:\n term_mask: Mask with the shape of the currently alive paths that indicates which \n paths should be termianted\n append_vals: True/False whether values of the current state should be appended\n \n Returns: \n remaining_mask: A Mask that indicates the remaining alive paths. Has the same shape \n as the arg term_mask\n \"\"\"\n if not term_mask.any():\n return np.logical_not(term_mask)\n\n # We do not count env time out (mature termination) as true terminal state, append values\n if append_vals:\n if self.rollout_mode=='iv_gae':\n last_val = self.policy.get_v(self._current_observation[:,term_mask], factored=False)\n else:\n last_val = self.policy.get_v(self._current_observation[term_mask], factored=True)[self.v_inds[term_mask],np.arange(term_mask.sum())]\n else:\n # init final values\n if self.rollout_mode=='iv_gae':\n last_val = np.zeros(shape=(self.ensemble_size, term_mask.sum()))\n else:\n last_val = np.zeros(shape=(term_mask.sum()))\n\n if append_cvals:\n if self.rollout_mode=='iv_gae':\n last_cval = self.policy.get_vc(self._current_observation[:,term_mask], factored=False)\n else:\n last_cval = self.policy.get_vc(self._current_observation[term_mask], factored=True)[self.vc_inds[term_mask],np.arange(term_mask.sum())]\n else:\n # init final values\n if self.rollout_mode=='iv_gae':\n last_cval = np.zeros(shape=(self.ensemble_size, term_mask.sum()))\n else:\n last_cval = np.zeros(shape=(term_mask.sum()))\n\n self.pool.finish_path_multiple(term_mask, last_val, last_cval)\n\n remaining_path_mask = np.logical_not(term_mask)\n\n return remaining_path_mask\n \n def finish_all_paths(self):\n\n alive_paths=self.pool.alive_paths ##any paths that are still alive did not terminate by env\n # init final values and quantify according to termination type\n # Note: we do not count env time out as true terminal state\n if not alive_paths.any(): return self.get_diagnostics()\n\n if alive_paths.any():\n term_mask = np.ones(shape=alive_paths.sum(), dtype=np.bool)\n if self.policy.agent.reward_penalized:\n last_val = self.policy.get_v(self._current_observation, factored=True)[self.v_inds[term_mask],np.arange(term_mask.sum())]\n else:\n last_val = self.policy.get_v(self._current_observation, factored=True)[self.v_inds[term_mask],np.arange(term_mask.sum())]\n last_cval = self.policy.get_vc(self._current_observation, factored=True)[self.vc_inds[term_mask],np.arange(term_mask.sum())]\n\n self.pool.finish_path_multiple(term_mask, last_val, last_cval)\n \n alive_paths = self.pool.alive_paths\n assert alive_paths.sum()==0 ## something went wrong with finishing all paths\n \n return self.get_diagnostics()\n\n def set_max_uncertainty(self, max_uncertainty):\n self.max_uncertainty = max_uncertainty", "from builtins import range\nfrom builtins import object\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# 
==============================================================================\n\nimport tensorflow as tf\nimport numpy as np\nfrom itertools import product\n\nclass FeedForwardNet(object):\n \"\"\"Custom feed-forward network layer.\"\"\"\n def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None, get_uncertainty=False):\n self.name = name\n self.in_size = in_size\n self.out_shape = out_shape\n self.out_size = np.prod(out_shape)\n self.layers = layers\n self.hidden_dim = hidden_dim\n self.final_nonlinearity = (lambda x:x) if final_nonlinearity is None else final_nonlinearity\n self.get_uncertainty = get_uncertainty\n\n self.weights = [None] * layers\n self.biases = [None] * layers\n\n self.params_list = []\n\n with tf.variable_scope(name):\n for layer_i in range(self.layers):\n in_size = self.hidden_dim\n out_size = self.hidden_dim\n if layer_i == 0: in_size = self.in_size\n if layer_i == self.layers - 1: out_size = self.out_size\n self.weights[layer_i] = tf.get_variable(\"weights%d\" % layer_i, [in_size, out_size], initializer=tf.contrib.layers.xavier_initializer())\n self.biases[layer_i] = tf.get_variable(\"bias%d\" % layer_i, [1, out_size], initializer=tf.constant_initializer(0.0))\n self.params_list += [self.weights[layer_i], self.biases[layer_i]]\n\n def __call__(self, x, stop_params_gradient=False, is_eval=True, ensemble_idxs=None, pre_expanded=None, reduce_mode=\"none\"):\n original_shape = tf.shape(x)\n h = tf.reshape(x, [-1, self.in_size])\n for layer_i in range(self.layers):\n nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity\n if stop_params_gradient: h = nonlinearity(tf.matmul(h, tf.stop_gradient(self.weights[layer_i])) + tf.stop_gradient(self.biases[layer_i]))\n else: h = nonlinearity(tf.matmul(h, self.weights[layer_i]) + self.biases[layer_i])\n if len(self.out_shape) > 0: h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant(self.out_shape)], -1))\n else: h = tf.reshape(h, original_shape[:-1])\n if pre_expanded is None: pre_expanded = ensemble_idxs is not None\n if reduce_mode == \"none\" and not pre_expanded and self.get_uncertainty:\n if len(self.out_shape) > 0: h = tf.expand_dims(h, -2)\n else: h = tf.expand_dims(h, -1)\n return h\n\n def l2_loss(self):\n return tf.add_n([tf.reduce_sum(.5 * tf.square(mu)) for mu in self.params_list])\n\nclass BayesianDropoutFeedForwardNet(FeedForwardNet):\n \"\"\"Custom feed-forward network layer, with dropout as a Bayesian approximation.\"\"\"\n def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None, get_uncertainty=False, keep_prob=.5, eval_sample_count=2, consistent_random_seed=False):\n super(BayesianDropoutFeedForwardNet, self).__init__(name, in_size, out_shape, layers=layers, hidden_dim=hidden_dim,\n final_nonlinearity=final_nonlinearity, get_uncertainty=get_uncertainty)\n self.keep_prob = keep_prob\n self.eval_sample_count = eval_sample_count\n if eval_sample_count < 2: raise Exception(\"eval_sample_count must be at least 2 to estimate uncertainty\")\n self.dropout_seed = tf.random_uniform([layers], maxval=1e18, dtype=tf.int64) if consistent_random_seed else [None] * layers\n\n def __call__(self, x, stop_params_gradient=False, is_eval=True, pre_expanded=False, ensemble_idxs=None, reduce_mode=\"none\"):\n if is_eval:\n x = tf.tile(tf.expand_dims(x,0), tf.concat([tf.constant([self.eval_sample_count]), tf.ones_like(tf.shape(x))], 0))\n original_shape = tf.shape(x)\n h = tf.reshape(x, [-1, self.in_size])\n for layer_i in 
range(self.layers):\n nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity\n if layer_i > 0: h = tf.nn.dropout(h, keep_prob=self.keep_prob, seed=self.dropout_seed[layer_i])\n if stop_params_gradient: h = nonlinearity(tf.matmul(h, tf.stop_gradient(self.weights[layer_i])) + tf.stop_gradient(self.biases[layer_i]))\n else: h = nonlinearity(tf.matmul(h, self.weights[layer_i]) + self.biases[layer_i])\n if len(self.out_shape) > 0: h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant(self.out_shape)], -1))\n else: h = tf.reshape(h, original_shape[:-1])\n if is_eval:\n h, uncertainty = tf.nn.moments(h, 0)\n if self.get_uncertainty: return h, uncertainty\n else: return h\n else:\n return h\n\n\nclass EnsembleFeedForwardNet(FeedForwardNet):\n \"\"\"Custom feed-forward network layer with an ensemble.\"\"\"\n def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None, get_uncertainty=False, ensemble_size=2, train_sample_count=2, eval_sample_count=2):\n if train_sample_count > ensemble_size: raise Exception(\"train_sample_count cannot be larger than ensemble size\")\n if eval_sample_count > ensemble_size: raise Exception(\"eval_sample_count cannot be larger than ensemble size\")\n self.name = name\n self.in_size = in_size\n self.out_shape = out_shape\n self.out_size = np.prod(out_shape)\n self.layers = layers\n self.hidden_dim = hidden_dim\n self.final_nonlinearity = (lambda x:x) if final_nonlinearity is None else final_nonlinearity\n self.get_uncertainty = get_uncertainty\n self.ensemble_size = ensemble_size\n self.train_sample_count = train_sample_count\n self.eval_sample_count = eval_sample_count\n\n self.weights = [None] * layers\n self.biases = [None] * layers\n\n self.params_list = []\n\n with tf.variable_scope(name):\n for layer_i in range(self.layers):\n in_size = self.hidden_dim\n out_size = self.hidden_dim\n if layer_i == 0: in_size = self.in_size\n if layer_i == self.layers - 1: out_size = self.out_size\n self.weights[layer_i] = tf.get_variable(\"weights%d\" % layer_i, [ensemble_size, in_size, out_size], initializer=tf.contrib.layers.xavier_initializer())\n self.biases[layer_i] = tf.get_variable(\"bias%d\" % layer_i, [ensemble_size, out_size], initializer=tf.constant_initializer(0.0))\n self.params_list += [self.weights[layer_i], self.biases[layer_i]]\n\n def __call__(self, x, stop_params_gradient=False, is_eval=True, ensemble_idxs=None, pre_expanded=None, reduce_mode=\"none\"):\n if pre_expanded is None: pre_expanded = ensemble_idxs is not None\n if ensemble_idxs is None:\n ensemble_idxs = tf.random_shuffle(tf.range(self.ensemble_size))\n ensemble_sample_n = self.eval_sample_count if is_eval else self.train_sample_count\n ensemble_idxs = ensemble_idxs[:ensemble_sample_n]\n else:\n ensemble_sample_n = tf.shape(ensemble_idxs)[0]\n\n weights = [tf.gather(w, ensemble_idxs, axis=0) for w in self.weights]\n biases = [tf.expand_dims(tf.gather(b, ensemble_idxs, axis=0),0) for b in self.biases]\n\n original_shape = tf.shape(x)\n if pre_expanded: h = tf.reshape(x, [-1, ensemble_sample_n, self.in_size])\n else: h = tf.tile(tf.reshape(x, [-1, 1, self.in_size]), [1, ensemble_sample_n, 1])\n for layer_i in range(self.layers):\n nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity\n if stop_params_gradient: h = nonlinearity(tf.einsum('bri,rij->brj', h, tf.stop_gradient(weights[layer_i])) + tf.stop_gradient(biases[layer_i]))\n else: h = nonlinearity(tf.einsum('bri,rij->brj', h, weights[layer_i]) + 
biases[layer_i])\n\n if pre_expanded:\n if len(self.out_shape) > 0: h = tf.reshape(\n h, \n tf.concat([original_shape[:-1], \n tf.constant(self.out_shape)], -1)\n )\n else: h = tf.reshape(h, original_shape[:-1])\n else:\n if len(self.out_shape) > 0: h = tf.reshape(\n h,\n tf.concat([original_shape[:-1],\n tf.constant([ensemble_sample_n]),\n tf.constant(self.out_shape)], -1)\n )\n else: h = tf.reshape(\n h, \n tf.concat([original_shape[:-1], \n tf.constant([ensemble_sample_n])], -1)\n )\n\n if reduce_mode == \"none\":\n pass\n elif reduce_mode == \"random\":\n if len(self.out_shape) > 0: h = tf.reduce_sum(\n h * tf.reshape(\n tf.one_hot(tf.random_uniform(\n [tf.shape(h)[0]],\n 0, \n ensemble_sample_n, dtype=tf.int64), \n ensemble_sample_n\n ), \n tf.concat(\n [tf.shape(h)[:1], \n tf.ones_like(tf.shape(h)[1:-2]), \n tf.constant([ensemble_sample_n]), \n tf.constant([1])], 0\n )\n ), \n -2\n )\n else: h = tf.reduce_sum(\n h * tf.reshape(\n tf.one_hot(\n tf.random_uniform(\n [tf.shape(h)[0]], \n 0, \n ensemble_sample_n, dtype=tf.int64\n ), \n ensemble_sample_n\n ), \n tf.concat(\n [tf.shape(h)[:1], \n tf.ones_like(tf.shape(h)[1:-1]), \n tf.constant([ensemble_sample_n])], \n 0\n )\n ),\n -1\n )\n elif reduce_mode == \"mean\":\n if len(self.out_shape) > 0: h = tf.reduce_mean(h, -2)\n else: h = tf.reduce_mean(h, -1)\n else: raise Exception(\"use a valid reduce mode: none, random, or mean\")\n\n return h\n\n\nclass ReparamNormal(object):\n \"\"\"Wrapper to make a feedforward network that outputs both mu and logsigma,\n for use in the reparameterization trick.\"\"\"\n def __init__(self, base_net, name, in_size, out_shape, layers=2, hidden_dim=32, final_nonlinearity=None, ls_start_bias=0.0, final_net=FeedForwardNet, logsigma_min=-5., logsigma_max=2., **kwargs):\n assert layers > 1\n self.main_encoder = base_net(name+\"_base\", in_size, [hidden_dim], layers, hidden_dim, final_nonlinearity=tf.nn.relu, **kwargs)\n self.mu = final_net(name+\"_mu\", hidden_dim, out_shape, layers=1, final_nonlinearity=final_nonlinearity, **kwargs)\n self.logsigma = final_net(name+\"_logsigma\", hidden_dim, out_shape, layers=1, final_nonlinearity=None, **kwargs)\n self.ls_start_bias = ls_start_bias\n self.params_list = self.main_encoder.params_list + self.mu.params_list + self.logsigma.params_list\n self.logsigma_min = logsigma_min\n self.logsigma_max = logsigma_max\n\n def __call__(self, x):\n encoded = self.main_encoder(x)\n mu = self.mu(encoded)\n logsigma = tf.clip_by_value(self.logsigma(encoded) + self.ls_start_bias, self.logsigma_min, self.logsigma_max)\n return mu, logsigma\n\n def l2_loss(self):\n return self.main_encoder.l2_loss() + self.mu.l2_loss() + self.logsigma.l2_loss()\n", "\"\"\"\n\nSome simple logging functionality, vendored from Spinning Up's utilities.\n\n\"\"\"\nimport json\nimport joblib\nimport shutil\nimport numpy as np\nimport time\nimport tensorflow as tf\nfrom copy import deepcopy\nimport os.path as osp, time, atexit, os\nfrom softlearning.policies.safe_utils.mpi_tools import proc_id, mpi_statistics_scalar\nfrom softlearning.policies.safe_utils.serialization_utils import convert_json\nfrom softlearning.environments.utils import get_environment_from_params\nfrom softlearning.environments.gym import mujoco_safety_gym\n\nfrom tensorflow.python.lib.io import file_io ###@anyboby not good!\nimport traceback\nfrom gym import wrappers\n\nDEFAULT_DATA_DIR = osp.join(osp.abspath(osp.dirname(osp.dirname(osp.dirname(__file__)))),'data')\n\ncolor2num = dict(\n gray=30,\n red=31,\n green=32,\n yellow=33,\n 
blue=34,\n magenta=35,\n cyan=36,\n white=37,\n crimson=38\n)\n\ndef colorize(string, color, bold=False, highlight=False):\n \"\"\"\n Colorize a string.\n\n This function was originally written by John Schulman.\n \"\"\"\n attr = []\n num = color2num[color]\n if highlight: num += 10\n attr.append(str(num))\n if bold: attr.append('1')\n return '\\x1b[%sm%s\\x1b[0m' % (';'.join(attr), string)\n\ndef load_policy(fpath, itr='last', deterministic=False):\n\n # handle which epoch to load from\n if itr=='last':\n saves = [int(x[11:]) for x in os.listdir(fpath) if 'simple_save' in x and len(x)>11]\n itr = '%d'%max(saves) if len(saves) > 0 else ''\n else:\n itr = '%d'%itr\n\n # load the things!\n gpu_options = tf.GPUOptions(allow_growth=True)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n tf.keras.backend.set_session(sess)\n sess = tf.keras.backend.get_session()\n \n #sess = tf.Session(graph=tf.Graph())\n \n saver = Saver()\n model = saver.restore_tf_graph(sess, fpath)\n\n # get the correct op for executing actions\n if deterministic and 'mu' in model.keys():\n # 'deterministic' is only a valid option for SAC policies\n print('Using deterministic action op.')\n action_op = model['mu']\n else:\n print('Using default action op.')\n action_op = model['pi']\n\n # make function for producing an action given a single state\n get_action = lambda x : sess.run(action_op, feed_dict={model['x']: x})\n\n # try to load environment from save\n # (sometimes this will fail because the environment could not be pickled)\n try:\n state = joblib.load(osp.join(fpath, 'vars'+itr+'.pkl'))\n env = state['env']\n except:\n environment_params = {}\n environment_params['universe'] = 'gym'\n environment_params['task'] = 'v2'\n environment_params['domain'] = 'HumanoidSafe'\n environment_params['kwargs'] = {}\n env = get_environment_from_params(environment_params)\n # env = wrappers.Monitor(env, '/home/uvday/ray_mbpo/AntSafe/', force = True)\n\n return env, get_action, sess\n\nclass Saver:\n def __init__(self, verbose=False):\n self.verbose = verbose\n \n def init_saver(self, scope):\n var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)\n self.saver = tf.train.Saver(\n var_list= var_list,\n sharded=True,\n allow_empty=True)\n self.builder = None \n\n def restore_tf_graph(self, sess, fpath):\n \"\"\"\n Loads graphs saved by Logger.\n\n Will output a dictionary whose keys and values are from the 'inputs' \n and 'outputs' dict you specified with logger.setup_tf_saver().\n\n Args:\n sess: A Tensorflow session.\n fpath: Filepath to save directory.\n\n Returns:\n A dictionary mapping from keys to tensors in the computation graph\n loaded from ``fpath``. \n \"\"\"\n tf.saved_model.loader.load(\n sess,\n [tf.saved_model.tag_constants.SERVING],\n fpath\n )\n model_info = joblib.load(osp.join(fpath, 'model_info.pkl'))\n graph = sess.graph #tf.get_default_graph()\n model = dict()\n model.update({k: graph.get_tensor_by_name(v) for k,v in model_info['inputs'].items()})\n model.update({k: graph.get_tensor_by_name(v) for k,v in model_info['outputs'].items()})\n return model\n\n def save_config(self, config, config_dir, exp_name='CPO_config'):\n \"\"\"\n Log an experiment configuration.\n\n Call this once at the top of your experiment, passing in all important\n config vars as a dict. This will serialize the config to JSON, while\n handling anything which can't be serialized in a graceful way (writing\n as informative a string as possible). \n\n Example use:\n\n .. 
code-block:: python\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n \"\"\"\n config_json = convert_json(config)\n if exp_name is not None:\n config_json['exp_name'] = exp_name\n if proc_id()==0:\n output = json.dumps(config_json, separators=(',',':\\t'), indent=4, sort_keys=True)\n print(colorize('Saving config:\\n', color='cyan', bold=True))\n print(output)\n with open(osp.join(config_dir, \"config.json\"), 'w') as out:\n out.write(output)\n\n def save_state(self, state_dict, output_dir, itr=None):\n \"\"\"\n\n Saves the state of an experiment.\n\n To be clear: this is about saving *state*, not logging diagnostics.\n All diagnostic logging is separate from this function. This function\n will save whatever is in ``state_dict``---usually just a copy of the\n environment---and the most recent parameters for the model you \n previously set up saving for with ``setup_tf_saver``. \n\n Call with any frequency you prefer. If you only want to maintain a\n single state and overwrite it at each call with the most recent \n version, leave ``itr=None``. If you want to keep all of the states you\n save, provide unique (increasing) values for 'itr'.\n\n Args:\n state_dict (dict): Dictionary containing essential elements to\n describe the current state of training.\n\n output_dir: the target directory\n\n itr: An int, or None. Current iteration of training.\n\n Return:\n state_path: returns the full path to the saved file (the target \n directory + extension)\n \"\"\"\n\n ### dump state\n fname = 'vars.pkl' if itr is None else 'vars%d.pkl'%itr\n state_path = osp.join(output_dir, fname)\n try:\n joblib.dump(state_dict, state_path)\n except:\n print('Warning: could not pickle state_dict.')\n\n return state_path\n \n def save_tf (self, sess, inputs, outputs, output_dir, shards=1):\n \"\"\"\n Uses simple_save to save a trained model, plus info to make it easy\n to associate tensors to variables after restore. \n\n Args:\n sess: The Tensorflow session in which you train your computation\n graph.\n\n inputs (dict): A dictionary that maps from keys of your choice\n to the tensorflow placeholders that serve as inputs to the \n computation graph. Make sure that *all* of the placeholders\n needed for your outputs are included!\n\n outputs (dict): A dictionary that maps from keys of your choice\n to the outputs from your computation graph.\n\n output_dir: the target directory\n\n #itr: An int, or None. Current iteration of training.\n\n Returns: \n fpath: the path to the saved tf model\n\n model_info_path('*.pkl'): the path to the saved model_info dict\n \"\"\"\n ### save tf\n tf_saver_elements = dict(session=sess, inputs=inputs, outputs=outputs)\n tf_saver_info = {'inputs': {k:v.name for k,v in inputs.items()},\n 'outputs': {k:v.name for k,v in outputs.items()}}\n\n fpath = ''#'simple_save' + ('%d'%itr if itr is not None else '')\n fpath = osp.join(output_dir, fpath)\n if osp.exists(fpath):\n # simple_save refuses to be useful if fpath already exists,\n # so just delete fpath if it's there.\n shutil.rmtree(fpath)\n \n ##### @anyboby saving with builder since simple_save seemed to increase\n # chkpt size by adding save op every time\n try:\n builder = self._maybe_create_builder(self.builder, sess, fpath, inputs, outputs)\n builder.save(as_text=False)\n if self.verbose:\n print(\" SavedModel graph written successfully. \" )\n success = True\n except Exception as e:\n print(\" WARNING::SavedModel write FAILED. 
\" )\n traceback.print_tb(e.__traceback__)\n success = False\n\n #tf.saved_model.simple_save(export_dir=fpath, **tf_saver_elements)\n \n ### save model info\n model_info_path = osp.join(fpath, 'model_info.pkl')\n joblib.dump(tf_saver_info, model_info_path)\n\n return fpath, model_info_path, success\n\n def _maybe_create_builder(self, builder, sess, export_dir, inputs, outputs):\n \"\"\"\n hacky, but doesn't create a new savedmodelbuilder witch each call, but instead \n overwrites export_dir in the SavedModelBuilder. \n \"\"\"\n if builder:\n if file_io.file_exists(export_dir):\n if file_io.list_directory(export_dir):\n raise AssertionError(\n \"Export directory already exists, and isn't empty. Please choose \"\n \"a different export directory, or delete all the contents of the \"\n \"specified directory: %s\" % export_dir)\n else:\n file_io.recursive_create_dir(export_dir)\n \n builder._export_dir = export_dir\n return builder\n else:\n builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir=export_dir)\n signature_def_map = {\n tf.compat.v1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(inputs, outputs)\n }\n assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS)\n builder.add_meta_graph_and_variables(sess,\n tags= [tf.saved_model.tag_constants.SERVING],\n signature_def_map=signature_def_map,\n assets_collection=assets_collection,\n saver=self.saver)\n return builder\n\nclass Logger:\n \"\"\"\n A general-purpose logger.\n\n Makes it easy to save diagnostics, hyperparameter configurations, the \n state of a training run, and the trained model.\n \"\"\"\n\n def __init__(self, output_dir=None, output_fname='progress.txt', exp_name=None):\n \"\"\"\n Initialize a Logger.\n\n Args:\n output_dir (string): A directory for saving results to. If \n ``None``, defaults to a temp directory of the form\n ``/tmp/experiments/somerandomnumber``.\n\n output_fname (string): Name for the tab-separated-value file \n containing metrics logged throughout a training run. \n Defaults to ``progress.txt``. \n\n exp_name (string): Experiment name. If you run multiple training\n runs and give them all the same ``exp_name``, the plotter\n will know to group them. (Use case: if you run the same\n hyperparameter configuration with multiple random seeds, you\n should give them all the same ``exp_name``.)\n \"\"\"\n # if proc_id()==0:\n # self.output_dir = output_dir or \"/tmp/experiments/%i\"%int(time.time())\n # if osp.exists(self.output_dir):\n # print(\"Warning: Log dir %s already exists! 
Storing info there anyway.\"%self.output_dir)\n # else:\n # os.makedirs(self.output_dir)\n # self.output_file = open(osp.join(self.output_dir, output_fname), 'w')\n # atexit.register(self.output_file.close)\n # print(colorize(\"Logging data to %s\"%self.output_file.name, 'green', bold=True))\n # else:\n # self.output_dir = None\n # self.output_file = None\n self.first_row=True\n self.log_headers = []\n self.log_current_row = {}\n self.exp_name = exp_name\n\n def log(self, msg, color='green'):\n \"\"\"Print a colorized message to stdout.\"\"\"\n if proc_id()==0:\n print(colorize(msg, color, bold=True))\n\n def log_tabular(self, key, val):\n \"\"\"\n Log a value of some diagnostic.\n\n Call this only once for each diagnostic quantity, each iteration.\n After using ``log_tabular`` to store values for each diagnostic,\n make sure to call ``dump_tabular`` to write them out to file and\n stdout (otherwise they will not get saved anywhere).\n \"\"\"\n if self.first_row:\n self.log_headers.append(key)\n else:\n assert key in self.log_headers, \"Trying to introduce a new key %s that you didn't include in the first iteration\"%key\n assert key not in self.log_current_row, \"You already set %s this iteration. Maybe you forgot to call dump_tabular()\"%key\n self.log_current_row[key] = val\n\n \n def dump_tabular(self, output_dir, print_out=True):\n \"\"\"\n Write all of the diagnostics from the current iteration.\n\n Writes both to stdout, and to the output file.\n\n Returns the current dictionary, if needed for other diagnostic \n purposes. \n\n Be sure to log all diagnostics you want before calling this!\n\n Args:\n fpath: path to the output directory\n\n print_out: set to False if you don't need the prints\n Returns:\n current_diagnostics: dictionary of the current diagnostics status\n \"\"\"\n if proc_id()==0:\n vals = []\n key_lens = [len(key) for key in self.log_headers]\n max_key_len = max(15,max(key_lens))\n keystr = '%'+'%d'%max_key_len\n fmt = \"| \" + keystr + \"s | %15s |\"\n n_slashes = 22 + max_key_len\n if print_out:\n print(\"-\"*n_slashes)\n for key in self.log_headers:\n val = self.log_current_row.get(key, \"\")\n valstr = \"%8.3g\"%val if hasattr(val, \"__float__\") else val\n if print_out:\n print(fmt%(key, valstr))\n vals.append(val)\n if print_out:\n print(\"-\"*n_slashes, flush=True)\n\n output_file = open(osp.join(output_dir, 'diagnostics.txt'), 'w')\n if self.first_row:\n output_file.write(\"\\t\".join(self.log_headers)+\"\\n\")\n output_file.write(\"\\t\".join(map(str,vals))+\"\\n\")\n output_file.flush()\n output_file.close()\n\n current_diagnostics = deepcopy(self.log_current_row)\n self.log_current_row.clear()\n self.first_row=False\n return current_diagnostics\n\nclass EpochLogger(Logger):\n \"\"\"\n A variant of Logger tailored for tracking average values over epochs.\n\n Typical use case: there is some quantity which is calculated many times\n throughout an epoch, and at the end of the epoch, you would like to \n report the average / std / min / max value of that quantity.\n\n With an EpochLogger, each time the quantity is calculated, you would\n use \n\n .. code-block:: python\n\n epoch_logger.store(NameOfQuantity=quantity_value)\n\n to load it into the EpochLogger's state. Then at the end of the epoch, you \n would use \n\n .. 
code-block:: python\n\n epoch_logger.log_tabular(NameOfQuantity, **options)\n\n to record the desired values.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.epoch_dict = dict()\n\n def store(self, **kwargs):\n \"\"\"\n Save something into the epoch_logger's current state.\n\n Provide an arbitrary number of keyword arguments with numerical \n values.\n \"\"\"\n for k,v in kwargs.items():\n if not(k in self.epoch_dict.keys()):\n self.epoch_dict[k] = []\n self.epoch_dict[k].append(v)\n\n def log_tabular(self, key, val=None, with_min_and_max=False, average_only=False):\n \"\"\"\n @anyboby: this should eventually be replaced by an overall diagnostics method !!!\n \n Log a value or possibly the mean/std/min/max values of a diagnostic.\n\n Args:\n key (string): The name of the diagnostic. If you are logging a\n diagnostic whose state has previously been saved with \n ``store``, the key here has to match the key you used there.\n\n val: A value for the diagnostic. If you have previously saved\n values for this key via ``store``, do *not* provide a ``val``\n here.\n\n with_min_and_max (bool): If true, log min and max values of the \n diagnostic over the epoch.\n\n average_only (bool): If true, do not log the standard deviation\n of the diagnostic over the epoch.\n \"\"\"\n if val is not None:\n super().log_tabular(key,val)\n else:\n v = self.epoch_dict[key]\n if v:\n vals = np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape)>0 else v\n stats = mpi_statistics_scalar(vals, with_min_and_max=with_min_and_max)\n super().log_tabular(key if average_only else key+'Average', stats[0])\n if not(average_only):\n super().log_tabular(key+'Std', stats[1])\n if with_min_and_max:\n super().log_tabular(key+'Max', stats[3])\n super().log_tabular(key+'Min', stats[2])\n self.epoch_dict[key] = []\n\n def get_stats(self, key):\n \"\"\"\n Lets an algorithm ask the logger for mean/std/min/max of a diagnostic.\n \"\"\"\n v = self.epoch_dict[key]\n vals = np.concatenate(v) if isinstance(v[0], np.ndarray) and len(v[0].shape)>0 else v\n return mpi_statistics_scalar(vals)\n\n\ndef setup_logger_kwargs(exp_name, seed=None, data_dir=None, datestamp=True):\n\n # Make base path\n ymd_time = time.strftime(\"%Y-%m-%d_\") if datestamp else ''\n relpath = ''.join([ymd_time, exp_name])\n \n if seed is not None:\n # Make a seed-specific subfolder in the experiment directory.\n if datestamp:\n hms_time = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n subfolder = ''.join([hms_time, '-', exp_name, '_s', str(seed)])\n else:\n subfolder = ''.join([exp_name, '_s', str(seed)])\n relpath = osp.join(relpath, subfolder)\n\n data_dir = data_dir or DEFAULT_DATA_DIR\n logger_kwargs = dict(output_dir=osp.join(data_dir, relpath), \n exp_name=exp_name)\n return logger_kwargs" ]
[ [ "numpy.logical_not", "numpy.arange", "numpy.squeeze", "numpy.tile", "numpy.max", "numpy.mean", "numpy.zeros_like", "numpy.any", "numpy.var", "numpy.repeat", "numpy.zeros", "numpy.sum", "numpy.random.randint" ], [ "tensorflow.matmul", "tensorflow.constant", "tensorflow.range", "tensorflow.shape", "tensorflow.reduce_mean", "tensorflow.nn.moments", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.stop_gradient", "tensorflow.constant_initializer", "tensorflow.gather", "tensorflow.einsum", "tensorflow.contrib.layers.xavier_initializer", "numpy.prod", "tensorflow.square", "tensorflow.variable_scope", "tensorflow.random_uniform", "tensorflow.nn.dropout" ], [ "tensorflow.compat.v1.saved_model.signature_def_utils.predict_signature_def", "tensorflow.keras.backend.set_session", "tensorflow.python.lib.io.file_io.recursive_create_dir", "tensorflow.get_collection", "tensorflow.keras.backend.get_session", "tensorflow.python.lib.io.file_io.file_exists", "tensorflow.ConfigProto", "numpy.concatenate", "tensorflow.python.lib.io.file_io.list_directory", "tensorflow.compat.v1.saved_model.builder.SavedModelBuilder", "tensorflow.GPUOptions", "tensorflow.train.Saver", "tensorflow.saved_model.loader.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
solsword/fleece
[ "f2259b67add9a660cc3185cb89681520d0e61b33" ]
[ "ngen.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nGenerates images that resemble input images.\n\nStacked denoising autoencoder-style neural network code using theano.\n\nWritten with reference to tutorial code from:\n https://github.com/lisa-lab/DeepLearningTutorials.git\nTutorial page:\n http://deeplearning.net/tutorial/SdA.html\n\"\"\"\n\nimport os\nimport sys\nimport timeit\nimport datetime\nimport functools\nimport copy\n\nimport gzip\nimport pickle\n\nimport numpy\n\nimport theano\nimport theano.tensor as T\n\nfrom PIL import Image\n\ndef debug(*args, **kwargs):\n print(*args, **kwargs)\n\ndef load_data(filename=\"data/examples.pkl.gz\"):\n '''\n Loads pickled gzipped data (see pdata.py).\n '''\n debug(\"... loading data ...\")\n\n # Load the dataset\n with gzip.open(filename, 'rb') as fin:\n dataset = pickle.load(fin)\n\n # format: a dictionary with three keys:\n # \"examples\": a numpy.ndarray with 2 dimensions where each row is an\n # example\n # \"palette\": a dictionary mapping colors to integers\n # \"r_palette\": the reverse of the palette, mapping from integers to colors\n\n dataset[\"examples\"] = numpy.array([\n explode_example(ex, len(dataset[\"palette\"]))\n for ex in dataset[\"examples\"]\n ])\n\n dataset[\"examples\"] = theano.shared(\n numpy.asarray(dataset[\"examples\"], dtype=theano.config.floatX),\n borrow = False\n )\n\n return dataset\n\nclass NeuroLayer:\n \"\"\"\n A layer of neurons whose weights can be used for both interpretation and\n reconstruction. It also has functions for training to denoise on a given\n input.\n \"\"\"\n\n def __init__(self, numpy_rng, input_size, output_size):\n # RNG for adding noise to input:\n self.theano_rng = theano.tensor.shared_randomstreams.RandomStreams(\n numpy_rng.randint(2 ** 30)\n )\n\n self.input_size = input_size\n self.output_size = output_size\n\n # Weights and offsets for deconstruction (input -> output):\n wsx = int(numpy_rng.uniform(low=0, high=input_size))\n wsy = int(numpy_rng.uniform(low=0, high=output_size))\n self.weights = theano.shared(\n value=numpy.asarray(\n numpy_rng.uniform(\n low=-4 * numpy.sqrt(6. / (input_size + output_size)),\n high=4 * numpy.sqrt(6. 
/ (input_size + output_size)),\n size=(input_size*2, output_size*2)\n )[wsx:wsx+input_size,wsy:wsy+output_size],\n dtype=theano.config.floatX\n ),\n name='weights',\n borrow=True\n )\n\n self.de_offsets = theano.shared(\n value=numpy.zeros(\n output_size,\n dtype=theano.config.floatX\n ),\n name='de_offsets',\n borrow=True\n )\n\n # Offsets for reconstruction (output -> input):\n # Note that the weights are shared for both directions.\n self.re_offsets = theano.shared(\n value=numpy.zeros(\n input_size,\n dtype=theano.config.floatX\n ),\n name='re_offsets',\n borrow=True\n )\n\n self.params = [\n self.weights,\n self.de_offsets,\n self.re_offsets\n ]\n\n # Evaluation functions:\n def get_deconstruct(self, input):\n return T.nnet.sigmoid(\n T.dot(input, self.weights) + self.de_offsets\n )\n\n def get_reconstruct(self, output):\n return T.nnet.sigmoid(\n T.dot(output, self.weights.T) + self.re_offsets\n )\n\n # Training functions:\n def get_cost_and_updates(self, input, corruption, learning_rate):\n \"\"\"\n Returns a theano expression for the cost function of the network on the\n given input, along with an update list for updating the weights based on\n the cost gradient.\n \"\"\"\n\n corrupt = self.theano_rng.binomial(\n size=input.shape,\n n=1, # number of trials\n p=1 - corruption, # probability of success per trial\n dtype=theano.config.floatX\n )\n static = self.theano_rng.random_integers(\n size=input.shape,\n low=0,\n high=1\n )\n corrupted = (\n corrupt * input\n + (1 - corrupt) * static\n )\n\n rep = self.get_deconstruct(corrupted) # internal representation\n rec = self.get_reconstruct(rep) # reconstructed input\n #cost = T.sum(input * T.log(rec) + (1 - input) * T.log(1 - rec))\n cost = T.sum((input - rec) ** 2)\n\n # the gradients of the cost w/r/t/ each parameter:\n gradients = T.grad(cost, self.params)\n # generate the list of updates\n updates = [\n (param, param - learning_rate * gr)\n for param, gr in zip(self.params, gradients)\n ] + self.theano_rng.updates()\n\n return (cost, updates)\n\nclass NeuralNet:\n \"\"\"\n A stack of auto-encoders.\n \"\"\"\n def __init__(self, numpy_rng, input_size, layer_sizes, output_size):\n self.rng = numpy_rng\n self.input_size = input_size\n self.output_size = output_size\n self.layers = []\n i_size = input_size\n for i in range(len(layer_sizes)):\n o_size = layer_sizes[i]\n self.layers.append(NeuroLayer(numpy_rng, i_size, o_size))\n i_size = o_size\n self.layers.append(NeuroLayer(numpy_rng, i_size, output_size))\n\n def get_deconstruct(self, input, limit=-1):\n result = input\n if limit < 0:\n limit = len(self.layers)\n for i in range(limit):\n result = self.layers[i].get_deconstruct(result)\n return result\n\n def get_reconstruct(self, output, limit=-1):\n result = output\n if limit < 0:\n limit = len(self.layers)\n for i in range(limit-1, -1, -1):\n result = self.layers[i].get_reconstruct(result)\n return result\n\n def get_training_functions(self, corruption_rates, ae_learning_rates):\n \"\"\"\n Returns a theano shared variable for use as input and a list of functions\n for training each layer of the stack.\n \"\"\"\n functions = []\n training_input = T.vector(name=\"training_input\", dtype=theano.config.floatX)\n for i in range(len(self.layers)):\n inp = self.get_deconstruct(training_input, limit=i)\n cost_function, updates = self.layers[i].get_cost_and_updates(\n inp,\n corruption_rates[i],\n ae_learning_rates[i]\n )\n functions.append(\n theano.function(\n inputs = [training_input],\n outputs = cost_function,\n updates = updates,\n 
name = \"training_function_layer_{}\".format(i)\n )\n )\n return functions\n\n def get_specialization_function(self, input, cv_extract, learning_rate):\n \"\"\"\n Returns a theano function that uses an example to specialize the network by\n training it to predict the region of the input selected by the given\n cv_extract function.\n \"\"\"\n pfunc = self.get_deconstruct(input)\n\n compare_to = cv_extract(input)\n\n cost = T.sum((compare_to - pfunc) ** 2)\n\n params = []\n for l in self.layers:\n params.extend(l.params[:-1]) # ignore the reconstruction offsets\n\n gradients = T.grad(cost, params)\n\n # generate the list of updates\n updates = [\n (param, param - learning_rate * gr)\n for param, gr in zip(params, gradients)\n ]\n # TODO: are these really unnecessary here?\n # + [l.theano_rng.updates() for l in self.layers]\n\n return theano.function(\n inputs = [input],\n outputs = cost,\n updates = updates,\n name = \"specialization_function\"\n )\n\n def pretrain(self, examples, epoch_counts, corruption_rates, learning_rates):\n \"\"\"\n Trains the network for autoencoding on the given examples, given lists of\n epoch counts, corruption rates, and learning rates each equal in length to\n the number of layers in the stack.\n \"\"\"\n tfs = self.get_training_functions(corruption_rates, learning_rates)\n indices = list(range(examples.get_value(borrow=True).shape[0]))\n start_time = timeit.default_timer()\n for i in range(len(self.layers)):\n # TODO: batches?\n for epoch in range(epoch_counts[i]):\n self.rng.shuffle(indices)\n costs = []\n for j in indices:\n cost = tfs[i](examples.get_value(borrow=True)[j].reshape(-1))\n costs.append(cost)\n debug(\n \"... [{}] epoch {: 3d} at layer {: 2d} done {} ...\".format(\n str(datetime.timedelta(seconds=timeit.default_timer()-start_time)),\n epoch + 1,\n i,\n \"(min/avg cost {:0.3f}/{:0.3f})\".format(\n float(min(costs)),\n float(sum(costs)/float(len(costs))),\n )\n )\n )\n\n def train(self, examples, cv_extract, epochs, learning_rate):\n \"\"\"\n Specializes the network for prediction on the given examples, using the\n given center extract function, the given number of epochs, and the given\n learning rate.\n \"\"\"\n input = T.vector(name=\"training_input\", dtype=theano.config.floatX)\n tf = self.get_specialization_function(input, cv_extract, learning_rate)\n indices = list(range(examples.get_value(borrow=True).shape[0]))\n start_time = timeit.default_timer()\n # TODO: batches?\n for epoch in range(epochs):\n self.rng.shuffle(indices)\n costs = []\n for j in indices:\n cost = tf(examples.get_value(borrow=True)[j].reshape(-1))\n costs.append(cost)\n debug(\n \"... [{}] epoch {: 3d} done {} ...\".format(\n str(datetime.timedelta(seconds=timeit.default_timer()-start_time)),\n epoch + 1,\n \"(min/avg cost {:0.3f}/{:0.3f})\".format(\n float(float(min(costs))),\n float(float(sum(costs)/float(len(costs))))\n )\n )\n )\n\ndef get_central_values(flat_input, input_size, center_size, palette_size):\n \"\"\"\n Takes a flat array which is assumed to represent input_size by input_size by\n palette_size data, and returns a flat array that represents the center_size\n by center_size central values of the original array.\n \"\"\"\n lc = input_size//2 - center_size//2\n rs = flat_input.reshape((input_size, input_size, palette_size))\n sel = rs[lc:lc+center_size, lc:lc+center_size, :]\n return sel.reshape([-1])\n\ndef explode_example(data, n_layers):\n \"\"\"\n Returns an array with an extra dimension that encodes that data in the given\n array as a one-hot encoding. 
The values in the array should all be between 0\n and n_layers (exclusive).\n \"\"\"\n result = numpy.zeros(\n list(data.shape) + [n_layers],\n dtype=theano.config.floatX\n )\n rs = data.reshape(-1)\n for i, x in enumerate(rs):\n coords = []\n irem = i\n for j in range(len(data.shape)):\n if data.shape[j+1:]:\n b = functools.reduce(lambda x, y: x*y, data.shape[j+1:], 1)\n coords.append(irem // b)\n irem = irem % b\n else:\n coords.append(irem)\n result[tuple(coords + [x])] = 1\n return result\n\ndef implode_result(data):\n \"\"\"\n Returns an array with one fewer dimension than the input, where the input's\n final dimension is taken to represent a one-hot encoding of the desired data.\n \"\"\"\n dshape = data.shape[:-1]\n n_layers = data.shape[-1]\n\n result = numpy.zeros(dshape, dtype=theano.config.floatX)\n rs = data.reshape(-1, n_layers)\n\n for i, enc in enumerate(rs):\n coords = []\n irem = i\n for j in range(len(dshape)):\n if data.shape[j+1:]:\n b = functools.reduce(lambda x, y: x*y, dshape[j+1:], 1)\n coords.append(irem // b)\n irem = irem % b\n else:\n coords.append(irem)\n result[tuple(coords)] = numpy.argmax(enc)\n return result\n\ndef fake_palette(size):\n result = {}\n fp = [\n (0xdd, 0x00, 0x00),\n (0xee, 0x99, 0x00),\n (0xff, 0xee, 0x00),\n (0x00, 0x99, 0x00),\n (0x11, 0x22, 0xee),\n (0x00, 0x00, 0x55),\n (0x55, 0x00, 0x99),\n ]\n for i in range(size):\n inc = 0x33 * (i // len(fp))\n e = fp[i%len(fp)]\n result[i] = (\n min(e[0] + inc, 0xff),\n min(e[1] + inc, 0xff),\n min(e[2] + inc, 0xff)\n )\n return result\n\ndef build_network(\n examples,\n window_size=8,\n predict_size=2,\n palette_size=16,\n batch_size = 1, # TODO: Implement this\n #layer_sizes = (0.2,),\n #ae_epochs = (1,1,),# (30,),\n #corruption_rates = (0.3,0.3,),\n #ae_learning_rates = (0.05,0.05,), # (0.005,)\n #sp_epochs = 1,\n #sp_learning_rate = 0.05,\n #layer_sizes = (0.7,),\n #ae_epochs = (5,5,),# (30,),\n #corruption_rates = (0.3,0.3,),\n #ae_learning_rates = (0.05,0.05,), # (0.005,)\n #sp_epochs = 5,\n #sp_learning_rate = 0.05,\n #layer_sizes = (0.8,0.5),\n #ae_epochs = (12, 12, 12),\n #corruption_rates = (0.3, 0.3, 0.2),\n #ae_learning_rates = (0.05, 0.05, 0.05),\n #sp_epochs = 20,\n #sp_learning_rate = 0.05,\n layer_sizes = (0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1),\n ae_epochs = (14, 14, 14, 14, 14, 14, 14, 14, 14, 14),\n corruption_rates = (0.4, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2),\n ae_learning_rates = (\n 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04\n ),\n sp_epochs = 20,\n sp_learning_rate = 0.05,\n):\n \"\"\"\n Builds and trains a network for recognizing image fragments.\n \"\"\"\n # Calculate input and layer sizes:\n input_size = window_size * window_size * palette_size\n hidden_sizes = [int(input_size * ls) for ls in layer_sizes]\n final_size = predict_size * predict_size * palette_size\n\n # Calculate the number of training batches:\n n_train_batches = examples.get_value(borrow=True).shape[0]\n n_train_batches //= batch_size\n\n # Set up the stacked denoising autoencoders:\n numpy_rng = numpy.random.RandomState(465746)\n net = NeuralNet(\n numpy_rng=numpy_rng,\n input_size = input_size,\n layer_sizes = hidden_sizes,\n output_size = final_size\n )\n\n # Visualize the network pre-training:\n vis_network(\n net,\n fake_palette(palette_size),\n window_size=window_size,\n outfile=\"vis-pre.png\"\n )\n\n # Train the network for autoencoding:\n debug(\"... 
pretraining the network ...\")\n start_time = timeit.default_timer()\n net.pretrain(\n examples,\n ae_epochs,\n corruption_rates,\n ae_learning_rates\n )\n end_time = timeit.default_timer()\n debug(\n \"... pretraining finished in {} ...\".format(\n str(datetime.timedelta(seconds=end_time - start_time))\n )\n )\n\n # Specialize the network for generation:\n debug(\"... specializing the network ...\")\n start_time = timeit.default_timer()\n net.train(\n examples,\n lambda a: get_central_values(a, window_size, predict_size, palette_size),\n sp_epochs,\n sp_learning_rate\n )\n end_time = timeit.default_timer()\n debug(\n \"... specialization finished in {} ...\".format(\n str(datetime.timedelta(seconds=end_time - start_time))\n )\n )\n\n return net\n\ndef write_image(data, palette, outdir, outfile):\n size = data.shape\n img = Image.new(\"RGB\", size)\n pixels = img.load()\n\n for x in range(size[0]):\n for y in range(size[1]):\n idx = int(data[x, y])\n if idx in palette:\n pixels[x, y] = palette[idx]\n else:\n pixels[x, y] = (255, 0, 255)\n\n img.save(os.path.join(outdir, outfile))\n\ndef write_grayscale(data, outdir, outfile, nbest=50):\n rs = data.reshape(-1)\n sqside = int((len(rs)**0.5) + 0.99999)\n shape = (sqside, sqside)\n\n normed = data / numpy.max(data)\n best = numpy.argsort(normed, axis=None)[-nbest:]\n\n img = Image.new(\"RGBA\", shape)\n pixels = img.load()\n\n i = 0\n for x in range(sqside):\n for y in range(sqside):\n if i < len(normed):\n g = int(normed[i] * 256)\n r = g\n a = 255\n if i in best:\n r = 0\n else:\n g = 0\n a = 0\n i += 1\n pixels[x, y] = (r, g, g, a)\n\n img.save(os.path.join(outdir, outfile))\n\ndef vis_network(\n net,\n palette,\n window_size=8,\n show=(12, 12),\n outdir=\"out\",\n outfile=\"vis.png\"\n):\n palette_size = len(palette)\n input = T.vector(name=\"input\", dtype=theano.config.floatX)\n output = T.vector(name=\"output\", dtype=theano.config.floatX)\n\n enc = theano.function(\n inputs=[input],\n outputs=net.get_deconstruct(input)\n )\n\n dec = theano.function(\n inputs=[output],\n outputs=net.get_reconstruct(output)\n )\n\n encoded = enc(\n numpy.zeros(\n (window_size, window_size, palette_size),\n dtype=theano.config.floatX\n ).reshape(-1)\n )\n\n exemplars = []\n\n for i in range(show[0]*show[1]):\n fake = numpy.zeros(encoded.shape, dtype=theano.config.floatX)\n fake = fake.reshape(-1)\n if i >= fake.shape[0]:\n continue\n fake[i] = 1\n fake = fake.reshape(encoded.shape)\n exemplars.append(\n implode_result(\n dec(fake).reshape((window_size, window_size, palette_size))\n )\n )\n\n result = numpy.full(\n ((window_size+1) * show[0], (window_size+1) * show[1]),\n palette_size,\n dtype=theano.config.floatX\n )\n i = 0\n for x in range(show[0]):\n for y in range(show[1]):\n if i < len(exemplars):\n result[\n x*(window_size+1):(x+1)*(window_size+1) - 1,\n y*(window_size+1):(y+1)*(window_size+1) - 1\n ] = exemplars[i]\n i += 1\n\n fp = copy.deepcopy(palette)\n fp[palette_size] = (0, 0, 0)\n\n write_image(\n result,\n fp,\n outdir,\n outfile\n )\n\ndef build_ae_munge(examples, net, nbest=2):\n # Express our inputs in terms of the last layer of our neural net, and get\n # the values using the net in its current state:\n n_ex = examples.shape[0]\n exreps, _ = theano.scan(\n fn=lambda i: net.get_deconstruct(examples[i].reshape([-1])),\n sequences=[T.arange(n_ex)]\n )\n exf = theano.function([], exreps)\n exconst = T.constant(exf())\n\n # An input variable:\n input = T.tensor3(name=\"input\", dtype=theano.config.floatX)\n\n # Build an expression for 
computing the net's deconstruction of a variable\n # input, and putting it into a column shape:\n irepcol = net.get_deconstruct(input.reshape([-1])).reshape([-1, 1])\n\n # An expression for getting the dot products between our representations of\n # each example and our representation of the input:\n dot_products = T.dot(exconst, irepcol)\n\n # The \"best\" examples are the ones which are most similar to the encoding of\n # the input:\n whichbest = T.argsort(dot_products, axis=None)[-nbest:].reshape([nbest])\n best = exconst[whichbest,:]\n bestweights = dot_products[whichbest].reshape([nbest])\n\n # Normalize the nbest entries and combine them:\n norm = bestweights / T.sum(bestweights)\n combined = T.dot(norm, best)\n\n rec = net.get_reconstruct(combined).reshape(input.shape)\n\n munge = theano.function(\n name=\"munge\",\n inputs=[input],\n outputs=[dot_products, rec]\n )\n\n # TODO: Get rid of this?\n #munge = theano.function(\n # inputs=[input],\n # outputs=net.get_reconstruct(\n # net.get_deconstruct(input.reshape(-1))\n # ).reshape(input.shape)\n #)\n\n return munge\n\ndef build_munge(net, patch_size):\n input = T.tensor3(name=\"input\", dtype=theano.config.floatX)\n predict = net.get_deconstruct(input.reshape([-1]))\n result = predict.reshape([patch_size, patch_size, input.shape[2]])\n return theano.function(\n name=\"munge\",\n inputs=[input],\n outputs=result\n )\n\ndef get_net(\n data=None,\n outdir=\"data\",\n outfile=\"network.pkl.gz\",\n center_size=2,\n rebuild=False\n):\n fn = os.path.join(outdir, outfile)\n if not data:\n # Load data:\n data = load_data()\n\n ws = data[\"window_size\"]\n hws = int(ws/2)\n ps = len(data[\"palette\"])\n r_palette = data[\"r_palette\"]\n\n if rebuild or not os.path.exists(fn):\n debug(\"... building network from scratch ...\")\n # Build network:\n net = build_network(\n data[\"examples\"],\n window_size=ws,\n predict_size=center_size,\n palette_size=ps\n )\n\n debug(\"... pickling trained network ...\")\n with gzip.open(fn, 'wb') as fout:\n pickle.dump(net, fout)\n\n debug(\"... visualizing trained network ...\")\n vis_network(\n net,\n r_palette,\n window_size=ws\n )\n else:\n debug(\"... 
loading pickled network ...\")\n with gzip.open(fn, 'rb') as fin:\n net = pickle.load(fin)\n\n return net\n\n\ndef generate_image(\n outdir=\"out\",\n outfile = \"result.lvl.png\",\n #size=(128,64),\n size=(32,32),\n patch_size=2,\n step_size=1,\n cycles=1,\n ini=\"distribution\"\n):\n # Load data:\n data = load_data()\n\n ws = data[\"window_size\"]\n hws = int(ws/2)\n ps = len(data[\"palette\"])\n border = data[\"border\"]\n r_palette = data[\"r_palette\"]\n fr_dist = data[\"fr_dist\"]\n exemplar = data[\"exemplar\"]\n\n net = get_net(data=data, center_size=patch_size, rebuild=False)\n\n if ini == \"random\":\n result = numpy.random.random_integers(\n 0,\n ps - 2, # avoid the last entry, which is the border value\n (size[0] + 2*ws, size[1] + 2*ws)\n )\n elif ini == \"shuffle\":\n result = numpy.zeros((size[0] + 2*ws, size[1] + 2*ws))\n ex = exemplar.reshape(-1)\n numpy.random.shuffle(ex)\n ex = ex.reshape(exemplar.shape)[:size[0],:size[1]]\n result[ws:size[0]+ws,ws:size[1]+ws] = ex\n elif ini == \"distribution\":\n result = numpy.zeros((size[0] + 2*ws, size[1] + 2*ws))\n for x in range(ws, size[0] + ws):\n for y in range(ws, size[1] + ws):\n sofar = 0\n choice = numpy.random.uniform(0, 1)\n for w, v in fr_dist:\n sofar += w\n if sofar >= choice:\n result[x, y] = v\n break\n\n # Set our border data to the border value:\n for x in range(ws):\n for y in range(size[1] + 2*ws):\n result[x,y] = border\n for x in range(size[0] + ws, size[0] + 2*ws):\n for y in range(size[1] + 2*ws):\n result[x,y] = border\n for y in range(ws):\n for x in range(size[0] + 2*ws):\n result[x,y] = border\n for y in range(size[1] + ws, size[1] + 2*ws):\n for x in range(size[0] + 2*ws):\n result[x,y] = border\n\n write_image(result, r_palette, outdir, \"pre.lvl.png\")\n\n result = explode_example(result, ps)\n\n indices = []\n for x in range(ws - hws, size[0] + ws - hws, step_size):\n for y in range(ws - hws, size[1] + ws - hws, step_size):\n indices.append((x, y))\n\n debug(\"... starting image generation ...\")\n munge = build_munge(net, patch_size)\n\n for epoch in range(cycles):\n numpy.random.shuffle(indices)\n patch = 0\n for x, y in indices:\n if (patch % 50 == 0):\n debug(\"... generating patch {}/{} ...\".format(patch + 1, len(indices)))\n patch += 1\n\n if epoch == 0 and patch == 20:\n write_image(\n implode_result(result),\n r_palette,\n outdir,\n \"patched.lvl.png\"\n )\n\n result[\n x + ws//2 - patch_size//2:x + ws//2 - patch_size//2 + patch_size,\n y + ws//2 - patch_size//2:y + ws//2 - patch_size//2 + patch_size,\n :\n ] = munge(result[x:x+ws,y:y+ws,:])\n\n debug(\"... generation cycle {}/{} completed ...\".format(epoch + 1, cycles))\n\n result = implode_result(result)\n debug(\"... writing result image ...\")\n write_image(result, r_palette, outdir, outfile)\n debug(\"... 
done.\")\n\ndef test_explode(filename=\"data/examples.pkl.gz\", size=8):\n # Load the dataset\n with gzip.open(filename, 'rb') as fin:\n dataset = pickle.load(fin)\n\n ex = dataset[\"examples\"][0]\n print(ex)\n exr = ex.reshape(size, size)\n print(exr)\n expl = explode_example(exr, len(dataset[\"palette\"]))\n print(expl)\n impl = implode_result(expl)\n print(impl)\n\n expl2 = explode_example(ex, len(dataset[\"palette\"]))\n impl2 = implode_result(expl2.reshape((size, size, 15)))\n print(impl2)\n print(impl2[7, 4], impl2[7, 5])\n\n img = Image.new(\"RGB\", (size, size))\n pixels = img.load()\n\n i = 0\n for x in range(impl2.shape[0]):\n for y in range(impl2.shape[1]):\n g = int(3*impl2[x, y])\n pixels[x, y] = (g, g, g)\n i += 1\n print(impl2[x, y], end=\" \")\n print()\n\n img.save(\"t.png\")\n\nif __name__ == \"__main__\":\n #test_explode()\n #generate_image(cycles=1, ini=\"distribution\")\n generate_image(cycles=1, ini=\"shuffle\")\n" ]
[ [ "numpy.sqrt", "numpy.asarray", "numpy.random.shuffle", "numpy.full", "numpy.max", "numpy.random.uniform", "numpy.argmax", "numpy.random.random_integers", "numpy.argsort", "numpy.random.RandomState", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PhoeniXuzoo/NU-Projects
[ "a217ad46e6876ceffb3dec1d6e52f775674b2e8b" ]
[ "EE475/Ch6P16.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef readData(csvname):\n data = np.loadtxt(csvname, delimiter=',')\n x = data[:-1, :]\n y = data[-1:, :]\n\n return x, y\n\nfot = lambda x : np.exp(-x) / (1 + np.exp(-x))\nsot = lambda x : ( 1 / (1 + np.exp(x))) * (1 - ( 1 / (1 + np.exp(x))))\n\n# power is y_p * x_p.T * w\n# firstOrderTerm is e^-power / (1 + e^-power)\ndef first_order(x, y, w, class_weight, power):\n total = np.zeros(w.shape)\n firstOrderTerm = fot(power)\n\n for i in range(np.size(y)):\n total += class_weight[i] * firstOrderTerm[:,i] * y[:,i] * x[:,[i]]\n\n return (-1) * (total / np.size(y))\n\n\ndef second_order(x, y, w, class_weight, power):\n total = np.zeros([x.shape[0], x.shape[0]])\n secondOrderTerm = sot(power)\n\n for i in range(np.size(y)):\n total += class_weight[i] * secondOrderTerm[:, i] * x[:, [i]] * x[:, [i]].T\n\n return total / np.size(y)\n\n\ndef newton_method(x, y, w, class_weight):\n power = y * np.transpose(np.dot(x.T, w))\n firstOrder = first_order(x, y, w, class_weight, power)\n secondOrder = second_order(x, y, w, class_weight, power)\n\n return w - np.dot(np.linalg.inv(secondOrder), firstOrder)\n\ndef costFunc(x, y, w, class_weight):\n temp = np.log(1 + np.exp(-y*np.transpose(np.dot(np.transpose(x), w))))\n cost = 0\n for i in range(np.size(y)):\n cost += temp[0][i] * class_weight[i]\n return cost / float(np.size(y))\n\nif __name__ == \"__main__\":\n csvname = '3d_classification_data_v2_mbalanced.csv'\n x, y = readData(csvname)\n w = np.ones([x.shape[0] + 1, 1])\n x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)\n\n positiveOneWeight = 7/11\n negativeOneWeight = 4/11\n\n class_weight = []\n for i in range(np.size(y)):\n if (y[:, i] > 0):\n class_weight.append(positiveOneWeight)\n else:\n class_weight.append(negativeOneWeight)\n\n position = x[[1, 2]]\n positiveOneXList = []\n positiveOneYList = []\n negativeOneXList = []\n negativeOneYList = []\n for i in range(position.shape[1]):\n if (y[0][i] > 0):\n positiveOneXList.append(position[0][i])\n positiveOneYList.append(position[1][i])\n else:\n negativeOneXList.append(position[0][i])\n negativeOneYList.append(position[1][i])\n\n plt.scatter(positiveOneXList, positiveOneYList, color='red')\n plt.scatter(negativeOneXList, negativeOneYList, color='blue')\n\n for i in range(5):\n w = newton_method(x, y, w, class_weight)\n\n a = -(w[1][0]/w[2][0])\n b = -(w[0][0]/w[2][0])\n\n foo = lambda x : a * x + b\n i = -0.1\n xList = []\n yList = []\n while (i < 1.1):\n xList.append(i)\n yList.append(foo(i))\n i += 0.1\n\n plt.plot(xList, yList)\n plt.show()\n\n\n\n" ]
[ [ "numpy.dot", "matplotlib.pyplot.scatter", "numpy.linalg.inv", "numpy.ones", "matplotlib.pyplot.plot", "numpy.size", "numpy.transpose", "numpy.exp", "matplotlib.pyplot.show", "numpy.zeros", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vbelz/Lyrics_classifier
[ "57d4fec9b00b3835a71ebdd3c234fed629079f22" ]
[ "prepare_for_training.py" ]
[ "import spacy as spacy_en\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport pickle\nimport os\n\nmodel = spacy_en.load('en_core_web_md')\n\ndef clean_my_text(song):\n\n \"\"\" It filters punctuation, numbers, stop word\n and returns lemmatized words\"\"\"\n\n doc = model(song)\n clean_text = ''\n\n for word in doc:\n\n if (word.is_stop == False) and (word.pos_ != 'PUNCT') and (word.pos_ != 'NUM'):\n\n word = word.lemma_\n clean_text += word + ' '\n\n return clean_text\n\ndef keep_english_for_spacy_nn(df):\n \"\"\"This function takes the DataFrame for songs\n and keep songs with english as main language\n for english version of spacy neural network for word processing\"\"\"\n\n #Keep only english for spacy NN English preprocessing words\n #Network for other languages like french, spanish, portuguese are also available\n df = df.loc[df['Main Language'] == 'en',:]\n #Drop the translation column not use for lyrics in english\n df.drop(['English Translation Lyrics'],axis =1,inplace = True)\n\n return df\n\ndef apply_spacy_nn_to_DataFrame(df):\n \"\"\"Apply reduction of words using clean_my_text Function\n to the lyrics column\"\"\"\n\n df['Text Lyrics'] = df['Text Lyrics'].apply(clean_my_text)\n\n return df\n\ndef save_transform_to_disk(cv, tf, folder_save):\n\n countvectorfile = os.path.join(folder_save, 'countvector.sav')\n pickle.dump(cv, open(countvectorfile, 'wb'))\n\n Tfidfile = os.path.join(folder_save, 'Tfidfile.sav')\n pickle.dump(tf, open(Tfidfile, 'wb'))\n\n return\n\ndef prepare_training(df_read, folder_save):\n \"\"\"This function takes the database of artists as input\n and the folder where to save transform operations on data\n and return X and y for training\"\"\"\n\n #Songs in english for spacy nn (disable if multilanguage)\n df_prep = keep_english_for_spacy_nn(df_read)\n #Apply spacy nn to reduce dimension of text\n df_prep = apply_spacy_nn_to_DataFrame(df_prep)\n #Count vecorizer of words\n cv = CountVectorizer()\n corpus_vec = cv.fit_transform(df_prep['Text Lyrics'])\n #Tfidf Transform\n tf = TfidfTransformer()\n transform_vec = tf.fit_transform(corpus_vec)\n #Save transform to disk to reuse for predictions\n save_transform_to_disk(cv, tf, folder_save)\n #todense() to remove sparse formatting\n df_word_vec = pd.DataFrame(transform_vec.todense(), columns=cv.get_feature_names())\n y = df_prep['Name']\n X = df_word_vec\n\n return X,y\n" ]
[ [ "sklearn.feature_extraction.text.CountVectorizer", "sklearn.feature_extraction.text.TfidfTransformer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
readthedocs-assistant/autocnet
[ "579cccd0edc4cd870b5d9671165ebd830f1112b8", "579cccd0edc4cd870b5d9671165ebd830f1112b8", "579cccd0edc4cd870b5d9671165ebd830f1112b8" ]
[ "autocnet/matcher/tests/test_cpu_ring_matcher.py", "autocnet/spatial/surface.py", "autocnet/utils/utils.py" ]
[ "from unittest import mock\n\nimport numpy as np\nimport pytest\n\nfrom autocnet.matcher import cpu_ring_matcher as rm\n\[email protected]('arr, expected', [\n (np.array([[1,0],[1,1], [2,3]]), (1,2)),\n (np.array([[0,0], [1,1], [2,2]]), (3,2)\n )])\ndef test_check_pidx_duplicates(arr, expected):\n pidx = rm.check_pidx_duplicates(arr)\n assert pidx.shape == expected\n\[email protected](\"a, b, threshold, expected\", [\n # Tests standard call\n (np.array([1,2,3]), \n np.array([[1,2,3], [4,5,6], [7,8,9]]),\n 1.5, \n np.array([0])),\n # Tests call where distances are too close\n (np.array([1,2,3]),\n np.array([[7,8,9], [1,2,4], [1,2,4.1]]),\n 1.5, \n None),\n # Tests call with close distances where the threshold is low\n (np.array([1,2,3]),\n np.array([[7,8,9], [1,2,4], [1,2,4.1]]),\n 1., \n 1),\n # Tests call when np.argmin will fail\n (np.array([np.nan, np.nan]),\n np.array([[np.nan, np.nan], [np.nan, np.nan]]),\n 1.5,\n None),\n # Tests call where descriptors are identical\n (np.array([1,2,3]),\n np.array([[1,2,3], [1,2,3], [1,2,3]]),\n 1.5,\n None)\n])\ndef test_sift_match(a, b, threshold, expected):\n assert rm.sift_match(a, b, thresh=threshold) == expected \n\[email protected](\"x,y, eidx\",[(np.array([[1,1],[2,2],[3,3], [4,4], [5,5]]),\n np.array([[1.1,1.0],[1.9,1.95],[3,3], [-4,-4], [5,5]]),\n np.array([[0,1,2,4]])),\n (np.array([[1,1], [5,5]]),\n np.array([[1,1], [3,3]]),\n [])\n ])\ndef test_ransac_permute(x, y, eidx):\n xp, yp, idx = rm.ransac_permute(x, y, 0.2, 2)\n np.testing.assert_array_equal(idx, eidx)\n\n\ndef test_add_correspondences():\n func = 'autocnet.matcher.cpu_ring_matcher.ring_match_one'\n with mock.patch(func, return_value=1):\n in_feats = np.array([[1,1], [2,2]])\n ref_feats = np.array([[1,1],[2,2],[3,3], [4,4], [5,5]])\n tar_feats = np.array([[1.1,1.0],[1.9,1.95],[3,3], [-4,-4], [5,5]])\n \n rm.add_correspondences(in_feats, ref_feats, tar_feats, None, None,\n (0,6), (0,6),(0,1))\n\ndef test_dynamically_grow():\n x = np.ones((3,3))\n y = rm.dynamically_grow_array(x,6)\n assert y.shape == (9,3)\n \ndef test_dynamically_grow_dtype():\n x = np.ones((3,3), dtype=np.int8)\n y = rm.dynamically_grow_array(x,6)\n assert np.issubdtype(y.dtype, np.float64)\n\n y = rm.dynamically_grow_array(x,6,dtype=np.int8)\n assert np.issubdtype(y.dtype, np.int8)\n\ndef test_points_in_ring():\n x = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4])\n for i in np.arange(0.5, 4.5):\n assert np.sum(rm.points_in_ring(x, i, i+1)) == 5\n\ndef test_ring_match():\n ref_feats = np.array([[1,1,1,1],\n [2,2,2,2],\n [3,3,3,3],\n [4,4,4,4]])\n tar_feats = np.array([[2,2,1.1,1],\n [2.5, 2.5, 1.1, 1.1],\n [3,3,2.1,2.1],\n [3.5, 3.5, 2.2, 2.2],\n [4,4,2.9,2.9],\n [4.5, 4.5, 3.0, 3.0],\n [5,5, 4.0, 4.1],\n [5.5, 5.5, 4.1, 4.1]])\n ref_desc = np.array([[0,0,0,0],\n [1,1,1,1],\n [2,2,2,2],\n [3,3,3,3]])\n tar_desc = np.array([[0,0,0,0],\n [6,7,8,9],\n [1,1,1,1],\n [6,7,8,9],\n [2,2,2,2],\n [6,7,8,9],\n [3,3,3,3],\n [6,7,8,9]])\n\n ring_radius = 0.5\n max_radius = 1\n target_points = 2\n tolerance = 0.1\n gr, gt, p_idx, ring = rm.ring_match(ref_feats, tar_feats, ref_desc, tar_desc,\n ring_radius=ring_radius, max_radius=max_radius,\n target_points=target_points, tolerance_val=tolerance,\n iteration_break_point=2)\n assert ring == (0.0, 0.5)\n sorted_pidx = p_idx[p_idx[:,0].astype(np.int).argsort()]\n np.testing.assert_array_equal(sorted_pidx,\n np.array([[0,0],[1,2],[2,4],[3,6]]))\n", "\"\"\"\nA set of classes that represent the target surface. 
Each class implements the\nget_height and get_radius functions for computing the height and radius respectively\nat a given ground location (geocentric latitude and longitude).\n\"\"\"\n\nimport numpy as np\nfrom plio.io.io_gdal import GeoDataset\n\nclass EllipsoidDem:\n \"\"\"\n A biaxial ellipsoid surface model.\n \"\"\"\n\n def __init__(self, semi_major, semi_minor = None):\n \"\"\"\n Create an ellipsoid DEM from a set of radii\n\n Parameters\n ----------\n semi_major : float\n The equatorial semi-major radius of the ellipsoid.\n semi_minor : float\n The polar semi-minor radius of the ellipsoid.\n \"\"\"\n self.a = semi_major\n self.b = semi_major\n self.c = semi_major\n\n if semi_minor is not None:\n self.c = semi_minor\n\n def get_height(self, lat, lon):\n \"\"\"\n Get the height above the ellipsoid at a ground location\n\n Parameters\n ----------\n lat : float\n The geocentric latitude in degrees\n lon : float\n The longitude in degrees\n \"\"\"\n return 0\n\n def get_radius(self, lat, lon):\n \"\"\"\n Get the radius at a ground location\n\n Parameters\n ----------\n lat : float\n The geocentric latitude in degrees\n lon : float\n The longitude in degrees\n \"\"\"\n cos_lon = np.cos(np.deg2rad(lon))\n sin_lon = np.sin(np.deg2rad(lon))\n cos_lat = np.cos(np.deg2rad(lat))\n sin_lat = np.sin(np.deg2rad(lat))\n\n denom = self.b * self.b * cos_lon * cos_lon\n denom += self.a * self.a * sin_lon * sin_lon\n denom *= self.c * self.c * cos_lat * cos_lat\n denom += self.a * self.a * self.b * self.b * sin_lat * sin_lat\n\n return (self.a * self.b * self.c) / np.sqrt(denom)\n\nclass GdalDem(EllipsoidDem):\n \"\"\"\n A raster DEM surface model.\n \"\"\"\n\n def __init__(self, dem, semi_major, semi_minor = None, dem_type=None):\n \"\"\"\n Create a GDAL dem from a dem file\n\n Parameters\n ----------\n dem : str\n The DEM file\n semi_major : float\n The equatorial semi-major radius of the reference ellipsoid.\n semi_minor : float\n The polar semi-minor radius of the reference ellipsoid.\n dem_type : str\n The type of DEM, either height above reference ellipsoid or radius.\n \"\"\"\n super().__init__(semi_major, semi_minor)\n dem_types = ('height', 'radius')\n if dem_type is None:\n dem_type = dem_types[0]\n if dem_type not in dem_types:\n raise ValueError(f'DEM type {dem_type} is not a valid option.')\n self.dem = GeoDataset(dem)\n self.dem_type = dem_type\n\n def get_raster_value(self, lat, lon):\n \"\"\"\n Get the value of the dem raster at a ground location\n\n Parameters\n ----------\n lat : float\n The geocentric latitude in degrees\n lon : float\n The longitude in degrees\n \"\"\"\n px, py = self.dem.latlon_to_pixel(lat, lon)\n return self.dem.read_array(1, [px, py, 1, 1])[0][0]\n\n def get_height(self, lat, lon):\n \"\"\"\n Get the height above the ellipsoid at a ground location\n\n Parameters\n ----------\n lat : float\n The geocentric latitude in degrees\n lon : float\n The longitude in degrees\n \"\"\"\n height = self.get_raster_value(lat, lon)\n if self.dem_type == 'radius':\n height -= super().get_radius(lat, lon)\n return height\n\n def get_radius(self, lat, lon):\n \"\"\"\n Get the radius at a ground location\n\n Parameters\n ----------\n lat : float\n The geocentric latitude in degrees\n lon : float\n The longitude in degrees\n \"\"\"\n radius = self.get_raster_value(lat, lon)\n if self.dem_type == 'height':\n radius += super().get_radius(lat, lon)\n return radius\n", "import importlib\nimport itertools\nimport json\n\nfrom functools import reduce, singledispatch, 
update_wrapper\n\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\n\nfrom osgeo import ogr\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.neighbors import KDTree\n\nfrom scipy.spatial import Delaunay\n\nfrom shapely import geometry\nfrom shapely.geometry import MultiPoint\nfrom shapely.ops import cascaded_union, polygonize\n\n\nclass FailedImport():\n def __init__(self, exception):\n self.exception = exception\n def __getattr__(self, name: str):\n raise self.exception\n\ndef tile(array_size, tilesize=1000, overlap=500):\n stepsize = tilesize - overlap\n if stepsize < 0:\n raise ValueError('Overlap can not be greater than tilesize.')\n # Compute the tiles\n if tilesize >= array_size[1]:\n ytiles = [(0, array_size[1])]\n else:\n ystarts = range(0, array_size[1], stepsize)\n ystops = range(tilesize, array_size[1], stepsize)\n ytiles = list(zip(ystarts, ystops))\n ytiles.append((ytiles[-1][0] + stepsize, array_size[1]))\n\n if tilesize >= array_size[0]:\n xtiles = [(0, array_size[0])]\n else:\n xstarts = range(0, array_size[0], stepsize)\n xstops = range(tilesize, array_size[0], stepsize)\n xtiles = list(zip(xstarts, xstops))\n xtiles.append((xtiles[-1][0] + stepsize, array_size[0]))\n tiles = itertools.product(xtiles, ytiles)\n\n slices = []\n for tile in tiles:\n # xstart, ystart, xcount, ycount\n xstart = tile[0][0]\n ystart = tile[1][0]\n xstop = tile[0][1]\n ystop = tile[1][1]\n pixels = [xstart, ystart,\n xstop - xstart,\n ystop - ystart]\n slices.append(pixels)\n return slices\n\ndef compare_dicts(d, o):\n \"\"\"\n Given two dictionaries, compare them with support for np.ndarray and\n pd.DataFrame objects\n\n Parameters\n ----------\n d : dict\n first dict to compare\n\n o : dict\n second dict to compare\n\n Examples\n --------\n >>> d = {'a':0}\n >>> o = {'a':0}\n >>> compare_dicts(d, o)\n True\n >>> d['a'] = 1\n >>> compare_dicts(d,o)\n False\n >>> d['a'] = np.arange(3)\n >>> o['a'] = np.arange(3)\n >>> compare_dicts(d,o)\n True\n \"\"\"\n for k in o.keys():\n if k not in d.keys():\n return False\n for k, v in d.items():\n if v is None and o[k] is not None:\n return False\n if isinstance(v, pd.DataFrame):\n if not v.equals(o[k]):\n return False\n elif isinstance(v, np.ndarray):\n if not np.allclose(v, o[k]):\n return False\n else:\n if k == '_geodata':\n continue\n if not v == o[k]:\n return False\n return True\n\ndef crossform(a):\n \"\"\"\n Return the cross form, e.g. 
a in the cross product of a b.\n Parameters\n ----------\n a : ndarray\n (3,) vector\n\n Returns\n -------\n a : ndarray\n (3,3)\n \"\"\"\n return np.array([[0, -a[2], a[1]],\n [a[2], 0, -a[0]],\n [-a[1], a[0], 0]])\n\n\ndef normalize_vector(line):\n \"\"\"\n Normalize a standard form line\n\n Parameters\n ----------\n line : ndarray\n Standard form of a line (Ax + By + C = 0)\n\n Returns\n -------\n line : ndarray\n The normalized line\n\n Examples\n --------\n >>> x = np.array([3, 1, 2])\n >>> nv = normalize_vector(x)\n >>> print(np.round(nv, 6)) # For doc test float percision\n [0.801784 0.267261 0.534522]\n \"\"\"\n if isinstance(line, pd.DataFrame):\n line = line.values\n n = np.sqrt((line[0]**2 + line[1]**2 + line[2]**2))\n return line / abs(n)\n\ndef getnearest(iterable, value):\n \"\"\"\n Given an iterable, get the index nearest to the input value\n\n Parameters\n ----------\n iterable : iterable\n An iterable to search\n\n value : int, float\n The value to search for\n\n Returns\n -------\n : int\n The index into the list\n \"\"\"\n return min(enumerate(iterable), key=lambda i: abs(i[1] - value))\n\n\ndef checkbandnumbers(bands, checkbands):\n \"\"\"\n Given a list of input bands, check that the passed\n tuple contains those bands.\n\n In case of THEMIS, we check for band 9 as band 9 is the temperature\n band required to derive thermal temperature. We also check for band 10\n which is required for TES atmosphere calculations.\n\n Parameters\n ----------\n bands : tuple\n of bands in the input image\n checkbands : list\n of bands to check against\n\n Returns\n -------\n : bool\n True if the bands are present, else False\n \"\"\"\n for c in checkbands:\n if c not in bands:\n return False\n return True\n\n\ndef checkdeplaid(incidence):\n \"\"\"\n Given an incidence angle, select the appropriate deplaid method.\n\n Parameters\n ----------\n incidence : float\n incidence angle extracted from the campt results.\n\n \"\"\"\n if incidence >= 95 and incidence <= 180:\n return 'night'\n elif incidence >=90 and incidence < 95:\n return 'night'\n elif incidence >= 85 and incidence < 90:\n return 'day'\n elif incidence >= 0 and incidence < 85:\n return 'day'\n else:\n return False\n\n\ndef checkmonotonic(iterable, piecewise=False):\n \"\"\"\n Check if a given iterable is monotonically increasing.\n\n Parameters\n ----------\n iterable : iterable\n Any Python iterable object\n\n piecewise : boolean\n If false, return a boolean for the entire iterable,\n else return a list with elementwise monotinicy checks\n\n Returns\n -------\n monotonic : bool/list\n A boolean list of all True if monotonic, or including\n an inflection point\n \"\"\"\n monotonic = [True] + [x < y for x, y in zip(iterable, iterable[1:])]\n if piecewise is True:\n return monotonic\n else:\n return all(monotonic)\n\n\ndef find_in_dict(obj, key):\n \"\"\"\n Recursively find an entry in a dictionary\n\n Parameters\n ----------\n obj : dict\n The dictionary to search\n key : str\n The key to find in the dictionary\n\n Returns\n -------\n item : obj\n The value from the dictionary\n \"\"\"\n if key in obj:\n return obj[key]\n for k, v in obj.items():\n if isinstance(v,dict):\n item = find_in_dict(v, key)\n if item is not None:\n return item\n\n\ndef find_nested_in_dict(data, key_list):\n \"\"\"\n Traverse a list of keys into a dict.\n\n Parameters\n ----------\n data : dict\n The dictionary to be traversed\n key_list: list\n The list of keys to be travered. 
Keys are\n traversed in the order they are entered in\n the list\n\n Returns\n -------\n value : object\n The value in the dict\n \"\"\"\n return reduce(lambda d, k: d[k], key_list, data)\n\n\ndef make_homogeneous(points):\n \"\"\"\n Convert a set of points (n x dim array) to\n homogeneous coordinates.\n\n Parameters\n ----------\n points : arraylike\n n x m array of points, where n is the number\n of points.\n\n Returns\n -------\n : arraylike\n n x m + 1 array of homogeneous points\n \"\"\"\n homogeneous = np.hstack((points, np.ones((points.shape[0], 1))))\n if isinstance(points, pd.DataFrame):\n columns = points.columns.values.tolist() + ['z']\n homogeneous = pd.DataFrame(homogeneous, index=points.index,\n columns=columns)\n return homogeneous\n\n\n\ndef remove_field_name(a, name):\n \"\"\"\n Given a numpy structured array, remove a column and return\n a copy of the remainder of the array\n\n Parameters\n ----------\n a : ndarray\n Numpy structured array\n\n name : str\n of the index (column) to be removed\n\n Returns\n -------\n b : ndarray\n Numpy structured array with the 'name' column removed\n \"\"\"\n names = list(a.dtype.names)\n if name in names:\n names.remove(name)\n b = a[names]\n return b\n\n\ndef calculate_slope(x1, x2):\n \"\"\"\n Calculates the 2-dimensional slope between the points in two dataframes each containing two columns ['x', 'y']\n The slope is calculated from x1 to x2.\n\n Parameters\n ----------\n x1 : dataframe\n Each row is a point with columns ['x', 'y']\n x2 : dataframe\n Each row is a point with columns ['x', 'y']\n\n Returns\n -------\n : dataframe\n A dataframe with the slope between the points in x1 and x2 for each row.\n \"\"\"\n\n\n sl = False\n if isinstance(x1, pd.DataFrame):\n index = x1.index\n sl = True\n x1 = x1.values\n if isinstance(x2, pd.DataFrame):\n x2 = x2.values\n slopes = (x2[:,1] - x1[:,1])/(x2[:,0] - x1[:,0])\n\n if sl:\n slopes = pd.Series(slopes, index=index)\n return slopes\n\n\ndef cartesian(arrays, out=None):\n\n \"\"\"\n Generate a cartesian product of input arrays.\n Parameters\n ----------\n arrays : list of array-like\n 1-D arrays to form the cartesian product of.\n out : ndarray\n Array to place the cartesian product in.\n Returns\n -------\n out : ndarray\n 2-D array of shape (M, len(arrays)) containing cartesian products\n formed of input arrays.\n\n from scikit-learn\n https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py\n \"\"\"\n arrays = [np.asarray(x) for x in arrays]\n shape = (len(x) for x in arrays)\n dtype = arrays[0].dtype\n\n ix = np.indices(shape)\n ix = ix.reshape(len(arrays), -1).T\n\n if out is None:\n out = np.empty_like(ix, dtype=dtype)\n\n for n, arr in enumerate(arrays):\n out[:, n] = arrays[n][ix[:, n]]\n\n return out\n\n\ndef array_to_poly(array):\n \"\"\"\n Generate a geojson geom\n Parameters\n ----------\n array : array-like\n 2-D array of size (n, 2) of x, y coordinates\n\n Returns\n -------\n geom : GeoJson\n geojson containing the necessary data to construct\n a poly gon\n \"\"\"\n array = np.asarray(array)\n size = np.shape(array)\n if size[1] != 2:\n raise ValueError('Array is not the proper size.')\n return\n geom_array = np.append(array, [array[0]], axis = 0).tolist()\n geom = {\"type\": \"Polygon\", \"coordinates\": [geom_array]}\n poly = ogr.CreateGeometryFromJson(json.dumps(geom))\n return poly\n\n\ndef methodispatch(func):\n \"\"\"\n New dispatch decorator that looks at the second argument to\n avoid self\n\n Parameters\n ----------\n func : Object\n Function 
object to be dispatched\n\n Returns\n wrapper : Object\n Wrapped function call chosen by the dispatcher\n ----------\n\n \"\"\"\n dispatcher = singledispatch(func)\n\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[1].__class__)(*args, **kw)\n\n wrapper.register = dispatcher.register\n update_wrapper(wrapper, dispatcher)\n return wrapper\n\n\ndef decorate_class(cls, decorator, exclude=[], *args, **kwargs): # pragma: no cover\n \"\"\"\n Decorates a class with a give docorator. Returns a subclass with\n dectorations applied\n\n Parameters\n ----------\n cls : Class\n A class to be decorated\n\n decorator : callable\n callable to wrap cls's methods with\n\n exclude : list\n list of method names to exclude from being decorated\n\n args, kwargs : list, dict\n Parameters to pass into decorator\n \"\"\"\n if not callable(decorator):\n raise Exception('Decorator must be callable.')\n\n def decorate(cls):\n attributes = cls.__dict__.keys()\n for attr in attributes: # there's propably a better way to do this\n if callable(getattr(cls, attr)):\n name = getattr(cls, attr).__name__\n if name[0] == '_' or name in exclude:\n continue\n setattr(cls, attr, decorator(getattr(cls, attr)))\n return cls\n # return decorated copy (i.e. a subclass with decorations)\n return decorate(type('cls_copy', cls.__bases__, dict(cls.__dict__)))\n\ndef create_decorator(dec, **namespace):\n \"\"\"\n Create a decorator function using arbirary params. The objects passed in\n can be used in the body. Originally designed with the idea of automatically\n updating one object after the decorated object was modified.\n \"\"\"\n\n def decorator(func, *args, **kwargs):\n def wrapper(*args, **kwarg):\n for key in namespace.keys():\n locals()[key] = namespace[key]\n ret = func(*args, **kwargs)\n exec(dec.__code__, locals(), globals())\n if ret:\n return ret\n return wrapper\n return decorator\n\ndef bytescale(data, cmin=None, cmax=None, high=255, low=0):\n \"\"\"\n This is pulled directly from scipy.misc as they are deprecating bytescale.\n\n Byte scales an array (image).\n Byte scaling means converting the input image to uint8 dtype and scaling\n the range to ``(low, high)`` (default 0-255).\n If the input image already has dtype uint8, no scaling is done.\n This function is only available if Python Imaging Library (PIL) is installed.\n Parameters\n ----------\n data : ndarray\n PIL image data array.\n cmin : scalar, optional\n Bias scaling of small values. Default is ``data.min()``.\n cmax : scalar, optional\n Bias scaling of large values. Default is ``data.max()``.\n high : scalar, optional\n Scale max value to `high`. Default is 255.\n low : scalar, optional\n Scale min value to `low`. Default is 0.\n Returns\n -------\n img_array : uint8 ndarray\n The byte-scaled array.\n Examples\n --------\n >>> from scipy.misc import bytescale\n >>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],\n ... [ 73.88003259, 80.91433048, 4.88878881],\n ... 
[ 51.53875334, 34.45808177, 27.5873488 ]])\n >>> bytescale(img)\n array([[255, 0, 236],\n [205, 225, 4],\n [140, 90, 70]], dtype=uint8)\n >>> bytescale(img, high=200, low=100)\n array([[200, 100, 192],\n [180, 188, 102],\n [155, 135, 128]], dtype=uint8)\n >>> bytescale(img, cmin=0, cmax=255)\n array([[91, 3, 84],\n [74, 81, 5],\n [52, 34, 28]], dtype=uint8)\n \"\"\"\n if data.dtype == np.uint8:\n return data\n\n if high > 255:\n raise ValueError(\"`high` should be less than or equal to 255.\")\n if low < 0:\n raise ValueError(\"`low` should be greater than or equal to 0.\")\n if high < low:\n raise ValueError(\"`high` should be greater than or equal to `low`.\")\n\n if cmin is None:\n cmin = data.min()\n if cmax is None:\n cmax = data.max()\n\n cscale = cmax - cmin\n if cscale < 0:\n raise ValueError(\"`cmax` should be larger than `cmin`.\")\n elif cscale == 0:\n cscale = 1\n\n scale = float(high - low) / cscale\n bytedata = (data - cmin) * scale + low\n return (bytedata.clip(low, high) + 0.5).astype(np.uint8)\n\ndef import_func(func):\n \"\"\"\n Imports a function from the autocnet package.\n\n Parameters\n ----------\n func : str\n import path. For example, to import the place_points_in_overlap function,\n this func can be called with: 'spatial.overlap.place_points_in_overlap'\n\n Returns\n -------\n func : obj\n The function object for use.\n \"\"\"\n if not func[0] == '.':\n # Since this intentionally forces the package to be autocnet\n # need the import path relative to the package name. Convenience\n # for the caller to add the '.' so they don't get a cryptic\n # ModuleImportError.\n func = f'.{func}'\n\n module, func = func.rsplit('.', 1)\n module = importlib.import_module(module, package='autocnet')\n func = getattr(module, func)\n return func\n\n\ndef compute_depression(input_dem, scale_factor=1, curvature_percentile=75):\n \"\"\"\n Compute depressions and return a new image with larges depressions filled in. 
\n \n Parameters\n ----------\n \n input_dem : np.array, rd.rdarray\n 2d array of elevation DNs, a DEM\n \n scale_factor : float\n Value to scale the erotion of planform curvatures by\n \n curvature_percentile : float \n what percentile of the curvature to keep, lower values\n results in bigger blobs \n \n \n Returns\n -------\n dem : rd.rdarray\n Dem with filled depressions\n \n mask : np.array\n Change mask, true on pixels that have been changed \n \n \n \"\"\"\n if isinstance(input_dem, np.ndarray):\n dem = rd.rdarray(input_dem.copy(), no_data=0)\n elif isinstance(input_dem, rd.rdarray):\n # take ownership of the reference\n dem = input_dem.copy()\n\n # create filled DEM\n demfilled = rd.FillDepressions(dem, epsilon=True, in_place=False, topology=\"D8\")\n \n # Mask out filled areas\n mask = np.abs(dem-demfilled)\n thresh = np.percentile(mask, 95)\n mask[mask <= thresh] = False\n mask[mask > thresh] = True\n \n curvatures = rd.TerrainAttribute(dem, attrib='planform_curvature')\n curvatures = (curvatures - np.min(curvatures))/np.ptp(curvatures) \n curvatures[curvatures < np.percentile(curvatures, curvature_percentile)] = 0\n curvatures[mask.astype(bool)] = 0\n \n demfilled -= curvatures * scale_factor\n \n mask = (curvatures+mask).astype(bool)\n \n # Get 3rd nn distance \n coords = np.argwhere(mask)\n nbrs = NearestNeighbors(n_neighbors=3, algorithm='kd_tree').fit(coords)\n dists, _ = nbrs.kneighbors(coords)\n eps = np.percentile(dists, 95)\n \n # Cluster\n db = DBSCAN(eps=eps, min_samples=3).fit(coords)\n labels = db.labels_\n unique, counts = np.unique(labels, return_counts=True)\n \n # First count are outliers, ignore\n counts = counts[1:]\n unique = unique[1:]\n \n index = np.argwhere(counts == counts.max())\n group = unique[index][0][0]\n cluster = coords[labels == group]\n \n # mask out depression\n dmask = np.full(dem.shape, False)\n dmask[[*cluster.T]] = True\n \n dem[dmask] = 0\n demfilled[~dmask] = 0\n dem = dem+demfilled\n\n return dem, dmask\n\n\ndef rasterize_polygon(shape, vertices, dtype=bool):\n \"\"\"\n Simple tool to convert poly into a boolean numpy array.\n \n source: https://stackoverflow.com/questions/37117878/generating-a-filled-polygon-inside-a-numpy-array\n \n Parameters\n ----------\n \n shape : tuple \n size of the array in (y,x) format\n \n vertices : np.array, list\n array of vertices in [[x0, y0], [x1, y1]...] 
format\n \n dtype : type\n datatype of output mask\n \n Returns\n -------\n \n mask : np.array\n mask with filled polygon set to true\n \n \"\"\"\n def check(p1, p2, base_array):\n idxs = np.indices(base_array.shape) # Create 3D array of indices\n\n p1 = p1.astype(float)\n p2 = p2.astype(float)\n\n # Calculate max column idx for each row idx based on interpolated line between two points\n if p1[0] == p2[0]:\n max_col_idx = (idxs[0] - p1[0]) * idxs.shape[1]\n sign = np.sign(p2[1] - p1[1])\n else:\n max_col_idx = (idxs[0] - p1[0]) / (p2[0] - p1[0]) * (p2[1] - p1[1]) + p1[1]\n sign = np.sign(p2[0] - p1[0])\n \n return idxs[1] * sign <= max_col_idx * sign\n\n base_array = np.zeros(shape, dtype=dtype) # Initialize your array of zeros\n\n fill = np.ones(base_array.shape) * True # Initialize boolean array defining shape fill\n\n # Create check array for each edge segment, combine into fill array\n for k in range(vertices.shape[0]):\n fill = np.all([fill, check(vertices[k-1], vertices[k], base_array)], axis=0)\n \n print(fill.any())\n # Set all values inside polygon to one\n base_array[fill] = 1\n return base_array\n\n\ndef generate_dem(alpha=1.0, size=800, scales=[160,80,32,16,8,4,2,1], scale_factor=5):\n \"\"\"\n Produces a random DEM\n \n Parameters\n ----------\n \n alpha : float \n Controls height variation. Lower number makes a shallower and noisier DEM, \n higher values create smoother DEM with large peaks and valleys. \n Reccommended range = (0, 1.5]\n \n size : int\n size of DEM, output DEM is in the shape of (size, size)\n \n scale_factor : float \n Scalar to multiply the slope degradation by, higher values = more erosion.\n Recommended to increase proportionately with alpha\n (higher alphas mean you might want higher scale_factor)\n \n Returns \n -------\n \n dem : np.array \n DEM array in the shape (size, size)\n \n \"\"\"\n \n topo=np.zeros((2,2))+random.rand(2,2)*(200/(2.**alpha))\n\n for k in range(len(scales)):\n nn = size/scales[k]\n topo = scipy.misc.imresize(topo, (int(nn), int(nn)), \"cubic\", mode=\"F\")\n topo = topo + random.rand(int(nn), int(nn))*(200/(nn**alpha))\n \n topo = rd.rdarray(topo, no_data=0)\n \n curvatures = rd.TerrainAttribute(topo, attrib='slope_riserun')\n curvatures = (curvatures - np.min(curvatures))/np.ptp(curvatures) * scale_factor\n return topo - curvatures\n\n\ndef hillshade(img, azi=255, min_slope=20, max_slope=100, min_bright=0, grayscale=False):\n \"\"\"\n hillshade a DEM, based on IDL code by Colin Dundas \n \n Parameters\n ----------\n \n img : np.array\n DEM to hillshade\n \n azi : float \n Sun azimuth \n \n min_slope : float \n minimum slope value \n \n max_slope : float \n maximum slope value \n \n min_bright : float \n minimum brightness \n \n grayscale : bool \n whether or not to produce grayscale image \n \n \n Returns\n -------\n \n dem : np.array \n hillshaded DEM \n \n \"\"\"\n dem = np.array(np.flip(bytescale(img), axis = 0), dtype=int)\n emax = np.max(dem)\n emin = np.min(dem)\n\n indices = np.linspace(0, 255, 256) / 25.5\n\n red_array = [0,25,50,101,153,204,255,255,255,255,255,255]\n red_index = np.arange(len(red_array))\n red_vec = np.interp(indices, red_index, red_array)\n\n green_array = [42,101,153,204,237,255,255,238,204,153,102,42]\n green_index = np.arange(len(green_array))\n green_vec = np.interp(indices, green_index, green_array)\n\n blue_array = [255,255,255,255,255,255,204,153,101,50,25,0]\n blue_index = np.arange(len(blue_array))\n blue_vec = np.interp(indices, blue_index, blue_array)\n\n zz = 
(255.0/(emax-emin))*(dem-emin)\n zz = zz.astype(int)\n\n nx = (np.roll(dem, 1, axis = 1) - dem)\n ny = (np.roll(dem, 1, axis = 0) - dem)\n sz = np.shape(nx)\n nz = np.ones(sz)\n nl = np.sqrt(np.power(nx, 2.0) + np.power(ny, 2.0) + np.power(nz, 2.0))\n nx = nx/nl\n ny = ny/nl\n nz = nz/nl\n\n math.cos(math.radians(1))\n azi_rad = math.radians(azi)\n alt_rad = math.radians(alt)\n lx = math.sin(azi_rad)*math.cos(alt_rad)\n ly = math.cos(azi_rad)*math.cos(alt_rad)\n lz = math.sin(alt_rad)\n\n dprod = nx*lx + ny*ly + nz*lz\n\n if min_slope is not None:\n min_dprod = math.cos(math.radians(max_slope + 90.0 - alt))\n else:\n min_dprod = np.min(dprod)\n\n if max_slope is not None:\n max_dprod = math.cos(math.radians(90.0 - alt - max_slope))\n else:\n max_dprod = np.max(dprod)\n\n bright = ((dprod - min_dprod) + min_bright)/((max_dprod - min_dprod) + min_bright)\n\n if grayscale:\n qq=(255*bright)\n else:\n qq = red_vec[zz]*bright\n\n if grayscale:\n rr = (255*bright)\n else:\n rr = green_vec[zz]*bright\n\n if grayscale:\n ss=(255*bright)\n else:\n ss = blue_vec[zz]*bright\n\n arrforout = np.dstack((qq, rr ,ss))\n arrforout = np.flip(arrforout.astype(int), axis = 0)\n arrfotout = bytescale(arrforout)\n arrforout.shape\n return arrforout\n\n\n" ]
[ [ "numpy.arange", "numpy.issubdtype", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.array" ], [ "numpy.deg2rad", "numpy.sqrt" ], [ "numpy.sqrt", "pandas.Series", "numpy.linspace", "numpy.asarray", "pandas.DataFrame", "sklearn.cluster.DBSCAN", "numpy.max", "numpy.roll", "numpy.allclose", "numpy.unique", "numpy.empty_like", "numpy.full", "numpy.interp", "sklearn.neighbors.NearestNeighbors", "numpy.zeros", "numpy.min", "numpy.power", "numpy.append", "numpy.array", "numpy.abs", "numpy.indices", "numpy.percentile", "numpy.argwhere", "numpy.ones", "numpy.dstack", "numpy.ptp", "numpy.sign", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
horseriver/csgm
[ "0f77d9c749dd31ce03b104dc2d355267e3ced038" ]
[ "celebA_dcgan/model_def.py" ]
[ "# pylint: disable = C0103, C0111, C0301, R0914\n\n\"\"\"Model definitions for celebA\n\nThis file is partially based on\nhttps://github.com/carpedm20/DCGAN-tensorflow/blob/master/main.py\nhttps://github.com/carpedm20/DCGAN-tensorflow/blob/master/model.py\n\nThey come with the following license: https://github.com/carpedm20/DCGAN-tensorflow/blob/master/LICENSE\n\"\"\"\n\nimport tensorflow as tf\nimport ops\n\n\nclass Hparams(object):\n def __init__(self):\n self.c_dim = 3\n self.z_dim = 100\n self.gf_dim = 64\n self.df_dim = 64\n self.gfc_dim = 1024\n self.dfc_dim = 1024\n self.batch_size = 64\n\n\ndef generator(hparams, z, train, reuse):\n\n if reuse:\n tf.get_variable_scope().reuse_variables()\n\n output_size = 64\n s = output_size\n s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)\n\n g_bn0 = ops.batch_norm(name='g_bn0')\n g_bn1 = ops.batch_norm(name='g_bn1')\n g_bn2 = ops.batch_norm(name='g_bn2')\n g_bn3 = ops.batch_norm(name='g_bn3')\n\n # project `z` and reshape\n h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s16*s16, 'g_h0_lin'), [-1, s16, s16, hparams.gf_dim * 8])\n h0 = tf.nn.relu(g_bn0(h0, train=train))\n\n h1 = ops.deconv2d(h0, [hparams.batch_size, s8, s8, hparams.gf_dim*4], name='g_h1')\n h1 = tf.nn.relu(g_bn1(h1, train=train))\n\n h2 = ops.deconv2d(h1, [hparams.batch_size, s4, s4, hparams.gf_dim*2], name='g_h2')\n h2 = tf.nn.relu(g_bn2(h2, train=train))\n\n h3 = ops.deconv2d(h2, [hparams.batch_size, s2, s2, hparams.gf_dim*1], name='g_h3')\n h3 = tf.nn.relu(g_bn3(h3, train=train))\n\n h4 = ops.deconv2d(h3, [hparams.batch_size, s, s, hparams.c_dim], name='g_h4')\n x_gen = tf.nn.tanh(h4)\n\n return x_gen\n\n\ndef discriminator(hparams, x, train, reuse):\n\n if reuse:\n tf.get_variable_scope().reuse_variables()\n\n d_bn1 = ops.batch_norm(name='d_bn1')\n d_bn2 = ops.batch_norm(name='d_bn2')\n d_bn3 = ops.batch_norm(name='d_bn3')\n\n h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))\n\n h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')\n h1 = ops.lrelu(d_bn1(h1, train=train))\n\n h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')\n h2 = ops.lrelu(d_bn2(h2, train=train))\n\n h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')\n h3 = ops.lrelu(d_bn3(h3, train=train))\n\n h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')\n\n d_logit = h4\n d = tf.nn.sigmoid(d_logit)\n\n return d, d_logit\n\n\ndef gen_restore_vars():\n restore_vars = ['g_bn0/beta',\n 'g_bn0/gamma',\n 'g_bn0/moving_mean',\n 'g_bn0/moving_variance',\n 'g_bn1/beta',\n 'g_bn1/gamma',\n 'g_bn1/moving_mean',\n 'g_bn1/moving_variance',\n 'g_bn2/beta',\n 'g_bn2/gamma',\n 'g_bn2/moving_mean',\n 'g_bn2/moving_variance',\n 'g_bn3/beta',\n 'g_bn3/gamma',\n 'g_bn3/moving_mean',\n 'g_bn3/moving_variance',\n 'g_h0_lin/Matrix',\n 'g_h0_lin/bias',\n 'g_h1/biases',\n 'g_h1/w',\n 'g_h2/biases',\n 'g_h2/w',\n 'g_h3/biases',\n 'g_h3/w',\n 'g_h4/biases',\n 'g_h4/w']\n return restore_vars\n\n\n\ndef discrim_restore_vars():\n restore_vars = ['d_bn1/beta',\n 'd_bn1/gamma',\n 'd_bn1/moving_mean',\n 'd_bn1/moving_variance',\n 'd_bn2/beta',\n 'd_bn2/gamma',\n 'd_bn2/moving_mean',\n 'd_bn2/moving_variance',\n 'd_bn3/beta',\n 'd_bn3/gamma',\n 'd_bn3/moving_mean',\n 'd_bn3/moving_variance',\n 'd_h0_conv/biases',\n 'd_h0_conv/w',\n 'd_h1_conv/biases',\n 'd_h1_conv/w',\n 'd_h2_conv/biases',\n 'd_h2_conv/w',\n 'd_h3_conv/biases',\n 'd_h3_conv/w',\n 'd_h3_lin/Matrix',\n 'd_h3_lin/bias']\n return restore_vars\n" ]
[ [ "tensorflow.reshape", "tensorflow.nn.sigmoid", "tensorflow.nn.tanh", "tensorflow.get_variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
bmedishe/pytorch
[ "7fc73285da2c8918cf039a2c3e0eeed241478e40" ]
[ "test/run_test.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport copy\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom distutils.version import LooseVersion\nimport functools\nimport os\nimport pathlib\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport tempfile\n\nimport torch\nfrom torch.utils import cpp_extension\nfrom torch.testing._internal.common_utils import (\n FILE_SCHEMA,\n IS_IN_CI,\n TEST_WITH_ROCM,\n shell,\n set_cwd,\n parser as common_parser,\n)\nimport torch.distributed as dist\nfrom typing import Dict, Optional, List\n\nREPO_ROOT = pathlib.Path(__file__).resolve().parent.parent\n\ntry:\n # using tools/ to optimize test run.\n sys.path.append(str(REPO_ROOT))\n from tools.testing.test_selections import (\n export_S3_test_times,\n get_shard_based_on_S3,\n # NS: Disable target determination\n # get_slow_tests_based_on_S3,\n get_specified_test_cases,\n get_reordered_tests,\n get_test_case_configs,\n )\n # NS: Disable target determination\n # from tools.testing.modulefinder_determinator import (\n # should_run_test,\n # TARGET_DET_LIST,\n # )\n\n HAVE_TEST_SELECTION_TOOLS = True\nexcept ImportError:\n HAVE_TEST_SELECTION_TOOLS = False\n print(\n \"Unable to import test_selections from tools/testing. Running without test selection stats...\"\n )\n\n\ndef discover_tests(\n base_dir: Optional[pathlib.Path] = None,\n blocklisted_patterns: Optional[List[str]] = None,\n blocklisted_tests: Optional[List[str]] = None,\n extra_tests: Optional[List[str]] = None) -> List[str]:\n \"\"\"\n Searches for all python files starting with test_ excluding one specified by patterns\n \"\"\"\n def skip_test_p(name: str) -> bool:\n rc = False\n if blocklisted_patterns is not None:\n rc |= any(name.startswith(pattern) for pattern in blocklisted_patterns)\n if blocklisted_tests is not None:\n rc |= name in blocklisted_tests\n return rc\n cwd = pathlib.Path(__file__).resolve().parent if base_dir is None else base_dir\n all_py_files = list(cwd.glob('**/test_*.py'))\n rc = [str(fname.relative_to(cwd))[:-3] for fname in all_py_files]\n # Invert slashes on Windows\n if sys.platform == \"win32\":\n rc = [name.replace('\\\\', '/') for name in rc]\n rc = [test for test in rc if not skip_test_p(test)]\n if extra_tests is not None:\n rc += extra_tests\n return sorted(rc)\n\nTESTS = discover_tests(\n blocklisted_patterns=[\n 'ao',\n 'bottleneck_test',\n 'custom_backend',\n 'custom_operator',\n 'fx', # executed by test_fx.py\n 'jit', # executed by test_jit.py\n 'mobile',\n 'onnx',\n 'package', # executed by test_package.py\n 'quantization', # executed by test_quantization.py\n 'autograd', # executed by test_autograd.py\n ],\n blocklisted_tests=[\n 'test_bundled_images',\n 'test_cpp_extensions_aot',\n 'test_determination',\n 'test_jit_fuser',\n 'test_jit_simple',\n 'test_jit_string',\n 'test_kernel_launch_checks',\n 'test_metal',\n 'test_nnapi',\n 'test_segment_reductions',\n 'test_static_runtime',\n 'test_throughput_benchmark',\n 'test_typing',\n \"distributed/algorithms/ddp_comm_hooks/test_ddp_hooks\",\n \"distributed/algorithms/quantization/test_quantization\",\n \"distributed/bin/test_script\",\n \"distributed/elastic/multiprocessing/bin/test_script\",\n \"distributed/launcher/bin/test_script\",\n \"distributed/launcher/bin/test_script_init_method\",\n \"distributed/launcher/bin/test_script_is_torchelastic_launched\",\n \"distributed/launcher/bin/test_script_local_rank\",\n \"distributed/test_c10d_spawn\",\n 'distributions/test_transforms',\n 'distributions/test_utils',\n ],\n 
extra_tests=[\n \"test_cpp_extensions_aot_ninja\",\n \"test_cpp_extensions_aot_no_ninja\",\n \"distributed/elastic/timer/api_test\",\n \"distributed/elastic/timer/local_timer_example\",\n \"distributed/elastic/timer/local_timer_test\",\n \"distributed/elastic/events/lib_test\",\n \"distributed/elastic/metrics/api_test\",\n \"distributed/elastic/utils/logging_test\",\n \"distributed/elastic/utils/util_test\",\n \"distributed/elastic/utils/distributed_test\",\n \"distributed/elastic/multiprocessing/api_test\",\n \"test_deploy\",\n ]\n)\n\nFSDP_TEST = [test for test in TESTS if test.startswith(\"distributed/fsdp\")]\n\n# Tests need to be run with pytest.\nUSE_PYTEST_LIST = [\n \"distributed/pipeline/sync/skip/test_api\",\n \"distributed/pipeline/sync/skip/test_gpipe\",\n \"distributed/pipeline/sync/skip/test_inspect_skip_layout\",\n \"distributed/pipeline/sync/skip/test_leak\",\n \"distributed/pipeline/sync/skip/test_portal\",\n \"distributed/pipeline/sync/skip/test_stash_pop\",\n \"distributed/pipeline/sync/skip/test_tracker\",\n \"distributed/pipeline/sync/skip/test_verify_skippables\",\n \"distributed/pipeline/sync/test_balance\",\n \"distributed/pipeline/sync/test_bugs\",\n \"distributed/pipeline/sync/test_checkpoint\",\n \"distributed/pipeline/sync/test_copy\",\n \"distributed/pipeline/sync/test_deferred_batch_norm\",\n \"distributed/pipeline/sync/test_dependency\",\n \"distributed/pipeline/sync/test_inplace\",\n \"distributed/pipeline/sync/test_microbatch\",\n \"distributed/pipeline/sync/test_phony\",\n \"distributed/pipeline/sync/test_pipe\",\n \"distributed/pipeline/sync/test_pipeline\",\n \"distributed/pipeline/sync/test_stream\",\n \"distributed/pipeline/sync/test_transparency\",\n \"distributed/pipeline/sync/test_worker\",\n \"distributions/test_constraints\",\n \"distributions/test_transforms\",\n \"distributions/test_utils\",\n \"test_typing\",\n \"distributed/elastic/events/lib_test\",\n \"distributed/elastic/agent/server/test/api_test\",\n \"test_deploy\",\n]\n\nWINDOWS_BLOCKLIST = [\n \"distributed/nn/jit/test_instantiator\",\n \"distributed/rpc/test_faulty_agent\",\n \"distributed/rpc/test_tensorpipe_agent\",\n \"distributed/rpc/test_share_memory\",\n \"distributed/rpc/cuda/test_tensorpipe_agent\",\n \"distributed/pipeline/sync/skip/test_api\",\n \"distributed/pipeline/sync/skip/test_gpipe\",\n \"distributed/pipeline/sync/skip/test_inspect_skip_layout\",\n \"distributed/pipeline/sync/skip/test_leak\",\n \"distributed/pipeline/sync/skip/test_portal\",\n \"distributed/pipeline/sync/skip/test_stash_pop\",\n \"distributed/pipeline/sync/skip/test_tracker\",\n \"distributed/pipeline/sync/skip/test_verify_skippables\",\n \"distributed/pipeline/sync/test_balance\",\n \"distributed/pipeline/sync/test_bugs\",\n \"distributed/pipeline/sync/test_checkpoint\",\n \"distributed/pipeline/sync/test_copy\",\n \"distributed/pipeline/sync/test_deferred_batch_norm\",\n \"distributed/pipeline/sync/test_dependency\",\n \"distributed/pipeline/sync/test_inplace\",\n \"distributed/pipeline/sync/test_microbatch\",\n \"distributed/pipeline/sync/test_phony\",\n \"distributed/pipeline/sync/test_pipe\",\n \"distributed/pipeline/sync/test_pipeline\",\n \"distributed/pipeline/sync/test_stream\",\n \"distributed/pipeline/sync/test_transparency\",\n \"distributed/pipeline/sync/test_worker\",\n \"distributed/elastic/agent/server/test/api_test\",\n \"distributed/elastic/multiprocessing/api_test\",\n \"distributed/_shard/checkpoint/test_checkpoint\"\n 
\"distributed/_shard/checkpoint/test_file_system_checkpoint\"\n \"distributed/_shard/sharding_spec/test_sharding_spec\",\n \"distributed/_shard/sharding_plan/test_sharding_plan\",\n \"distributed/_shard/sharded_tensor/test_megatron_prototype\",\n \"distributed/_shard/sharded_tensor/test_sharded_tensor\",\n \"distributed/_shard/sharded_tensor/test_sharded_tensor_reshard\",\n \"distributed/_shard/sharded_tensor/ops/test_chunk\",\n \"distributed/_shard/sharded_tensor/ops/test_elementwise_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_embedding\",\n \"distributed/_shard/sharded_tensor/ops/test_embedding_bag\",\n \"distributed/_shard/sharded_tensor/ops/test_binary_cmp\",\n \"distributed/_shard/sharded_tensor/ops/test_init\",\n \"distributed/_shard/sharded_tensor/ops/test_linear\",\n \"distributed/_shard/sharded_tensor/ops/test_math_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_matrix_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_softmax\",\n \"distributed/_shard/sharded_optim/test_sharded_optim\",\n \"distributed/_shard/test_partial_tensor\",\n \"distributed/_shard/test_replicated_tensor\",\n] + FSDP_TEST\n\nROCM_BLOCKLIST = [\n \"distributed/nn/jit/test_instantiator\",\n \"distributed/rpc/test_faulty_agent\",\n \"distributed/rpc/test_tensorpipe_agent\",\n \"distributed/rpc/test_share_memory\",\n \"distributed/rpc/cuda/test_tensorpipe_agent\",\n \"distributed/_shard/checkpoint/test_checkpoint\"\n \"distributed/_shard/checkpoint/test_file_system_checkpoint\"\n \"distributed/_shard/sharding_spec/test_sharding_spec\",\n \"distributed/_shard/sharding_plan/test_sharding_plan\",\n \"distributed/_shard/sharded_tensor/test_megatron_prototype\",\n \"distributed/_shard/sharded_tensor/test_sharded_tensor\",\n \"distributed/_shard/sharded_tensor/test_sharded_tensor_reshard\",\n \"distributed/_shard/sharded_tensor/ops/test_chunk\",\n \"distributed/_shard/sharded_tensor/ops/test_elementwise_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_embedding\",\n \"distributed/_shard/sharded_tensor/ops/test_embedding_bag\",\n \"distributed/_shard/sharded_tensor/ops/test_binary_cmp\",\n \"distributed/_shard/sharded_tensor/ops/test_init\",\n \"distributed/_shard/sharded_tensor/ops/test_linear\",\n \"distributed/_shard/sharded_tensor/ops/test_math_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_matrix_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_softmax\",\n \"distributed/_shard/sharded_optim/test_sharded_optim\",\n \"distributed/_shard/test_partial_tensor\",\n \"distributed/_shard/test_replicated_tensor\",\n \"test_determination\",\n \"test_jit_legacy\",\n \"test_type_hints\",\n \"test_openmp\",\n]\n\nRUN_PARALLEL_BLOCKLIST = [\n \"test_cpp_extensions_jit\",\n \"test_jit_disabled\",\n \"test_mobile_optimizer\",\n \"test_multiprocessing\",\n \"test_multiprocessing_spawn\",\n \"test_namedtuple_return_api\",\n \"test_overrides\",\n \"test_show_pickle\",\n \"test_tensorexpr\",\n \"test_cuda_primary_ctx\",\n] + FSDP_TEST\n\nWINDOWS_COVERAGE_BLOCKLIST = []\n\n# A subset of our TEST list that validates PyTorch's ops, modules, and autograd function as expected\nCORE_TEST_LIST = [\n \"test_autograd\",\n \"test_modules\",\n \"test_nn\",\n \"test_ops\",\n \"test_ops_gradients\",\n \"test_ops_jit\",\n \"test_torch\"\n]\n\n# the JSON file to store the S3 test stats\nTEST_TIMES_FILE = \".pytorch-test-times.json\"\n\n# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST\nSLOW_TEST_THRESHOLD = 300\n\nDISTRIBUTED_TESTS_CONFIG = {}\n\n\nif dist.is_available():\n 
DISTRIBUTED_TESTS_CONFIG[\"test\"] = {\"WORLD_SIZE\": \"1\"}\n if not TEST_WITH_ROCM and dist.is_mpi_available():\n DISTRIBUTED_TESTS_CONFIG[\"mpi\"] = {\n \"WORLD_SIZE\": \"3\",\n \"TEST_REPORT_SOURCE_OVERRIDE\": \"dist-mpi\",\n }\n if dist.is_nccl_available():\n DISTRIBUTED_TESTS_CONFIG[\"nccl\"] = {\n \"WORLD_SIZE\": \"2\" if torch.cuda.device_count() == 2 else \"3\",\n \"TEST_REPORT_SOURCE_OVERRIDE\": \"dist-nccl\",\n }\n if dist.is_gloo_available():\n DISTRIBUTED_TESTS_CONFIG[\"gloo\"] = {\n \"WORLD_SIZE\": \"2\" if torch.cuda.device_count() == 2 else \"3\",\n \"TEST_REPORT_SOURCE_OVERRIDE\": \"dist-gloo\",\n }\n\n# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\nSIGNALS_TO_NAMES_DICT = {\n getattr(signal, n): n for n in dir(signal) if n.startswith(\"SIG\") and \"_\" not in n\n}\n\nCPP_EXTENSIONS_ERROR = \"\"\"\nNinja (https://ninja-build.org) is required for some of the C++ extensions\ntests, but it could not be found. Install ninja with `pip install ninja`\nor `conda install ninja`. Alternatively, disable said tests with\n`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.\n\"\"\"\n\nPYTORCH_COLLECT_COVERAGE = bool(os.environ.get(\"PYTORCH_COLLECT_COVERAGE\"))\n\nENABLE_PR_HISTORY_REORDERING = bool(\n os.environ.get(\"ENABLE_PR_HISTORY_REORDERING\", \"0\") == \"1\"\n)\n\nJIT_EXECUTOR_TESTS = [\n \"test_jit_profiling\",\n \"test_jit_legacy\",\n \"test_jit_fuser_legacy\",\n]\n\nDISTRIBUTED_TESTS = [test for test in TESTS if test.startswith(\"distributed\")]\n\nTESTS_REQUIRING_LAPACK = [\n \"distributions/test_constraints\",\n \"distributions/test_distributions\",\n]\n\n# Dictionary matching test modules (in TESTS) to lists of test cases (within that test_module) that would be run when\n# options.run_specified_test_cases is enabled.\n# For example:\n# {\n# \"test_nn\": [\"test_doubletensor_avg_pool3d\", \"test_share_memory\", \"test_hook_requires_grad\"],\n# ...\n# }\n# then for test_nn.py, we would ONLY run test_doubletensor_avg_pool3d, test_share_memory, and test_hook_requires_grad.\nSPECIFIED_TEST_CASES_DICT: Dict[str, List[str]] = {}\n\n# The file from which the SPECIFIED_TEST_CASES_DICT will be filled, a CSV of test cases that would be run when\n# options.run_specified_test_cases is enabled.\nSPECIFIED_TEST_CASES_FILE: str = \".pytorch_specified_test_cases.csv\"\n\n\ndef print_to_stderr(message):\n print(message, file=sys.stderr)\n\n\ndef get_test_case_args(test_module, using_pytest) -> List[str]:\n args = []\n # if test_module not specified or specified with '__all__' then run all tests\n if (\n test_module not in SPECIFIED_TEST_CASES_DICT\n or \"__all__\" in SPECIFIED_TEST_CASES_DICT[test_module]\n ):\n return args\n\n if using_pytest:\n args.append(\"-k\")\n args.append(\" or \".join(SPECIFIED_TEST_CASES_DICT[test_module]))\n else:\n for test in SPECIFIED_TEST_CASES_DICT[test_module]:\n args.append(\"-k\")\n args.append(test)\n\n return args\n\n\ndef get_executable_command(options, allow_pytest, disable_coverage=False):\n if options.coverage and not disable_coverage:\n executable = [\"coverage\", \"run\", \"--parallel-mode\", \"--source=torch\"]\n else:\n executable = [sys.executable]\n if options.pytest:\n if allow_pytest:\n executable += [\"-m\", \"pytest\"]\n else:\n print_to_stderr(\n \"Pytest cannot be used for this test. 
Falling back to unittest.\"\n )\n return executable\n\n\ndef run_test(\n test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None\n):\n unittest_args = options.additional_unittest_args.copy()\n if options.verbose:\n unittest_args.append(f'-{\"v\"*options.verbose}') # in case of pytest\n if test_module in RUN_PARALLEL_BLOCKLIST:\n unittest_args = [\n arg for arg in unittest_args if not arg.startswith(\"--run-parallel\")\n ]\n if extra_unittest_args:\n assert isinstance(extra_unittest_args, list)\n unittest_args.extend(extra_unittest_args)\n\n # If using pytest, replace -f with equivalent -x\n if options.pytest:\n unittest_args = [arg if arg != \"-f\" else \"-x\" for arg in unittest_args]\n elif IS_IN_CI:\n # use the downloaded test cases configuration, not supported in pytest\n unittest_args.extend([\"--import-slow-tests\", \"--import-disabled-tests\"])\n\n # Multiprocessing related tests cannot run with coverage.\n # Tracking issue: https://github.com/pytorch/pytorch/issues/50661\n disable_coverage = (\n sys.platform == \"win32\" and test_module in WINDOWS_COVERAGE_BLOCKLIST\n )\n\n # Extra arguments are not supported with pytest\n executable = get_executable_command(\n options, allow_pytest=not extra_unittest_args, disable_coverage=disable_coverage\n )\n\n # TODO: move this logic into common_utils.py instead of passing in \"-k\" individually\n # The following logic for running specified tests will only run for non-distributed tests, as those are dispatched\n # to test_distributed and not run_test (this function)\n if options.run_specified_test_cases:\n unittest_args.extend(get_test_case_args(test_module, \"pytest\" in executable))\n\n # Can't call `python -m unittest test_*` here because it doesn't run code\n # in `if __name__ == '__main__': `. So call `python test_*.py` instead.\n argv = [test_module + \".py\"] + unittest_args\n\n command = (launcher_cmd or []) + executable + argv\n print_to_stderr(\"Executing {} ... 
[{}]\".format(command, datetime.now()))\n return shell(command, test_directory)\n\n\ndef test_cuda_primary_ctx(test_module, test_directory, options):\n return run_test(\n test_module, test_directory, options, extra_unittest_args=[\"--subprocess\"]\n )\n\nrun_test_with_subprocess = functools.partial(run_test, extra_unittest_args=[\"--subprocess\"])\n\n\ndef get_run_test_with_subprocess_fn():\n return lambda test_module, test_directory, options: run_test_with_subprocess(test_module, test_directory, options)\n\n\n\ndef _test_cpp_extensions_aot(test_directory, options, use_ninja):\n if use_ninja:\n try:\n cpp_extension.verify_ninja_availability()\n except RuntimeError:\n print(CPP_EXTENSIONS_ERROR)\n return 1\n\n # Wipe the build folder, if it exists already\n cpp_extensions_test_dir = os.path.join(test_directory, \"cpp_extensions\")\n cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, \"build\")\n if os.path.exists(cpp_extensions_test_build_dir):\n shutil.rmtree(cpp_extensions_test_build_dir)\n\n # Build the test cpp extensions modules\n shell_env = os.environ.copy()\n shell_env[\"USE_NINJA\"] = str(1 if use_ninja else 0)\n cmd = [sys.executable, \"setup.py\", \"install\", \"--root\", \"./install\"]\n return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)\n if return_code != 0:\n return return_code\n if sys.platform != \"win32\":\n return_code = shell(\n cmd,\n cwd=os.path.join(cpp_extensions_test_dir, \"no_python_abi_suffix_test\"),\n env=shell_env,\n )\n if return_code != 0:\n return return_code\n\n # \"install\" the test modules and run tests\n python_path = os.environ.get(\"PYTHONPATH\", \"\")\n from shutil import copyfile\n\n test_module = \"test_cpp_extensions_aot\" + (\"_ninja\" if use_ninja else \"_no_ninja\")\n copyfile(\n test_directory + \"/test_cpp_extensions_aot.py\",\n test_directory + \"/\" + test_module + \".py\",\n )\n try:\n cpp_extensions = os.path.join(test_directory, \"cpp_extensions\")\n install_directory = \"\"\n # install directory is the one that is named site-packages\n for root, directories, _ in os.walk(os.path.join(cpp_extensions, \"install\")):\n for directory in directories:\n if \"-packages\" in directory:\n install_directory = os.path.join(root, directory)\n\n assert install_directory, \"install_directory must not be empty\"\n os.environ[\"PYTHONPATH\"] = os.pathsep.join([install_directory, python_path])\n return run_test(test_module, test_directory, options)\n finally:\n os.environ[\"PYTHONPATH\"] = python_path\n if os.path.exists(test_directory + \"/\" + test_module + \".py\"):\n os.remove(test_directory + \"/\" + test_module + \".py\")\n\n\ndef test_cpp_extensions_aot_ninja(test_module, test_directory, options):\n return _test_cpp_extensions_aot(test_directory, options, use_ninja=True)\n\n\ndef test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):\n return _test_cpp_extensions_aot(test_directory, options, use_ninja=False)\n\n\ndef test_distributed(test_module, test_directory, options):\n # MPI tests are broken with Python-3.9\n mpi_available = subprocess.call(\n \"command -v mpiexec\", shell=True\n ) == 0 and sys.version_info < (3, 9)\n if options.verbose and not mpi_available:\n print_to_stderr(\"MPI not available -- MPI backend tests will be skipped\")\n config = DISTRIBUTED_TESTS_CONFIG\n for backend, env_vars in config.items():\n if sys.platform == \"win32\" and backend != \"gloo\":\n continue\n if backend == \"mpi\" and not mpi_available:\n continue\n for with_init_file in {True, False}:\n if 
sys.platform == \"win32\" and not with_init_file:\n continue\n tmp_dir = tempfile.mkdtemp()\n if options.verbose:\n init_str = \"with {} init_method\"\n with_init = init_str.format(\"file\" if with_init_file else \"env\")\n print_to_stderr(\n \"Running distributed tests for the {} backend {}\".format(\n backend, with_init\n )\n )\n old_environ = dict(os.environ)\n os.environ[\"TEMP_DIR\"] = tmp_dir\n os.environ[\"BACKEND\"] = backend\n os.environ[\"INIT_METHOD\"] = \"env://\"\n os.environ.update(env_vars)\n if with_init_file:\n if test_module == \"test_distributed_spawn\":\n init_method = f\"{FILE_SCHEMA}{tmp_dir}/\"\n else:\n init_method = f\"{FILE_SCHEMA}{tmp_dir}/shared_init_file\"\n os.environ[\"INIT_METHOD\"] = init_method\n try:\n os.mkdir(os.path.join(tmp_dir, \"barrier\"))\n os.mkdir(os.path.join(tmp_dir, \"test_dir\"))\n if backend == \"mpi\":\n # test mpiexec for --noprefix option\n with open(os.devnull, \"w\") as devnull:\n allowrunasroot_opt = (\n \"--allow-run-as-root\"\n if subprocess.call(\n 'mpiexec --allow-run-as-root -n 1 bash -c \"\"',\n shell=True,\n stdout=devnull,\n stderr=subprocess.STDOUT,\n )\n == 0\n else \"\"\n )\n noprefix_opt = (\n \"--noprefix\"\n if subprocess.call(\n f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c \"\"',\n shell=True,\n stdout=devnull,\n stderr=subprocess.STDOUT,\n )\n == 0\n else \"\"\n )\n\n mpiexec = [\"mpiexec\", \"-n\", \"3\", noprefix_opt, allowrunasroot_opt]\n\n return_code = run_test(\n test_module, test_directory, options, launcher_cmd=mpiexec\n )\n else:\n return_code = run_test(test_module, test_directory, options, extra_unittest_args=[\"--subprocess\"])\n if return_code != 0:\n return return_code\n finally:\n shutil.rmtree(tmp_dir)\n os.environ.clear()\n os.environ.update(old_environ)\n return 0\n\n\nCUSTOM_HANDLERS = {\n \"test_cuda_primary_ctx\": test_cuda_primary_ctx,\n \"test_cpp_extensions_aot_no_ninja\": test_cpp_extensions_aot_no_ninja,\n \"test_cpp_extensions_aot_ninja\": test_cpp_extensions_aot_ninja,\n \"distributed/test_distributed_spawn\": test_distributed,\n \"distributed/test_c10d_nccl\": get_run_test_with_subprocess_fn(),\n \"distributed/test_c10d_gloo\": get_run_test_with_subprocess_fn(),\n \"distributed/test_c10d_common\": get_run_test_with_subprocess_fn(),\n \"distributed/test_c10d_spawn_gloo\": get_run_test_with_subprocess_fn(),\n \"distributed/test_c10d_spawn_nccl\": get_run_test_with_subprocess_fn(),\n \"distributed/test_store\": get_run_test_with_subprocess_fn(),\n \"distributed/test_pg_wrapper\": get_run_test_with_subprocess_fn(),\n \"distributed/rpc/test_faulty_agent\": get_run_test_with_subprocess_fn(),\n \"distributed/rpc/test_tensorpipe_agent\": get_run_test_with_subprocess_fn(),\n \"distributed/rpc/test_share_memory\": get_run_test_with_subprocess_fn(),\n \"distributed/rpc/cuda/test_tensorpipe_agent\": get_run_test_with_subprocess_fn(),\n}\n\ndef parse_test_module(test):\n return test.split(\".\")[0]\n\n\nclass TestChoices(list):\n def __init__(self, *args, **kwargs):\n super(TestChoices, self).__init__(args[0])\n\n def __contains__(self, item):\n return list.__contains__(self, parse_test_module(item))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Run the PyTorch unit test suite\",\n epilog=\"where TESTS is any of: {}\".format(\", \".join(TESTS)),\n formatter_class=argparse.RawTextHelpFormatter,\n parents=[common_parser]\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"print verbose information and test-by-test 
results\",\n )\n parser.add_argument(\"--jit\", \"--jit\", action=\"store_true\", help=\"run all jit tests\")\n parser.add_argument(\n \"--distributed-tests\",\n \"--distributed-tests\",\n action=\"store_true\",\n help=\"run all distributed tests\",\n )\n parser.add_argument(\n \"-core\",\n \"--core\",\n action=\"store_true\",\n help=\"Only run core tests, or tests that validate PyTorch's ops, modules,\"\n \"and autograd. They are defined by CORE_TEST_LIST.\"\n )\n parser.add_argument(\n \"-pt\",\n \"--pytest\",\n action=\"store_true\",\n help=\"If true, use `pytest` to execute the tests. E.g., this runs \"\n \"TestTorch with pytest in verbose and coverage mode: \"\n \"python run_test.py -vci torch -pt\",\n )\n parser.add_argument(\n \"-c\",\n \"--coverage\",\n action=\"store_true\",\n help=\"enable coverage\",\n default=PYTORCH_COLLECT_COVERAGE,\n )\n parser.add_argument(\n \"-i\",\n \"--include\",\n nargs=\"+\",\n choices=TestChoices(TESTS),\n default=TESTS,\n metavar=\"TESTS\",\n help=\"select a set of tests to include (defaults to ALL tests).\"\n \" tests must be a part of the TESTS list defined in run_test.py\",\n )\n parser.add_argument(\n \"-x\",\n \"--exclude\",\n nargs=\"+\",\n choices=TESTS,\n metavar=\"TESTS\",\n default=[],\n help=\"select a set of tests to exclude\",\n )\n parser.add_argument(\n \"-f\",\n \"--first\",\n choices=TESTS,\n metavar=\"TESTS\",\n help=\"select the test to start from (excludes previous tests)\",\n )\n parser.add_argument(\n \"-l\",\n \"--last\",\n choices=TESTS,\n metavar=\"TESTS\",\n help=\"select the last test to run (excludes following tests)\",\n )\n parser.add_argument(\n \"--bring-to-front\",\n nargs=\"+\",\n choices=TestChoices(TESTS),\n default=[],\n metavar=\"TESTS\",\n help=\"select a set of tests to run first. This can be used in situations\"\n \" where you want to run all tests, but care more about some set, \"\n \"e.g. 
after making a change to a specific component\",\n )\n parser.add_argument(\n \"--ignore-win-blocklist\",\n action=\"store_true\",\n help=\"always run blocklisted windows tests\",\n )\n # NS: Disable target determination until it can be made more reliable\n # parser.add_argument(\n # \"--determine-from\",\n # help=\"File of affected source filenames to determine which tests to run.\",\n # )\n parser.add_argument(\n \"--continue-through-error\",\n action=\"store_true\",\n help=\"Runs the full test suite despite one of the tests failing\",\n default=strtobool(os.environ.get(\"CONTINUE_THROUGH_ERROR\", \"False\")),\n )\n parser.add_argument(\n \"additional_unittest_args\",\n nargs=\"*\",\n help=\"additional arguments passed through to unittest, e.g., \"\n \"python run_test.py -i sparse -- TestSparse.test_factory_size_check\",\n )\n parser.add_argument(\n \"--export-past-test-times\",\n nargs=\"?\",\n type=str,\n const=TEST_TIMES_FILE,\n help=\"dumps test times from previous S3 stats into a file, format JSON\",\n )\n parser.add_argument(\n \"--shard\",\n nargs=2,\n type=int,\n help=\"runs a shard of the tests (taking into account other selections), e.g., \"\n \"--shard 2 3 will break up the selected tests into 3 shards and run the tests \"\n \"in the 2nd shard (the first number should not exceed the second)\",\n )\n parser.add_argument(\n \"--exclude-jit-executor\",\n action=\"store_true\",\n help=\"exclude tests that are run for a specific jit config\",\n )\n parser.add_argument(\n \"--exclude-distributed-tests\",\n action=\"store_true\",\n help=\"exclude distributed tests\",\n )\n parser.add_argument(\n \"--run-specified-test-cases\",\n nargs=\"?\",\n type=str,\n const=SPECIFIED_TEST_CASES_FILE,\n help=\"load specified test cases file dumped from previous OSS CI stats, format CSV. \"\n \" If all test cases should run for a <test_module> please add a single row: \\n\"\n \" test_filename,test_case_name\\n\"\n \" ...\\n\"\n \" <test_module>,__all__\\n\"\n \" ...\\n\"\n 'how we use the stats will be based on option \"--use-specified-test-cases-by\".',\n )\n parser.add_argument(\n \"--use-specified-test-cases-by\",\n type=str,\n choices=[\"include\", \"bring-to-front\"],\n default=\"include\",\n help='used together with option \"--run-specified-test-cases\". When specified test case '\n \"file is set, this option allows the user to control whether to only run the specified test \"\n \"modules or to simply bring the specified modules to front and also run the remaining \"\n \"modules. Note: regardless of this option, we will only run the specified test cases \"\n \" within a specified test module. 
For unspecified test modules with the bring-to-front \"\n \"option, all test cases will be run, as one may expect.\",\n )\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Only list the test that will run.\",\n )\n return parser.parse_args()\n\n\ndef find_test_index(test, selected_tests, find_last_index=False):\n \"\"\"Find the index of the first or last occurrence of a given test/test module in the list of selected tests.\n\n This function is used to determine the indices when slicing the list of selected tests when\n ``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.\n\n :attr:`selected_tests` can be a list that contains multiple consequent occurrences of tests\n as part of the same test module, e.g.:\n\n ```\n selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',\n 'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']\n ```\n\n If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.\n If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.\n\n Args:\n test (str): Name of test to lookup\n selected_tests (list): List of tests\n find_last_index (bool, optional): should we lookup the index of first or last\n occurrence (first is default)\n\n Returns:\n index of the first or last occurrence of the given test\n \"\"\"\n idx = 0\n found_idx = -1\n for t in selected_tests:\n if t.startswith(test):\n found_idx = idx\n if not find_last_index:\n break\n idx += 1\n return found_idx\n\n\ndef exclude_tests(exclude_list, selected_tests, exclude_message=None):\n for exclude_test in exclude_list:\n tests_copy = selected_tests[:]\n for test in tests_copy:\n if test.startswith(exclude_test):\n if exclude_message is not None:\n print_to_stderr(\"Excluding {} {}\".format(test, exclude_message))\n selected_tests.remove(test)\n return selected_tests\n\n\ndef get_selected_tests(options):\n # First make sure run specific test cases options are processed.\n if options.run_specified_test_cases:\n if options.use_specified_test_cases_by == \"include\":\n options.include = list(SPECIFIED_TEST_CASES_DICT.keys())\n elif options.use_specified_test_cases_by == \"bring-to-front\":\n options.bring_to_front = list(SPECIFIED_TEST_CASES_DICT.keys())\n\n selected_tests = options.include\n\n # filter if there's JIT only and distributed only test options\n if options.jit:\n selected_tests = list(\n filter(lambda test_name: \"jit\" in test_name, selected_tests)\n )\n\n if options.distributed_tests:\n selected_tests = list(\n filter(lambda test_name: test_name in DISTRIBUTED_TESTS, selected_tests)\n )\n\n # Filter to only run core tests when --core option is specified\n if options.core:\n selected_tests = list(\n filter(lambda test_name: test_name in CORE_TEST_LIST, selected_tests)\n )\n\n # process reordering\n if options.bring_to_front:\n to_front = set(options.bring_to_front)\n selected_tests = options.bring_to_front + list(\n filter(lambda name: name not in to_front, selected_tests)\n )\n\n if options.first:\n first_index = find_test_index(options.first, selected_tests)\n selected_tests = selected_tests[first_index:]\n\n if options.last:\n last_index = find_test_index(options.last, selected_tests, find_last_index=True)\n selected_tests = selected_tests[: last_index + 1]\n\n # process exclusion\n if options.exclude_jit_executor:\n options.exclude.extend(JIT_EXECUTOR_TESTS)\n\n if options.exclude_distributed_tests:\n options.exclude.extend(DISTRIBUTED_TESTS)\n\n # 
these tests failing in CUDA 11.6 temporary disabling. issue https://github.com/pytorch/pytorch/issues/75375\n if torch.version.cuda is not None and LooseVersion(torch.version.cuda) == \"11.6\":\n options.exclude.extend([\"distributions/test_constraints\"])\n\n selected_tests = exclude_tests(options.exclude, selected_tests)\n\n if sys.platform == \"win32\" and not options.ignore_win_blocklist:\n target_arch = os.environ.get(\"VSCMD_ARG_TGT_ARCH\")\n if target_arch != \"x64\":\n WINDOWS_BLOCKLIST.append(\"cpp_extensions_aot_no_ninja\")\n WINDOWS_BLOCKLIST.append(\"cpp_extensions_aot_ninja\")\n WINDOWS_BLOCKLIST.append(\"cpp_extensions_jit\")\n WINDOWS_BLOCKLIST.append(\"jit\")\n WINDOWS_BLOCKLIST.append(\"jit_fuser\")\n\n # This is exception that's caused by this issue https://github.com/pytorch/pytorch/issues/69460\n # This below code should be removed once this issue is solved\n if torch.version.cuda is not None and LooseVersion(torch.version.cuda) >= \"11.5\":\n WINDOWS_BLOCKLIST.append(\"test_cpp_extensions_aot\")\n WINDOWS_BLOCKLIST.append(\"test_cpp_extensions_aot_ninja\")\n WINDOWS_BLOCKLIST.append(\"test_cpp_extensions_aot_no_ninja\")\n\n selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, \"on Windows\")\n\n elif TEST_WITH_ROCM:\n selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, \"on ROCm\")\n\n # sharding\n if options.shard:\n assert len(options.shard) == 2, \"Unexpected shard format\"\n assert min(options.shard) > 0, \"Shards must be positive numbers\"\n which_shard, num_shards = options.shard\n assert (\n which_shard <= num_shards\n ), \"Selected shard must be less than or equal to total number of shards\"\n assert num_shards <= len(\n selected_tests\n ), f\"Number of shards must be less than {len(selected_tests)}\"\n # TODO: fix this to use test_times_filename, but currently this is not working\n # because setting the export arg immeidately halts the test execution.\n selected_tests = get_shard_based_on_S3(\n which_shard, num_shards, selected_tests, TEST_TIMES_FILE\n )\n\n # skip all distributed tests if distributed package is not available.\n if not dist.is_available():\n selected_tests = exclude_tests(DISTRIBUTED_TESTS, selected_tests,\n \"PyTorch is built without distributed support.\")\n\n # skip tests that require LAPACK when it's not available\n if not torch._C.has_lapack:\n selected_tests = exclude_tests(TESTS_REQUIRING_LAPACK, selected_tests,\n \"PyTorch is built without LAPACK support.\")\n\n return selected_tests\n\n\ndef run_test_module(test: str, test_directory: str, options) -> Optional[str]:\n test_module = parse_test_module(test)\n\n # Printing the date here can help diagnose which tests are slow\n print_to_stderr(\"Running {} ... 
[{}]\".format(test, datetime.now()))\n handler = CUSTOM_HANDLERS.get(test_module, run_test)\n return_code = handler(test_module, test_directory, options)\n assert isinstance(return_code, int) and not isinstance(\n return_code, bool\n ), \"Return code should be an integer\"\n if return_code == 0:\n return None\n\n message = f\"{test} failed!\"\n if return_code < 0:\n # subprocess.Popen returns the child process' exit signal as\n # return code -N, where N is the signal number.\n signal_name = SIGNALS_TO_NAMES_DICT[-return_code]\n message += f\" Received signal: {signal_name}\"\n return message\n\n\ndef main():\n options = parse_args()\n\n # TODO: move this export & download function in tools/ folder\n test_times_filename = options.export_past_test_times\n if test_times_filename:\n print(\n f\"Exporting past test times from S3 to {test_times_filename}, no tests will be run.\"\n )\n export_S3_test_times(test_times_filename)\n return\n\n specified_test_cases_filename = options.run_specified_test_cases\n if specified_test_cases_filename:\n print(\n f\"Loading specified test cases to run from {specified_test_cases_filename}.\"\n )\n global SPECIFIED_TEST_CASES_DICT\n SPECIFIED_TEST_CASES_DICT = get_specified_test_cases(\n specified_test_cases_filename, TESTS\n )\n\n test_directory = str(REPO_ROOT / \"test\")\n selected_tests = get_selected_tests(options)\n\n if options.verbose:\n print_to_stderr(\"Selected tests:\\n {}\".format(\"\\n \".join(selected_tests)))\n\n if options.dry_run:\n return\n\n if options.coverage and not PYTORCH_COLLECT_COVERAGE:\n shell([\"coverage\", \"erase\"])\n\n # NS: Disable target determination until it can be made more reliable\n # if options.determine_from is not None and os.path.exists(options.determine_from):\n # slow_tests = get_slow_tests_based_on_S3(\n # TESTS, TARGET_DET_LIST, SLOW_TEST_THRESHOLD\n # )\n # print_to_stderr(\n # \"Added the following tests to target_det tests as calculated based on S3:\"\n # )\n # print_to_stderr(slow_tests)\n # with open(options.determine_from, \"r\") as fh:\n # touched_files = [\n # os.path.normpath(name.strip())\n # for name in fh.read().split(\"\\n\")\n # if len(name.strip()) > 0\n # ]\n # # HACK: Ensure the 'test' paths can be traversed by Modulefinder\n # sys.path.append(test_directory)\n # selected_tests = [\n # test\n # for test in selected_tests\n # if should_run_test(\n # TARGET_DET_LIST + slow_tests, test, touched_files, options\n # )\n # ]\n # sys.path.remove(test_directory)\n\n if IS_IN_CI:\n selected_tests = get_reordered_tests(\n selected_tests, ENABLE_PR_HISTORY_REORDERING\n )\n # downloading test cases configuration to local environment\n get_test_case_configs(dirpath=test_directory)\n\n has_failed = False\n failure_messages = []\n try:\n for test in selected_tests:\n options_clone = copy.deepcopy(options)\n if test in USE_PYTEST_LIST:\n options_clone.pytest = True\n err_message = run_test_module(test, test_directory, options_clone)\n if err_message is None:\n continue\n has_failed = True\n failure_messages.append(err_message)\n if not options_clone.continue_through_error:\n raise RuntimeError(err_message)\n print_to_stderr(err_message)\n finally:\n if options.coverage:\n from coverage import Coverage\n\n with set_cwd(test_directory):\n cov = Coverage()\n if PYTORCH_COLLECT_COVERAGE:\n cov.load()\n cov.combine(strict=False)\n cov.save()\n if not PYTORCH_COLLECT_COVERAGE:\n cov.html_report()\n\n if options.continue_through_error and has_failed:\n for err in failure_messages:\n print_to_stderr(err)\n 
sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.distributed.is_nccl_available", "torch.utils.cpp_extension.verify_ninja_availability", "torch.testing._internal.common_utils.shell", "torch.distributed.is_available", "torch.distributed.is_gloo_available", "torch.cuda.device_count", "torch.distributed.is_mpi_available", "torch.testing._internal.common_utils.set_cwd" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
danielkentwood/fireTS
[ "3b00ae932e13997a0d069515273c09fc24e0593d" ]
[ "fireTS/core.py" ]
[ "import numpy as np\r\nfrom sklearn.base import BaseEstimator, RegressorMixin\r\nfrom sklearn.utils.validation import check_X_y\r\nfrom fireTS.utils import shift, MetaLagFeatureProcessor\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n\r\nclass TimeSeriesRegressor(BaseEstimator, RegressorMixin):\r\n \"\"\"\r\n TimeSeriesRegressor creates a time series model based on a\r\n general-purpose regression model defined in base_estimator.\r\n base_estimator must be a model which implements the scikit-learn APIs.\r\n \"\"\"\r\n\r\n def __init__(self, base_estimator, **base_params):\r\n self.base_estimator = base_estimator.set_params(**base_params)\r\n\r\n def set_params(self, **params):\r\n for param, value in params.items():\r\n if param in self.get_params():\r\n super(TimeSeriesRegressor, self).set_params(**{param: value})\r\n else:\r\n self.base_estimator.set_params(**{param: value})\r\n return self\r\n\r\n\r\nclass GeneralAutoRegressor(TimeSeriesRegressor, RegressorMixin):\r\n r\"\"\"\r\n The general auto regression model can be written in the following form:\r\n\r\n .. math::\r\n y(t + k) &=& f(y(t), ..., y(t-p+1), \\\\\r\n & & x_1(t - d_1), ..., x_1(t-d_1-q_1+1), \\\\\r\n & & ..., x_m(t - d_1), ..., x_m(t - d_m - q_m + 1)) + e(t)\r\n :label: gar\r\n\r\n :param object base_estimator: an estimator object that implements the\r\n scikit-learn API (fit, and predict). The\r\n estimator will be used to fit the function\r\n :math:`f` in equation :eq:`gar`.\r\n :param int auto_order: the autoregression order :math:`p` in equation\r\n :eq:`gar`.\r\n :param list exog_order: the exogenous input order, a list of integers\r\n representing the order for each exogenous input,\r\n i.e. :math:`[q_1, q_2, ..., q_m]` in equation\r\n :eq:`gar`.\r\n :param list exog_delay: the delays of the exogenous inputs, a list of\r\n integers representing the delay of each exogenous\r\n input, i.e. :math:`[d_1, d_2, ..., d_m]` in\r\n equation :eq:`gar`. 
By default, all the delays are\r\n set to 0.\r\n :param int pred_step: the prediction step :math:`k` in equation :eq:`gar`.\r\n By default, it is set to 1.\r\n :param dict base_params: other keyword arguments for base_estimator.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n base_estimator,\r\n auto_order,\r\n exog_order,\r\n exog_delay=None,\r\n pred_step=1,\r\n **base_params):\r\n super(GeneralAutoRegressor, self).__init__(base_estimator,\r\n **base_params)\r\n self.auto_order = auto_order\r\n self.exog_order = exog_order\r\n if exog_delay is None:\r\n exog_delay = [0] * len(exog_order)\r\n if len(exog_delay) != len(exog_order):\r\n raise ValueError(\r\n 'The length of exog_delay must be the same as the length of exog_order.'\r\n )\r\n self.exog_delay = exog_delay\r\n self.num_exog_inputs = len(exog_order)\r\n self.pred_step = pred_step\r\n\r\n def fit(self, X, y, **params):\r\n \"\"\"\r\n Create lag features and fit the base_estimator.\r\n\r\n :param array-like X: exogenous input time series, shape = (n_samples,\r\n n_exog_inputs)\r\n :param array-like y: target time series to predict, shape = (n_samples)\r\n \"\"\"\r\n X, y = self._check_and_preprocess_X_y(X, y)\r\n features, target = self._preprocess_data(X, y)\r\n self.base_estimator.fit(features, target, **params)\r\n\r\n def _preprocess_data(self, X, y):\r\n \"\"\"\r\n Helper function to prepare the data for base_estimator.\r\n \"\"\"\r\n p = self._get_lag_feature_processor(X, y)\r\n features = p.generate_lag_features()\r\n target = shift(y, -self.pred_step)\r\n\r\n # Remove NaN introduced by shift\r\n all_data = np.concatenate([target.reshape(-1, 1), features], axis=1)\r\n mask = np.isnan(all_data).any(axis=1)\r\n features, target = features[~mask], target[~mask]\r\n return features, target\r\n\r\n def _get_lag_feature_processor(self, X, y):\r\n return MetaLagFeatureProcessor(X, y, self.auto_order, self.exog_order,\r\n self.exog_delay)\r\n\r\n def grid_search(self, X, y, para_grid, **params):\r\n \"\"\"\r\n Perform grid search on the base_estimator. The function first generates\r\n the lag features and predicting targets, and then calls\r\n ``GridSearchCV`` in scikit-learn package.\r\n\r\n :param array-like X: exogenous input time series, shape = (n_samples,\r\n n_exog_inputs)\r\n :param array-like y: target time series to predict, shape = (n_samples)\r\n :param dict para_grid: use the same format in ``GridSearchCV`` in\r\n scikit-learn package.\r\n :param dict params: other keyword arguments that can be passed into\r\n ``GridSearchCV`` in scikit-learn package.\r\n \"\"\"\r\n grid = GridSearchCV(self.base_estimator, para_grid, **params)\r\n X, y = self._check_and_preprocess_X_y(X, y)\r\n features, target = self._preprocess_data(X, y)\r\n grid.fit(features, target)\r\n self.set_params(**grid.best_params_)\r\n\r\n def _predictNA(self, Xdata):\r\n # Xdata contains nan introduced by shift\r\n ypred = np.empty(Xdata.shape[0]) * np.nan\r\n mask = np.isnan(Xdata).any(axis=1)\r\n X2pred = Xdata[~mask]\r\n ypred[~mask] = self.base_estimator.predict(X2pred)\r\n return ypred\r\n\r\n def _check_and_preprocess_X_y(self, X, y):\r\n min_samples_required = max(self.auto_order, \r\n np.max(np.array(self.exog_delay) + np.array(self.exog_order))) - 1\r\n X, y = check_X_y(X, y, ensure_min_samples=min_samples_required)\r\n if len(self.exog_order) != X.shape[1]:\r\n raise ValueError(\r\n 'The number of columns of X must be the same as the length of exog_order.'\r\n )\r\n return X, y\r\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "numpy.isnan", "sklearn.utils.validation.check_X_y", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
havanagrawal/ml-from-scratch
[ "1f5aacc3bb8f831dadf5f27cf8d6fa867b4ec3ab" ]
[ "examples/spam_trainer.py" ]
[ "import os\nimport sys\n\nimport logging\nlogging.basicConfig(format='%(asctime)s %(levelname)-8s [%(process)d] %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')\n\nimport numpy as np\n\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score\n\n_curdir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(_curdir + \"/..\")\n\nfrom classifiers import FGMClassifier\nimport datasets\n\ndef train_spam_untuned(classifier, X_train, X_test, y_train, y_test):\n clf = FGMClassifier(classifier=classifier, lmbda=0.1, max_iter=20, learning_rate='adaptive', eta=1)\n return fit_predict(clf, X_train, X_test, y_train, y_test)\n\ndef train_spam_tuned(X_train, X_test, y_train, y_test):\n param_grid = {\n 'classifier': ['logistic', 'svm'],\n 'lmbda': np.linspace(0, 1, 3)\n }\n clf = GridSearchCV(FGMClassifier(max_iter=10, verbose=True), param_grid, verbose=2)\n\n return fit_predict(clf, X_train, X_test, y_train, y_test)\n\n\ndef fit_predict(clf, X_train, X_test, y_train, y_test):\n clf.fit(X_train, y_train)\n\n logging.info(\"Predicting...\")\n logging.info(\"Training accuracy: {}\".format(clf.score(X_train, y_train)))\n logging.info(\"Test accuracy: {}\".format(clf.score(X_test, y_test)))\n\n return clf\n\ndef main():\n logging.info(\"Loading data...\")\n X_train, X_test, y_train, y_test = datasets.load_spam(standardized=True, with_intercept=True)\n\n logging.info(\"Training logistic without tuning lambda...\")\n train_spam_untuned('logistic', X_train, X_test, y_train, y_test)\n\n logging.info(\"Training svm without tuning lambda...\")\n train_spam_untuned('svm', X_train, X_test, y_train, y_test)\n\n logging.info(\"Training with GridSearchCV...\")\n clf = train_spam_tuned(X_train, X_test, y_train, y_test)\n\n logging.info(\"Best params: {}\".format(clf.best_params_))\n logging.info(\"Best estimator: {}\".format(clf.best_estimator_))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
obulrdy6881/Drowsinss
[ "61cb9281d7dd22aee282b517e2fbf500f0ff9935" ]
[ "my_env/Lib/site-packages/sklearn/cluster/_birch.py" ]
[ "# Authors: Manoj Kumar <[email protected]>\r\n# Alexandre Gramfort <[email protected]>\r\n# Joel Nothman <[email protected]>\r\n# License: BSD 3 clause\r\n\r\nimport warnings\r\nimport numbers\r\nimport numpy as np\r\nfrom scipy import sparse\r\nfrom math import sqrt\r\n\r\nfrom ..metrics import pairwise_distances_argmin\r\nfrom ..metrics.pairwise import euclidean_distances\r\nfrom ..base import TransformerMixin, ClusterMixin, BaseEstimator\r\nfrom ..utils import check_array\r\nfrom ..utils.extmath import row_norms\r\nfrom ..utils.validation import check_is_fitted, _deprecate_positional_args\r\nfrom ..exceptions import ConvergenceWarning\r\nfrom . import AgglomerativeClustering\r\n\r\n\r\ndef _iterate_sparse_X(X):\r\n \"\"\"This little hack returns a densified row when iterating over a sparse\r\n matrix, instead of constructing a sparse matrix for every row that is\r\n expensive.\r\n \"\"\"\r\n n_samples = X.shape[0]\r\n X_indices = X.indices\r\n X_data = X.data\r\n X_indptr = X.indptr\r\n\r\n for i in range(n_samples):\r\n row = np.zeros(X.shape[1])\r\n startptr, endptr = X_indptr[i], X_indptr[i + 1]\r\n nonzero_indices = X_indices[startptr:endptr]\r\n row[nonzero_indices] = X_data[startptr:endptr]\r\n yield row\r\n\r\n\r\ndef _split_node(node, threshold, branching_factor):\r\n \"\"\"The node has to be split if there is no place for a new subcluster\r\n in the node.\r\n 1. Two empty nodes and two empty subclusters are initialized.\r\n 2. The pair of distant subclusters are found.\r\n 3. The properties of the empty subclusters and nodes are updated\r\n according to the nearest distance between the subclusters to the\r\n pair of distant subclusters.\r\n 4. The two nodes are set as children to the two subclusters.\r\n \"\"\"\r\n new_subcluster1 = _CFSubcluster()\r\n new_subcluster2 = _CFSubcluster()\r\n new_node1 = _CFNode(\r\n threshold=threshold, branching_factor=branching_factor,\r\n is_leaf=node.is_leaf,\r\n n_features=node.n_features)\r\n new_node2 = _CFNode(\r\n threshold=threshold, branching_factor=branching_factor,\r\n is_leaf=node.is_leaf,\r\n n_features=node.n_features)\r\n new_subcluster1.child_ = new_node1\r\n new_subcluster2.child_ = new_node2\r\n\r\n if node.is_leaf:\r\n if node.prev_leaf_ is not None:\r\n node.prev_leaf_.next_leaf_ = new_node1\r\n new_node1.prev_leaf_ = node.prev_leaf_\r\n new_node1.next_leaf_ = new_node2\r\n new_node2.prev_leaf_ = new_node1\r\n new_node2.next_leaf_ = node.next_leaf_\r\n if node.next_leaf_ is not None:\r\n node.next_leaf_.prev_leaf_ = new_node2\r\n\r\n dist = euclidean_distances(\r\n node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)\r\n n_clusters = dist.shape[0]\r\n\r\n farthest_idx = np.unravel_index(\r\n dist.argmax(), (n_clusters, n_clusters))\r\n node1_dist, node2_dist = dist[(farthest_idx,)]\r\n\r\n node1_closer = node1_dist < node2_dist\r\n for idx, subcluster in enumerate(node.subclusters_):\r\n if node1_closer[idx]:\r\n new_node1.append_subcluster(subcluster)\r\n new_subcluster1.update(subcluster)\r\n else:\r\n new_node2.append_subcluster(subcluster)\r\n new_subcluster2.update(subcluster)\r\n return new_subcluster1, new_subcluster2\r\n\r\n\r\nclass _CFNode:\r\n \"\"\"Each node in a CFTree is called a CFNode.\r\n\r\n The CFNode can have a maximum of branching_factor\r\n number of CFSubclusters.\r\n\r\n Parameters\r\n ----------\r\n threshold : float\r\n Threshold needed for a new subcluster to enter a CFSubcluster.\r\n\r\n branching_factor : int\r\n Maximum number of CF subclusters in each node.\r\n\r\n is_leaf : 
bool\r\n We need to know if the CFNode is a leaf or not, in order to\r\n retrieve the final subclusters.\r\n\r\n n_features : int\r\n The number of features.\r\n\r\n Attributes\r\n ----------\r\n subclusters_ : list\r\n List of subclusters for a particular CFNode.\r\n\r\n prev_leaf_ : _CFNode\r\n Useful only if is_leaf is True.\r\n\r\n next_leaf_ : _CFNode\r\n next_leaf. Useful only if is_leaf is True.\r\n the final subclusters.\r\n\r\n init_centroids_ : ndarray of shape (branching_factor + 1, n_features)\r\n Manipulate ``init_centroids_`` throughout rather than centroids_ since\r\n the centroids are just a view of the ``init_centroids_`` .\r\n\r\n init_sq_norm_ : ndarray of shape (branching_factor + 1,)\r\n manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.\r\n\r\n centroids_ : ndarray of shape (branching_factor + 1, n_features)\r\n View of ``init_centroids_``.\r\n\r\n squared_norm_ : ndarray of shape (branching_factor + 1,)\r\n View of ``init_sq_norm_``.\r\n\r\n \"\"\"\r\n def __init__(self, *, threshold, branching_factor, is_leaf, n_features):\r\n self.threshold = threshold\r\n self.branching_factor = branching_factor\r\n self.is_leaf = is_leaf\r\n self.n_features = n_features\r\n\r\n # The list of subclusters, centroids and squared norms\r\n # to manipulate throughout.\r\n self.subclusters_ = []\r\n self.init_centroids_ = np.zeros((branching_factor + 1, n_features))\r\n self.init_sq_norm_ = np.zeros((branching_factor + 1))\r\n self.squared_norm_ = []\r\n self.prev_leaf_ = None\r\n self.next_leaf_ = None\r\n\r\n def append_subcluster(self, subcluster):\r\n n_samples = len(self.subclusters_)\r\n self.subclusters_.append(subcluster)\r\n self.init_centroids_[n_samples] = subcluster.centroid_\r\n self.init_sq_norm_[n_samples] = subcluster.sq_norm_\r\n\r\n # Keep centroids and squared norm as views. 
In this way\r\n # if we change init_centroids and init_sq_norm_, it is\r\n # sufficient,\r\n self.centroids_ = self.init_centroids_[:n_samples + 1, :]\r\n self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]\r\n\r\n def update_split_subclusters(self, subcluster,\r\n new_subcluster1, new_subcluster2):\r\n \"\"\"Remove a subcluster from a node and update it with the\r\n split subclusters.\r\n \"\"\"\r\n ind = self.subclusters_.index(subcluster)\r\n self.subclusters_[ind] = new_subcluster1\r\n self.init_centroids_[ind] = new_subcluster1.centroid_\r\n self.init_sq_norm_[ind] = new_subcluster1.sq_norm_\r\n self.append_subcluster(new_subcluster2)\r\n\r\n def insert_cf_subcluster(self, subcluster):\r\n \"\"\"Insert a new subcluster into the node.\"\"\"\r\n if not self.subclusters_:\r\n self.append_subcluster(subcluster)\r\n return False\r\n\r\n threshold = self.threshold\r\n branching_factor = self.branching_factor\r\n # We need to find the closest subcluster among all the\r\n # subclusters so that we can insert our new subcluster.\r\n dist_matrix = np.dot(self.centroids_, subcluster.centroid_)\r\n dist_matrix *= -2.\r\n dist_matrix += self.squared_norm_\r\n closest_index = np.argmin(dist_matrix)\r\n closest_subcluster = self.subclusters_[closest_index]\r\n\r\n # If the subcluster has a child, we need a recursive strategy.\r\n if closest_subcluster.child_ is not None:\r\n split_child = closest_subcluster.child_.insert_cf_subcluster(\r\n subcluster)\r\n\r\n if not split_child:\r\n # If it is determined that the child need not be split, we\r\n # can just update the closest_subcluster\r\n closest_subcluster.update(subcluster)\r\n self.init_centroids_[closest_index] = \\\r\n self.subclusters_[closest_index].centroid_\r\n self.init_sq_norm_[closest_index] = \\\r\n self.subclusters_[closest_index].sq_norm_\r\n return False\r\n\r\n # things not too good. we need to redistribute the subclusters in\r\n # our child node, and add a new subcluster in the parent\r\n # subcluster to accommodate the new child.\r\n else:\r\n new_subcluster1, new_subcluster2 = _split_node(\r\n closest_subcluster.child_, threshold, branching_factor)\r\n self.update_split_subclusters(\r\n closest_subcluster, new_subcluster1, new_subcluster2)\r\n\r\n if len(self.subclusters_) > self.branching_factor:\r\n return True\r\n return False\r\n\r\n # good to go!\r\n else:\r\n merged = closest_subcluster.merge_subcluster(\r\n subcluster, self.threshold)\r\n if merged:\r\n self.init_centroids_[closest_index] = \\\r\n closest_subcluster.centroid_\r\n self.init_sq_norm_[closest_index] = \\\r\n closest_subcluster.sq_norm_\r\n return False\r\n\r\n # not close to any other subclusters, and we still\r\n # have space, so add.\r\n elif len(self.subclusters_) < self.branching_factor:\r\n self.append_subcluster(subcluster)\r\n return False\r\n\r\n # We do not have enough space nor is it closer to an\r\n # other subcluster. We need to split.\r\n else:\r\n self.append_subcluster(subcluster)\r\n return True\r\n\r\n\r\nclass _CFSubcluster:\r\n \"\"\"Each subcluster in a CFNode is called a CFSubcluster.\r\n\r\n A CFSubcluster can have a CFNode has its child.\r\n\r\n Parameters\r\n ----------\r\n linear_sum : ndarray of shape (n_features,), default=None\r\n Sample. This is kept optional to allow initialization of empty\r\n subclusters.\r\n\r\n Attributes\r\n ----------\r\n n_samples_ : int\r\n Number of samples that belong to each subcluster.\r\n\r\n linear_sum_ : ndarray\r\n Linear sum of all the samples in a subcluster. 
Prevents holding\r\n all sample data in memory.\r\n\r\n squared_sum_ : float\r\n Sum of the squared l2 norms of all samples belonging to a subcluster.\r\n\r\n centroid_ : ndarray of shape (branching_factor + 1, n_features)\r\n Centroid of the subcluster. Prevent recomputing of centroids when\r\n ``CFNode.centroids_`` is called.\r\n\r\n child_ : _CFNode\r\n Child Node of the subcluster. Once a given _CFNode is set as the child\r\n of the _CFNode, it is set to ``self.child_``.\r\n\r\n sq_norm_ : ndarray of shape (branching_factor + 1,)\r\n Squared norm of the subcluster. Used to prevent recomputing when\r\n pairwise minimum distances are computed.\r\n \"\"\"\r\n def __init__(self, *, linear_sum=None):\r\n if linear_sum is None:\r\n self.n_samples_ = 0\r\n self.squared_sum_ = 0.0\r\n self.centroid_ = self.linear_sum_ = 0\r\n else:\r\n self.n_samples_ = 1\r\n self.centroid_ = self.linear_sum_ = linear_sum\r\n self.squared_sum_ = self.sq_norm_ = np.dot(\r\n self.linear_sum_, self.linear_sum_)\r\n self.child_ = None\r\n\r\n def update(self, subcluster):\r\n self.n_samples_ += subcluster.n_samples_\r\n self.linear_sum_ += subcluster.linear_sum_\r\n self.squared_sum_ += subcluster.squared_sum_\r\n self.centroid_ = self.linear_sum_ / self.n_samples_\r\n self.sq_norm_ = np.dot(self.centroid_, self.centroid_)\r\n\r\n def merge_subcluster(self, nominee_cluster, threshold):\r\n \"\"\"Check if a cluster is worthy enough to be merged. If\r\n yes then merge.\r\n \"\"\"\r\n new_ss = self.squared_sum_ + nominee_cluster.squared_sum_\r\n new_ls = self.linear_sum_ + nominee_cluster.linear_sum_\r\n new_n = self.n_samples_ + nominee_cluster.n_samples_\r\n new_centroid = (1 / new_n) * new_ls\r\n new_norm = np.dot(new_centroid, new_centroid)\r\n dot_product = (-2 * new_n) * new_norm\r\n sq_radius = (new_ss + dot_product) / new_n + new_norm\r\n if sq_radius <= threshold ** 2:\r\n (self.n_samples_, self.linear_sum_, self.squared_sum_,\r\n self.centroid_, self.sq_norm_) = \\\r\n new_n, new_ls, new_ss, new_centroid, new_norm\r\n return True\r\n return False\r\n\r\n @property\r\n def radius(self):\r\n \"\"\"Return radius of the subcluster\"\"\"\r\n dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)\r\n return sqrt(\r\n ((self.squared_sum_ + dot_product) / self.n_samples_) +\r\n self.sq_norm_)\r\n\r\n\r\nclass Birch(ClusterMixin, TransformerMixin, BaseEstimator):\r\n \"\"\"Implements the Birch clustering algorithm.\r\n\r\n It is a memory-efficient, online-learning algorithm provided as an\r\n alternative to :class:`MiniBatchKMeans`. It constructs a tree\r\n data structure with the cluster centroids being read off the leaf.\r\n These can be either the final cluster centroids or can be provided as input\r\n to another clustering algorithm such as :class:`AgglomerativeClustering`.\r\n\r\n Read more in the :ref:`User Guide <birch>`.\r\n\r\n .. versionadded:: 0.16\r\n\r\n Parameters\r\n ----------\r\n threshold : float, default=0.5\r\n The radius of the subcluster obtained by merging a new sample and the\r\n closest subcluster should be lesser than the threshold. Otherwise a new\r\n subcluster is started. Setting this value to be very low promotes\r\n splitting and vice-versa.\r\n\r\n branching_factor : int, default=50\r\n Maximum number of CF subclusters in each node. If a new samples enters\r\n such that the number of subclusters exceed the branching_factor then\r\n that node is split into two nodes with the subclusters redistributed\r\n in each. 
The parent subcluster of that node is removed and two new\r\n subclusters are added as parents of the 2 split nodes.\r\n\r\n n_clusters : int, instance of sklearn.cluster model, default=3\r\n Number of clusters after the final clustering step, which treats the\r\n subclusters from the leaves as new samples.\r\n\r\n - `None` : the final clustering step is not performed and the\r\n subclusters are returned as they are.\r\n\r\n - :mod:`sklearn.cluster` Estimator : If a model is provided, the model\r\n is fit treating the subclusters as new samples and the initial data\r\n is mapped to the label of the closest subcluster.\r\n\r\n - `int` : the model fit is :class:`AgglomerativeClustering` with\r\n `n_clusters` set to be equal to the int.\r\n\r\n compute_labels : bool, default=True\r\n Whether or not to compute labels for each fit.\r\n\r\n copy : bool, default=True\r\n Whether or not to make a copy of the given data. If set to False,\r\n the initial data will be overwritten.\r\n\r\n Attributes\r\n ----------\r\n root_ : _CFNode\r\n Root of the CFTree.\r\n\r\n dummy_leaf_ : _CFNode\r\n Start pointer to all the leaves.\r\n\r\n subcluster_centers_ : ndarray\r\n Centroids of all subclusters read directly from the leaves.\r\n\r\n subcluster_labels_ : ndarray\r\n Labels assigned to the centroids of the subclusters after\r\n they are clustered globally.\r\n\r\n labels_ : ndarray of shape (n_samples,)\r\n Array of labels assigned to the input data.\r\n if partial_fit is used instead of fit, they are assigned to the\r\n last batch of data.\r\n\r\n See Also\r\n --------\r\n\r\n MiniBatchKMeans\r\n Alternative implementation that does incremental updates\r\n of the centers' positions using mini-batches.\r\n\r\n Notes\r\n -----\r\n The tree data structure consists of nodes with each node consisting of\r\n a number of subclusters. The maximum number of subclusters in a node\r\n is determined by the branching factor. Each subcluster maintains a\r\n linear sum, squared sum and the number of samples in that subcluster.\r\n In addition, each subcluster can also have a node as its child, if the\r\n subcluster is not a member of a leaf node.\r\n\r\n For a new point entering the root, it is merged with the subcluster closest\r\n to it and the linear sum, squared sum and the number of samples of that\r\n subcluster are updated. 
This is done recursively till the properties of\r\n the leaf node are updated.\r\n\r\n References\r\n ----------\r\n * Tian Zhang, Raghu Ramakrishnan, Maron Livny\r\n BIRCH: An efficient data clustering method for large databases.\r\n https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf\r\n\r\n * Roberto Perdisci\r\n JBirch - Java implementation of BIRCH clustering algorithm\r\n https://code.google.com/archive/p/jbirch\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.cluster import Birch\r\n >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]\r\n >>> brc = Birch(n_clusters=None)\r\n >>> brc.fit(X)\r\n Birch(n_clusters=None)\r\n >>> brc.predict(X)\r\n array([0, 0, 0, 1, 1, 1])\r\n \"\"\"\r\n @_deprecate_positional_args\r\n def __init__(self, *, threshold=0.5, branching_factor=50, n_clusters=3,\r\n compute_labels=True, copy=True):\r\n self.threshold = threshold\r\n self.branching_factor = branching_factor\r\n self.n_clusters = n_clusters\r\n self.compute_labels = compute_labels\r\n self.copy = copy\r\n\r\n def fit(self, X, y=None):\r\n \"\"\"\r\n Build a CF Tree for the input data.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Input data.\r\n\r\n y : Ignored\r\n Not used, present here for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n self\r\n Fitted estimator.\r\n \"\"\"\r\n self.fit_, self.partial_fit_ = True, False\r\n return self._fit(X)\r\n\r\n def _fit(self, X):\r\n X = self._validate_data(X, accept_sparse='csr', copy=self.copy)\r\n threshold = self.threshold\r\n branching_factor = self.branching_factor\r\n\r\n if branching_factor <= 1:\r\n raise ValueError(\"Branching_factor should be greater than one.\")\r\n n_samples, n_features = X.shape\r\n\r\n # If partial_fit is called for the first time or fit is called, we\r\n # start a new tree.\r\n partial_fit = getattr(self, 'partial_fit_')\r\n has_root = getattr(self, 'root_', None)\r\n if getattr(self, 'fit_') or (partial_fit and not has_root):\r\n # The first root is the leaf. Manipulate this object throughout.\r\n self.root_ = _CFNode(threshold=threshold,\r\n branching_factor=branching_factor,\r\n is_leaf=True,\r\n n_features=n_features)\r\n\r\n # To enable getting back subclusters.\r\n self.dummy_leaf_ = _CFNode(threshold=threshold,\r\n branching_factor=branching_factor,\r\n is_leaf=True, n_features=n_features)\r\n self.dummy_leaf_.next_leaf_ = self.root_\r\n self.root_.prev_leaf_ = self.dummy_leaf_\r\n\r\n # Cannot vectorize. 
Enough to convince to use cython.\r\n if not sparse.issparse(X):\r\n iter_func = iter\r\n else:\r\n iter_func = _iterate_sparse_X\r\n\r\n for sample in iter_func(X):\r\n subcluster = _CFSubcluster(linear_sum=sample)\r\n split = self.root_.insert_cf_subcluster(subcluster)\r\n\r\n if split:\r\n new_subcluster1, new_subcluster2 = _split_node(\r\n self.root_, threshold, branching_factor)\r\n del self.root_\r\n self.root_ = _CFNode(threshold=threshold,\r\n branching_factor=branching_factor,\r\n is_leaf=False,\r\n n_features=n_features)\r\n self.root_.append_subcluster(new_subcluster1)\r\n self.root_.append_subcluster(new_subcluster2)\r\n\r\n centroids = np.concatenate([\r\n leaf.centroids_ for leaf in self._get_leaves()])\r\n self.subcluster_centers_ = centroids\r\n\r\n self._global_clustering(X)\r\n return self\r\n\r\n def _get_leaves(self):\r\n \"\"\"\r\n Retrieve the leaves of the CF Node.\r\n\r\n Returns\r\n -------\r\n leaves : list of shape (n_leaves,)\r\n List of the leaf nodes.\r\n \"\"\"\r\n leaf_ptr = self.dummy_leaf_.next_leaf_\r\n leaves = []\r\n while leaf_ptr is not None:\r\n leaves.append(leaf_ptr)\r\n leaf_ptr = leaf_ptr.next_leaf_\r\n return leaves\r\n\r\n def partial_fit(self, X=None, y=None):\r\n \"\"\"\r\n Online learning. Prevents rebuilding of CFTree from scratch.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features), \\\r\n default=None\r\n Input data. If X is not provided, only the global clustering\r\n step is done.\r\n\r\n y : Ignored\r\n Not used, present here for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n self\r\n Fitted estimator.\r\n \"\"\"\r\n self.partial_fit_, self.fit_ = True, False\r\n if X is None:\r\n # Perform just the final global clustering step.\r\n self._global_clustering()\r\n return self\r\n else:\r\n self._check_fit(X)\r\n return self._fit(X)\r\n\r\n def _check_fit(self, X):\r\n check_is_fitted(self)\r\n\r\n if (hasattr(self, 'subcluster_centers_') and\r\n X.shape[1] != self.subcluster_centers_.shape[1]):\r\n raise ValueError(\r\n \"Training data and predicted data do \"\r\n \"not have same number of features.\")\r\n\r\n def predict(self, X):\r\n \"\"\"\r\n Predict data using the ``centroids_`` of subclusters.\r\n\r\n Avoid computation of the row norms of X.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Input data.\r\n\r\n Returns\r\n -------\r\n labels : ndarray of shape(n_samples,)\r\n Labelled data.\r\n \"\"\"\r\n X = check_array(X, accept_sparse='csr')\r\n self._check_fit(X)\r\n kwargs = {'Y_norm_squared': self._subcluster_norms}\r\n return self.subcluster_labels_[\r\n pairwise_distances_argmin(X,\r\n self.subcluster_centers_,\r\n metric_kwargs=kwargs)\r\n ]\r\n\r\n def transform(self, X):\r\n \"\"\"\r\n Transform X into subcluster centroids dimension.\r\n\r\n Each dimension represents the distance from the sample point to each\r\n cluster centroid.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Input data.\r\n\r\n Returns\r\n -------\r\n X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)\r\n Transformed data.\r\n \"\"\"\r\n check_is_fitted(self)\r\n return euclidean_distances(X, self.subcluster_centers_)\r\n\r\n def _global_clustering(self, X=None):\r\n \"\"\"\r\n Global clustering for the subclusters obtained after fitting\r\n \"\"\"\r\n clusterer = self.n_clusters\r\n centroids = self.subcluster_centers_\r\n compute_labels = (X is not 
None) and self.compute_labels\r\n\r\n # Preprocessing for the global clustering.\r\n not_enough_centroids = False\r\n if isinstance(clusterer, numbers.Integral):\r\n clusterer = AgglomerativeClustering(\r\n n_clusters=self.n_clusters)\r\n # There is no need to perform the global clustering step.\r\n if len(centroids) < self.n_clusters:\r\n not_enough_centroids = True\r\n elif (clusterer is not None and not\r\n hasattr(clusterer, 'fit_predict')):\r\n raise ValueError(\"n_clusters should be an instance of \"\r\n \"ClusterMixin or an int\")\r\n\r\n # To use in predict to avoid recalculation.\r\n self._subcluster_norms = row_norms(\r\n self.subcluster_centers_, squared=True)\r\n\r\n if clusterer is None or not_enough_centroids:\r\n self.subcluster_labels_ = np.arange(len(centroids))\r\n if not_enough_centroids:\r\n warnings.warn(\r\n \"Number of subclusters found (%d) by Birch is less \"\r\n \"than (%d). Decrease the threshold.\"\r\n % (len(centroids), self.n_clusters), ConvergenceWarning)\r\n else:\r\n # The global clustering step that clusters the subclusters of\r\n # the leaves. It assumes the centroids of the subclusters as\r\n # samples and finds the final centroids.\r\n self.subcluster_labels_ = clusterer.fit_predict(\r\n self.subcluster_centers_)\r\n\r\n if compute_labels:\r\n self.labels_ = self.predict(X)\r\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.argmin", "scipy.sparse.issparse" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
cdds-uiuc/simles-book
[ "79f0fe1133d44f6b94b4bdcd0f05ff65434240c9" ]
[ "_build/jupyter_execute/content/Module02/M02_N02_Optimization.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Optimization\n\n# In[101]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n# These are some parameters to make figures nice (and big)\n\n#%matplotlib inline\n#%config InlineBackend.figure_format = 'retina' \nparams = {'legend.fontsize': 'x-large',\n 'figure.figsize': (15, 5),\n 'axes.labelsize': 'x-large',\n 'axes.titlesize':'x-large',\n 'xtick.labelsize':'x-large',\n 'ytick.labelsize':'x-large'}\nplt.rcParams.update(params)\n\n\n# # Theory\n# \n# In this notebook we will briefly consider the more general problem of fitting a model $\\tilde y(\\theta)$ to some obervations $y$, where $\\theta$ are the model parameters\n# \n# If the model is nonlinear, we will not be able to write simple, analytical estimator, so we will look for a numerical solution. This is very common for machine learning problems, where we cannot compute an optimal solution algebraically. \n# \n# We will generally fit the parameters by minimizing the misfit, i.e. by minimizing a cost function $J(\\theta,y)$:\n# \n# $$\\hat\\theta =\\text{argmin } J(\\theta,y)$$ \n# \n# A typical choice for the cost function is the least-squares cost function. If the errors are independent and normally distributed this is motivatd by maximum likelihood theory. However, this is generally a suitable function that can be useful even if there is no theoretical underpinning. For a set of observations $y(t_j)$, the cost function becomes\n# $$ J\\left[\\theta,y(t_j)\\right]=\\sum_j \\left[\\tilde y(\\theta,t_j)-y(t_j)\\right]^2$$\n# \n# For now, we will use a simple python optimiziation method to minimize the cost function. There are several such methods, but they all behave the same. They require a cost function J, that is a function of the parameters, and needs to be minimized, and an initial condition $\\theta_i$.\n# \n# ```\n# theta_hat=optimize.minimize(J,theta_init,args)\n# ```\n\n# # Example Model\n# Consider the model $\\tilde y(t)=y_0\\cdot \\exp (-t/\\tau)$\n# with parameters $\\theta =\\{y_0,\\tau\\}$, which can be also written a \n# \n# $$\\tilde y(\\theta,t)=\\theta_1 \\cdot \\exp (-t/\\theta_2)$$\n# \n# Let's first visualize the model, before fitting it to some synthetic observations\n\n# In[102]:\n\n\ndef model(theta,t):\n y_model=theta[0]*np.exp(-t/theta[1])\n return y_model\n\n\n# In[103]:\n\n\n# Let's choose some parameters and generate some pseudo-observations\ny0_true =3;\ntau_true =3;\ntheta_true=[y0_true,tau_true];\nsigma_e =0.4; # errors\n\n\n# In[104]:\n\n\n# plot model and pseudo observations\nt=np.linspace(0,10,100)\ne=stats.norm.rvs(0,sigma_e,100)\n\ny_true=model(theta_true,t)\ny_obs =y_true+e\n\nfig,ax=plt.subplots(1,1,figsize=[4,4])\nax.plot(t,y_true,'-',label='truth')\nax.plot(t,y_obs,'o',label='observations')\nax.legend();\n\n\n# **Let's plot the model for some first guesses**\n\n# In[105]:\n\n\ntheta=[[3.5,6],\n [5,3],\n [3.3,2.4]]\n\n\n# In[106]:\n\n\n#Example of models\nfig,ax=plt.subplots(1,3,figsize=[12,4])\n\nfor j in range(3):\n y =model(theta[j],t)\n ax[j].plot(t,y_true,'-',label='truth')\n ax[j].plot(t,y_obs,'o',label='observations')\n ax[j].plot(t,y,'-',label='model')\n ax[j].legend()\n\n\n# # Cost Function\n# \n# We will generally fit the parameters by minimizing themisfit, i.e. by minimizing a cost function $J(\\theta,y)$:\n# \n# $$\\hat\\theta =\\text{argmin } J(\\theta,y)$$ \n# \n# A typical choice for the cost function is the least-squares cost function. 
If the errors are independent and normally distributed this is motivatd by maximum likelihood theory. However, this is generally a suitable function that can be useful even if there is no theoretical underpinning. For a set of observations $y(t_j)$, the cost function becomes\n# \n# $$ J(\\theta,y(t_j))=\\sum_j \\left[\\tilde y(\\theta,t_j)-y(t_j)\\right]^2$$\n# \n\n# In[107]:\n\n\ndef Jcost(theta,y_obs,t):\n Jcost=np.sum( (y_obs-model(theta,t))**2)\n\n return Jcost\n\n\n# In[108]:\n\n\nfig,ax=plt.subplots(1,3,figsize=[12,3])\n\nfor j in range(3):\n y =model(theta[j],t)\n J=Jcost(theta[j],y_obs,t)\n ax[j].plot(t,y_true,'-',label='truth')\n ax[j].plot(t,y_obs,'o',label='observations')\n ax[j].plot(t,y,'-',label='model')\n \n title_str='J='+np.array2string(J,precision=2)\n ax[j].set_title(title_str)\n\n\n# ## Visualize the Cost Function\n\n# In[109]:\n\n\nN1=21;\nN2=20;\ny0_vec=np.linspace(1.5,4,N1);\ntau_vec=np.linspace(1,4,N2);\n\nJ=np.zeros(shape=[N1,N2]);\n\nfor j1 in range(N1):\n for j2 in range(N2):\n theta=[y0_vec[j1],tau_vec[j2]];\n J[j1,j2]=Jcost(theta,y_obs,t); \n\n\n# In[111]:\n\n\nfrom matplotlib import cm\n\nfig, ax = plt.subplots(subplot_kw={\"projection\": \"3d\"},figsize=[10,10])\nX,Y=np.meshgrid(tau_vec,y0_vec)\n\nsurf=ax.plot_surface(X,Y,J,cmap=cm.get_cmap('turbo'),\n linewidth=0, antialiased=False)\n\nax.invert_yaxis()\nax.invert_xaxis()\nax.set_ylabel('theta_1=$y_0$');\nax.set_xlabel('theta_2=tau');\nax.set_zlabel('J(theta)');\nfig.colorbar(surf, shrink=0.5, aspect=10,label='J(theta)');\n\n\n# # Optimize using scipy package\n\n# In[96]:\n\n\nfrom scipy import optimize \ntheta_i=[2,1.2]\ntheta_hat=optimize.minimize(Jcost,theta_i,args=(y_obs,t)).x;\n\nprint(theta_hat)\nprint(theta_true)\n\n\n# In[112]:\n\n\ny_true=model(theta_true,x)\ny_obs =y_true+e\n\nfig,ax=plt.subplots(1,2,figsize=[20,10])\nax[0].plot(x,y_true,'-',label='truth')\nax[0].plot(x,y_obs,'o',label='observations')\nax[0].plot(x,model(theta_i,x),'k--',label='initial gues')\nax[0].plot(x,model(theta_hat,x),'r--',label='best_fit')\nax[0].legend()\n\nax=plt.subplot(1,2,2,projection='3d')\nX,Y=np.meshgrid(tau_vec,y0_vec)\n\nsurf=ax.plot_surface(X,Y,J,cmap=cm.get_cmap('turbo'),\n linewidth=0, antialiased=False,alpha=0.6)\n\nax.invert_yaxis()\nax.invert_xaxis()\nax.set_ylabel('theta_1=$y_0$');\nax.set_xlabel('theta_2=tau');\nax.set_zlabel('J(theta)');\n#ax.grid(False)\nplt.colorbar(surf,ax=ax, shrink=0.5, aspect=10,label='J(theta)');\nax.plot3D(theta_i[1],theta_i[0],Jcost(theta_i,y_obs,t),'ko',markersize=10,label='initial guess');\nax.plot3D(theta_hat[1],theta_hat[0],Jcost(theta_hat,y_obs,t),'ro',markersize=10,label='best fit');\nax.legend();\n\n\n# # Summary\n# Most of the code above is for plotting purposes. The actual optimization is done in ver few lines of code, summarized below\n\n# In[98]:\n\n\n# define your model\ndef model(theta,t):\n y_model=theta[0]*np.exp(-t/theta[1])\n return y_model\n# define your cost function, as a function of the parameter vector\ndef Jcost(theta,y_obs,t):\n Jcost=np.sum( (y_obs-model(theta,t))**2)\n return Jcost\n\n# choose an initial guess\ntheta_init=[2,1.2]\n\n#optimize\ntheta_hat=optimize.minimize(Jcost,theta_init,args=(y_obs,t)).x;\n\n" ]
[ [ "numpy.array2string", "numpy.linspace", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "scipy.stats.norm.rvs", "scipy.optimize.minimize", "matplotlib.cm.get_cmap", "numpy.exp", "matplotlib.pyplot.rcParams.update", "numpy.meshgrid", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
KaviyaSubramanian706/bt-gate-classifier
[ "082775587ccc795c54845572d45743008a8dc892" ]
[ "video_classify.py" ]
[ "import time\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\nimport os\nimport numpy as np\nimport argparse\nimport pandas as pd\nimport cv2\nimport csv\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom normalizer import Normalizer\n\nimport tensorflow as tf\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.compat.v1.Session(config=config)\n\nstds = None\nmeans = None\n\n# flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')\nflags.DEFINE_string('weights', './home/shyam/bridge_tech/mobilenetv2-tf2/model/ec2_model/model_1.1_15_6_64/output/',\n 'path to weights file')\nflags.DEFINE_enum('model', None, ['MobileNetv2'])\n\nflags.DEFINE_integer('size', 64, 'resize images to')\nflags.DEFINE_string('video', None,\n 'path to video file or number for webcam)')\nflags.DEFINE_string('output', None, 'path to output video')\nflags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')\nflags.DEFINE_integer('num_classes', 2, 'number of classes in the model')\n\n\n\ndef get_mean_std(base_dir, filenames, target_size):\n n = 0\n r_mean, g_mean, b_mean = 0.0, 0.0, 0.0\n r_M2, g_M2, b_M2 = 0.0, 0.0, 0.0\n\n \n # for z, filename in enumerate(filenames):\n # if z % 1000 == 0:\n # print(\"Processing image {}/{}\".format(z+1, len(filenames)))\n\n # x = tf.keras.preprocessing.image.img_to_array(tf.keras.preprocessing.image.load_img(os.path.join(base_dir, filename), target_size=target_size))\n r = x[:, :, 0].flatten().tolist()\n g = x[:, :, 1].flatten().tolist()\n b = x[:, :, 2].flatten().tolist()\n\n for (xr, xg, xb) in zip(r, g, b):\n n = n + 1\n\n r_delta = xr - r_mean\n g_delta = xg - g_mean\n b_delta = xb - b_mean\n\n r_mean = r_mean + r_delta/n\n g_mean = g_mean + g_delta/n\n b_mean = b_mean + b_delta/n\n\n r_M2 = r_M2 + r_delta * (xr - r_mean)\n g_M2 = g_M2 + g_delta * (xg - g_mean)\n b_M2 = b_M2 + b_delta * (xb - b_mean)\n\n r_variance = r_M2 / (n - 1)\n g_variance = g_M2 / (n - 1)\n b_variance = b_M2 / (n - 1)\n\n r_std = np.sqrt(r_variance)\n g_std = np.sqrt(g_variance)\n b_std = np.sqrt(b_variance)\n\n return np.array([r_mean, g_mean, b_mean]), np.array([r_std, g_std, b_std])\n\n\nclass Normalizer():\n def __init__(self, mean=None, std=None):\n self.mean = mean\n self.std = std\n\n def __call__(self, img):\n if self.mean is not None:\n img = self.center(img)\n if self.std is not None:\n img = self.scale(img)\n return img\n\n def center(self, img):\n return img - self.mean\n\n def scale(self, img):\n return img / self.std\n\n def set_stats(self, mean, std):\n self.mean = np.array(mean).reshape(1, 1, 3)\n self.std = np.array(std).reshape(1, 1, 3)\n \n\n def get_stats(self, base_dir, filenames, target_size, calc_mean=True, calc_std=True):\n print(\"Calculating mean and standard deviation with shape: \", target_size)\n m, s = get_mean_std(base_dir, filenames, target_size)\n if calc_mean:\n self.mean = m\n self.mean = self.mean.reshape(1, 1, 3)\n print(\"Dataset mean [r, g, b] = {}\".format(m.tolist()))\n if calc_std:\n self.std = s\n self.std = self.std.reshape(1, 1, 3)\n print(\"Dataset std [r, g, b] = {}\". 
format(s.tolist()))\n\n return str(m.tolist()), str(s.tolist())\n\ndef main(_argv):\n #physical_devices = tf.config.experimental.list_physical_devices('GPU')\n #for physical_device in physical_devices:\n # tf.config.experimental.set_memory_growth(physical_device, True)\n\n if FLAGS.model == 'yolov3-tiny':\n model = YoloV3Tiny(FLAGS.size, classes=FLAGS.num_classes,\n anchors=yolo_tiny_anchors,masks=yolo_tiny_anchor_masks)\n model.summary()\n\n elif FLAGS.model == 'MobileNetv2':\n model = tf.keras.models.load_model('/home/shyam/bridge_tech/mobilenetv2-tf2/model/ec2_model/model_1.1_15_6_64/output/')\n model.summary()\n\n\n model.load_weights(FLAGS.weights).expect_partial()\n logging.info('weights loaded')\n\n class_names = ['Open','Closed']\n logging.info('classes loaded')\n\n times = []\n\n try:\n vid = cv2.VideoCapture(int(FLAGS.video))\n except:\n vid = cv2.VideoCapture(FLAGS.video)\n\n out = None\n\n if FLAGS.output:\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)\n out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))\n\n while True:\n _, img = vid.read()\n\n if img is None:\n logging.warning(\"Empty Frame\")\n time.sleep(0.1)\n break\n\n img = tf.keras.preprocessing.image.load_img(\n img, target_size=(img_height, img_width)\n )\n img_array = keras.preprocessing.image.img_to_array(img)\n img_array = tf.expand_dims(img_array, 0) # Create a batch\n\n predictions = model.predict(img_array)\n score = tf.nn.softmax(predictions[0])\n\n print(\n \"This image most likely belongs to {} with a {:.2f} percent confidence.\"\n .format(class_names[np.argmax(score)], 100 * np.max(score))\n )\n\n img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_in = tf.expand_dims(img_in, 0)\n img_in = tf.image.resize(img_in, (FLAGS.size, FLAGS.size))\n img_in = transform_images(img_in, FLAGS.size)\n\n t1 = time.time()\n boxes, scores, classes, nums = model.predict(img_in)\n t2 = time.time()\n times.append(t2-t1)\n times = times[-20:]\n\n img = draw_outputs(img, (boxes, scores, classes, nums), class_names)\n img = cv2.putText(img, \"Time: {:.2f}ms\".format(sum(times)/len(times)*1000), (0, 30),\n cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)\n if FLAGS.output:\n out.write(img)\n cv2.imshow('output', img)\n if cv2.waitKey(1) == ord('q'):\n break\n\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.compat.v1.ConfigProto", "tensorflow.nn.softmax", "numpy.sqrt", "tensorflow.keras.preprocessing.image.load_img", "tensorflow.expand_dims", "tensorflow.compat.v1.Session", "numpy.max", "tensorflow.image.resize", "numpy.argmax", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Alchemist101010/genesis
[ "1280f7e6af439f6eb9173804d2931078a82190d3" ]
[ "arch1.py" ]
[ "import tensorflow as tf\r\nimport keras\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sb\r\nimport sys \r\nsys.setrecursionlimit(100000)\r\n\r\nfashion = keras.datasets.fashion_mnist\r\n(train_images, train_labels), (test_images, test_labels) = fashion.load_data()\r\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\r\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\r\n\r\nplt.figure()\r\nplt.imshow(train_images[0])\r\nplt.colorbar()\r\nplt.grid(False)\r\n\r\ntrain_images = train_images / 255.0\r\ntest_images = test_images / 255.0\r\n\r\nplt.figure(figsize=(10,10))\r\nfor i in range(25):\r\n plt.subplot(5,5,i+1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.grid(False)\r\n plt.imshow(train_images[i], cmap= 'icefire')\r\n plt.xlabel(class_names[train_labels[i]])\r\n\r\nmodel = keras.Sequential([\r\n keras.layers.Flatten(input_shape=(28, 28)),\r\n keras.layers.Dense(128, activation=tf.nn.relu),\r\n keras.layers.Dense(10, activation=tf.nn.softmax)\r\n])\r\n\r\nmodel.compile(optimizer=tf.train.AdamOptimizer(), \r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\nmodel.fit(train_images, train_labels, epochs=5)\r\n\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\r\n\r\nprint('Test accuracy:', test_acc)\r\npredictions = model.predict(test_images)\r\n\r\ndef plot_image(i, predictions_array, true_label, img):\r\n predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\r\n plt.grid(False)\r\n plt.xticks([])\r\n plt.yticks([])\r\n \r\n plt.imshow((img), cmap= 'icefire')\r\n\r\n predicted_label = np.argmax(predictions_array)\r\n if predicted_label == true_label:\r\n color = 'blue'\r\n else:\r\n color = 'red'\r\n \r\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\r\n 100*np.max(predictions_array),\r\n class_names[true_label]),\r\n color=color)\r\n\r\ndef plot_value_array(i, predictions_array, true_label):\r\n predictions_array, true_label = predictions_array[i], true_label[i]\r\n plt.grid(False)\r\n plt.xticks([])\r\n plt.yticks([])\r\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\r\n plt.ylim([0, 1]) \r\n predicted_label = np.argmax(predictions_array)\r\n \r\n thisplot[predicted_label].set_color('red')\r\n thisplot[true_label].set_color('blue')\r\n\r\n i = 0\r\n plt.figure(figsize=(6,3))\r\n plt.subplot(1,2,1)\r\n plot_image(i, predictions, test_labels, test_images)\r\n plt.subplot(1,2,2)\r\n plot_value_array(i, predictions, test_labels)\r\n\r\n i = 12\r\n plt.figure(figsize=(6,3))\r\n plt.subplot(1,2,1)\r\n plot_image(i, predictions, test_labels, test_images)\r\n plt.subplot(1,2,2)\r\n plot_value_array(i, predictions, test_labels)\r\n\r\n# Plot the first X test images, their predicted label, and the true label\r\n# Color correct predictions in blue, incorrect predictions in red\r\nnum_rows = 5\r\nnum_cols = 3\r\nnum_images = num_rows*num_cols\r\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\r\nfor i in range(num_images):\r\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\r\n plot_image(i, predictions, test_labels, test_images)\r\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\r\n plot_value_array(i, predictions, test_labels)\r\n\r\n # Grab an image from the test dataset\r\nimg = test_images[0]\r\n\r\n\r\n\r\nprint(img.shape)\r\n\r\npredictions_single = model.predict(img)\r\nprint(predictions_single)\r\n\r\nplot_value_array(0, predictions_single, test_labels)\r\n_ = plt.xticks(range(10), class_names, 
rotation=45)\r\nnp.argmax(predictions_single[0])" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "matplotlib.pyplot.ylim", "matplotlib.pyplot.colorbar", "numpy.max", "numpy.argmax", "matplotlib.pyplot.subplot", "matplotlib.pyplot.grid", "tensorflow.train.AdamOptimizer", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
LarsNeR/stellargraph
[ "ee993bb600745a37d994fa4da628268b1cd657dc", "ee993bb600745a37d994fa4da628268b1cd657dc", "ee993bb600745a37d994fa4da628268b1cd657dc" ]
[ "tests/data/test_metapath_walker.py", "tests/mapper/test_knowledge_graph.py", "stellargraph/layer/misc.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport pytest\nfrom stellargraph.data.explorer import UniformRandomMetaPathWalk\nfrom stellargraph.core.graph import StellarGraph\n\n\n# FIXME (#535): Consider using graph fixtures\ndef create_test_graph():\n \"\"\"\n Creates a simple graph for testing the BreadthFirstWalk class. The node ids are string or integers. Each node\n also has a label based on the type of its id such that nodes with string ids and those with integer ids have\n labels 's' and 'n' respectively.\n\n Returns:\n A simple graph with 13 nodes and 24 edges (including self loops for all but two of the nodes) in\n networkx format.\n\n \"\"\"\n nodes = {\n \"s\": pd.DataFrame(index=[\"0\", \"5\", \"7\", \"self loner\", \"loner\"]),\n \"n\": pd.DataFrame(index=[1, 2, 3, 4, 6, 8, 9, 10]),\n }\n edges = pd.DataFrame(\n [\n (\"0\", 1),\n (\"0\", 2),\n (1, 3),\n (1, 4),\n (3, 6),\n (4, \"7\"),\n (4, 8),\n (2, \"5\"),\n (\"5\", 9),\n (\"5\", 10),\n (\"0\", \"0\"),\n (1, 1),\n (3, 3),\n (6, 6),\n (4, 4),\n (\"7\", \"7\"),\n (8, 8),\n (2, 2),\n (\"5\", \"5\"),\n (9, 9),\n (\n \"self loner\",\n \"self loner\",\n ), # node that is not connected with any other nodes but has self loop\n ],\n columns=[\"source\", \"target\"],\n )\n\n return StellarGraph(nodes, edges)\n\n\nclass TestMetaPathWalk(object):\n def test_parameter_checking(self):\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n nodes = [1]\n n = 1\n length = 2\n seed = None\n metapaths = [[\"n\", \"s\", \"n\"]]\n\n # nodes should be a list of node ids even for a single node\n with pytest.raises(ValueError):\n mrw.run(nodes=None, n=n, length=length, metapaths=metapaths, seed=seed)\n with pytest.raises(ValueError):\n mrw.run(nodes=0, n=n, length=length, metapaths=metapaths, seed=seed)\n # n has to be positive integer\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=-1, length=length, metapaths=metapaths, seed=seed)\n with pytest.raises(TypeError):\n mrw.run(nodes=nodes, n=11.4, length=length, metapaths=metapaths, seed=seed)\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=0, length=length, metapaths=metapaths, seed=seed)\n # length has to be positive integer\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=-3, metapaths=metapaths, seed=seed)\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=0, metapaths=metapaths, seed=seed)\n with pytest.raises(TypeError):\n mrw.run(nodes=nodes, n=n, length=4.6, metapaths=metapaths, seed=seed)\n with pytest.raises(TypeError):\n mrw.run(nodes=nodes, n=n, length=1.0000001, metapaths=metapaths, seed=seed)\n # metapaths have to start and end with the same node type\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[[\"s\", \"n\"]], seed=seed)\n with pytest.raises(ValueError):\n mrw.run(\n nodes=nodes,\n n=n,\n length=length,\n metapaths=[[\"s\", \"n\", \"s\"], [\"n\", \"s\"]],\n seed=seed,\n )\n # 
metapaths have to have minimum length of two\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[[\"s\"]], seed=seed)\n # metapaths has to be a list of lists of strings denoting the node labels\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[\"n\", \"s\"], seed=seed)\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[[1, 2]], seed=seed)\n with pytest.raises(ValueError):\n mrw.run(\n nodes=nodes, n=n, length=length, metapaths=[[\"n\", \"s\"], []], seed=seed\n )\n with pytest.raises(ValueError):\n mrw.run(\n nodes=nodes,\n n=n,\n length=length,\n metapaths=[[\"n\", \"s\"], [\"s\", 1]],\n seed=seed,\n )\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[(\"n\", \"s\")], seed=seed)\n with pytest.raises(ValueError):\n mrw.run(\n nodes=nodes,\n n=n,\n length=length,\n metapaths=([\"n\", \"s\"], [\"s\", \"n\", \"s\"]),\n seed=seed,\n )\n # seed has to be integer or None\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=-1)\n with pytest.raises(TypeError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=1000.345)\n\n # If no root nodes are given, an empty list is returned which is not an error but I thought this method\n # is the best for checking this behaviour.\n walks = mrw.run(nodes=[], n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == 0\n\n def test_walk_generation_single_root_node_loner(self):\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n seed = None\n nodes = [\"loner\"] # has no edges, not even to itself\n n = 1\n length = 5\n metapaths = [[\"s\", \"n\", \"s\"]]\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert len(walks[0]) == 1\n\n n = 5\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n for walk in walks:\n assert len(walk) == 1\n\n def test_walk_generation_single_root_node_self_loner(self):\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n seed = None\n nodes = [\"self loner\"] # this node has self edges but not other edges\n n = 1\n length = 10\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert (\n len(walks[0]) == 1\n ) # for the ['s', 'n', 'n', 's'] metapath only the starting node is returned\n\n metapaths = [[\"s\", \"s\"]]\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert len(walks[0]) == length # the node is repeated length times\n for node in walks[0]:\n assert node == \"self loner\"\n\n def test_walk_generation_single_root_node(self):\n\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n nodes = [\"0\"]\n n = 1\n length = 15\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n seed = 42\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert len(walks[0]) <= length # test against maximum walk length\n\n n = 5\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert len(walks[0]) <= length # test against maximum walk length\n\n metapaths = [[\"s\", \"n\", \"s\"], [\"s\", \"n\", \"n\", \"s\"]]\n n = 1\n walks = mrw.run(nodes=nodes, n=n, length=length, 
metapaths=metapaths, seed=seed)\n assert len(walks) == n * len(metapaths)\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n metapaths = [[\"s\", \"n\", \"s\"], [\"s\", \"n\", \"n\", \"s\"]]\n n = 5\n length = 100\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n * len(metapaths)\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n nodes = [8]\n metapaths = [[\"s\", \"n\", \"s\"], [\"s\", \"n\", \"n\", \"s\"]]\n n = 5\n length = 100\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert (\n len(walks) == 0\n ) # metapaths start with a node of type 's' but starting node is type 'n' so an empty list is returned\n\n def test_walk_generation_many_root_nodes(self):\n\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n nodes = [\"0\", 2]\n n = 1\n length = 15\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n seed = 42\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert (\n len(walks) == 1\n ) # the starting node 2 should not generate a walk because it is of type 'n' not 's'\n assert len(walks[0]) <= length # test against maximum walk length\n\n metapaths = [[\"s\", \"n\", \"n\", \"s\"], [\"n\", \"n\", \"s\", \"n\"]]\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert (\n len(walks) == 2\n ) # each starting node will generate one walk from each metapath\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n n = 2\n nodes = [\"0\", \"5\"]\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n * len(\n nodes\n ) # each starting node will generate one walk from each metapath\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n n = 2\n nodes = [\"0\", \"5\", 1, 6]\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert (\n len(walks) == n * 2\n ) # the first two starting node will generate one walk from each metapath\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n n = 5\n nodes = [\"0\", \"5\", 1, 6]\n metapaths = [[\"s\", \"n\", \"n\", \"s\"], [\"n\", \"s\", \"n\"], [\"n\", \"n\"]]\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n * 6\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n def test_init_parameters(self):\n g = create_test_graph()\n n = 2\n length = 15\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n seed = 42\n nodes = [\"0\", \"5\"]\n\n mrw = UniformRandomMetaPathWalk(\n g, n=n, length=length, metapaths=metapaths, seed=seed\n )\n mrw_no_params = UniformRandomMetaPathWalk(g)\n\n assert mrw.run(nodes=nodes) == mrw_no_params.run(\n nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed\n )\n\n def test_benchmark_uniformrandommetapathwalk(self, benchmark):\n\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n nodes = [\"0\"]\n n = 5\n length = 5\n metapaths = [[\"s\", \"n\", \"n\", \"s\"], [\"n\", \"s\", \"n\"], [\"n\", \"n\"]]\n\n benchmark(lambda: mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths))\n", "# -*- coding: utf-8 -*-\n#\n# Copyright 2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nimport pandas as pd\nimport numpy as np\n\nfrom stellargraph.mapper.knowledge_graph import KGTripleGenerator, KGTripleSequence\n\nfrom .. import test_utils\nfrom ..test_utils.graphs import knowledge_graph\n\npytestmark = test_utils.ignore_stellargraph_experimental_mark\n\n\ndef check_sequence_output(\n output,\n batch_size,\n negatives,\n max_node_iloc=None,\n source_ilocs=None,\n rel_ilocs=None,\n target_ilocs=None,\n):\n s, r, o = output[0]\n l = output[1] if len(output) == 2 else None\n\n expected_length = batch_size * (1 + (negatives or 0))\n assert len(s) == len(r) == len(o) == expected_length\n\n if source_ilocs is not None:\n expected_pos = set(zip(source_ilocs, rel_ilocs, target_ilocs))\n actual_pos = set(zip(s[:batch_size], r[:batch_size], o[:batch_size]))\n # the positive edges should all be edges passed in originally\n assert actual_pos <= expected_pos\n\n if negatives is None:\n assert l is None\n else:\n assert len(l) == expected_length\n assert set(l[:batch_size]) == {1}\n assert set(l[batch_size:]) == {0}\n\n if max_node_iloc is not None:\n assert np.all((0 <= s) & (s <= max_node_iloc))\n assert np.all((0 <= r) & (r <= max_node_iloc))\n\n\ndef triple_df(*values):\n return pd.DataFrame(values, columns=[\"source\", \"label\", \"target\"])\n\n\ndef test_kg_triple_generator(knowledge_graph):\n gen = KGTripleGenerator(knowledge_graph, 2)\n\n edges = triple_df((\"a\", \"W\", \"b\"), (\"c\", \"X\", \"a\"), (\"d\", \"Y\", \"c\"))\n\n seq = gen.flow(edges)\n check_sequence_output(seq[0], 2, None)\n check_sequence_output(seq[1], 1, None)\n\n seq = gen.flow(edges, negative_samples=10)\n check_sequence_output(seq[0], 2, 10, knowledge_graph.number_of_nodes())\n\n\ndef test_kg_triple_generator_errors(knowledge_graph):\n gen = KGTripleGenerator(knowledge_graph, 10)\n\n with pytest.raises(TypeError, match=\"edges: expected.*found int\"):\n gen.flow(1)\n\n with pytest.raises(KeyError, match=\"fake\"):\n gen.flow(triple_df((\"fake\", \"W\", \"b\")))\n\n with pytest.raises(KeyError, match=\"fake\"):\n gen.flow(triple_df((\"a\", \"fake\", \"b\")))\n\n with pytest.raises(KeyError, match=\"fake\"):\n gen.flow(triple_df((\"a\", \"W\", \"fake\")))\n\n with pytest.raises(TypeError, match=\"negative_samples: expected.*found str\"):\n gen.flow(triple_df(), negative_samples=\"foo\")\n\n with pytest.raises(ValueError, match=\"negative_samples: expected.*found -1\"):\n gen.flow(triple_df(), negative_samples=-1)\n\n\[email protected](\"negative_samples\", [None, 1, 10])\ndef test_kg_triple_sequence_batches(negative_samples):\n s = [0, 1, 2, 3, 4]\n r = [5, 6, 7, 8, 9]\n t = [10, 11, 12, 13, 14]\n seq = KGTripleSequence(\n max_node_iloc=20,\n source_ilocs=s,\n rel_ilocs=r,\n target_ilocs=t,\n batch_size=3,\n shuffle=False,\n negative_samples=negative_samples,\n seed=None,\n )\n assert len(seq) == 2\n check_sequence_output(seq[0], 3, negative_samples, 20, s, r, t)\n check_sequence_output(seq[1], 2, negative_samples, 20, s, r, t)\n\n\ndef epoch_sample_equal(a, b):\n return all(np.array_equal(x, y) 
for x, y in zip(a[0], b[0]))\n\n\[email protected](\"shuffle\", [False, True])\ndef test_kg_triple_sequence_shuffle(shuffle):\n seq = KGTripleSequence(\n max_node_iloc=10,\n source_ilocs=[0, 1, 2, 3, 4],\n rel_ilocs=[0, 1, 0, 1, 0],\n target_ilocs=[4, 3, 2, 1, 0],\n batch_size=5,\n shuffle=shuffle,\n negative_samples=None,\n seed=None,\n )\n assert len(seq) == 1\n\n def sample():\n ret = seq[0]\n seq.on_epoch_end()\n return ret\n\n # with 20 epochs, it's very unlikely ((1/5!)**20 ≈ 2.6e-42) they will all be the same, if\n # (uniform) shuffling is happening\n first, *rest = [sample() for _ in range(20)]\n\n should_be_equal = not shuffle\n assert all(epoch_sample_equal(first, r) for r in rest) == should_be_equal\n\n\ndef test_kg_triple_sequence_negative_samples():\n max_node_iloc = 1234567\n negative_sampless = 100\n s = [0, 1]\n r = [2, 3]\n t = [4, 5]\n seq = KGTripleSequence(\n max_node_iloc=max_node_iloc,\n source_ilocs=s,\n rel_ilocs=r,\n target_ilocs=t,\n batch_size=2,\n shuffle=False,\n negative_samples=negative_sampless,\n seed=None,\n )\n\n sample = seq[0]\n check_sequence_output(sample, 2, negative_sampless, max_node_iloc, s, r, t)\n\n\ndef test_kg_triple_sequence_seed_shuffle_negative_samples():\n def mk(seed):\n return KGTripleSequence(\n max_node_iloc=10000,\n source_ilocs=[0, 1],\n rel_ilocs=[2, 3],\n target_ilocs=[4, 5],\n batch_size=1,\n shuffle=True,\n negative_samples=5,\n seed=seed,\n )\n\n def run(a, b):\n sample_a = a[0]\n sample_b = b[0]\n a.on_epoch_end()\n b.on_epoch_end()\n return epoch_sample_equal(sample_a, sample_b)\n\n # the same seed should give the same sequence\n seq0_1 = mk(0)\n seq0_2 = mk(0)\n assert all(run(seq0_1, seq0_2) for _ in range(20))\n\n # different seeds should give different sequences\n seq1 = mk(1)\n seq2 = mk(2)\n assert not all(run(seq1, seq2) for _ in range(20))\n", "# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.keras import backend as K\nimport warnings\n\n\nclass SqueezedSparseConversion(Layer):\n \"\"\"\n Converts Keras tensors containing indices and values to a tensorflow sparse\n tensor. 
The input tensors are expected to have a batch dimension of 1 which\n will be removed before conversion to a matrix.\n\n This only works with a tensorflow Keras backend.\n\n Example:\n ```\n A_indices = Input(batch_shape=(1, None, 2), dtype=\"int64\")\n A_values = Input(batch_shape=(1, None))\n Ainput = TFSparseConversion(shape=(N, N))([A_indices, A_values])\n ```\n\n Args:\n shape (list of int): The shape of the sparse matrix to create\n dtype (str or tf.dtypes.DType): Data type for the created sparse matrix\n \"\"\"\n\n def __init__(self, shape, axis=0, dtype=None):\n super().__init__(dtype=dtype)\n\n self.trainable = False\n self.supports_masking = True\n self.matrix_shape = shape\n # self.dtype = dtype\n self.axis = axis\n\n # Check backend\n if K.backend() != \"tensorflow\":\n raise RuntimeError(\n \"SqueezedSparseConversion only supports the Tensorflow backend\"\n )\n\n def get_config(self):\n config = {\"shape\": self.matrix_shape, \"dtype\": self.dtype}\n return config\n\n def compute_output_shape(self, input_shapes):\n return tuple(self.matrix_shape)\n\n def call(self, inputs):\n \"\"\"\n Creates a Tensorflow `SparseTensor` from the inputs\n\n Args:\n inputs (list): Two input tensors contining\n matrix indices (size 1 x E x 2) of type int64, and\n matrix values (size (size 1 x E),\n where E is the number of non-zero entries in the matrix.\n\n Returns:\n Tensorflow SparseTensor that represents the converted sparse matrix.\n \"\"\"\n # Here we squeeze the specified axis\n if self.axis is not None:\n indices = K.squeeze(inputs[0], self.axis)\n values = K.squeeze(inputs[1], self.axis)\n else:\n indices = inputs[0]\n values = inputs[1]\n\n if self.dtype is not None:\n values = K.cast(values, self.dtype)\n\n # Import tensorflow here so that the backend check will work without\n # tensorflow installed.\n import tensorflow as tf\n\n # Build sparse tensor for the matrix\n output = tf.SparseTensor(\n indices=indices, values=values, dense_shape=self.matrix_shape\n )\n return output\n\n\nclass GatherIndices(Layer):\n \"\"\"\n Gathers slices from a data tensor, based on an indices tensors (``tf.gather`` in Layer form).\n\n Args:\n axis (int or Tensor): the data axis to gather from.\n batch_dims (int): the number of batch dimensions in the data and indices.\n \"\"\"\n\n def __init__(self, axis=None, batch_dims=0, **kwargs):\n super().__init__(**kwargs)\n self._axis = axis\n self._batch_dims = batch_dims\n\n def get_config(self):\n config = super().get_config()\n config.update(axis=self._axis, batch_dims=self._batch_dims)\n return config\n\n def compute_output_shape(self, input_shapes):\n data_shape, indices_shape = input_shapes\n axis = self._batch_dims if self._axis is None else self._axis\n # per https://www.tensorflow.org/api_docs/python/tf/gather\n return (\n data_shape[:axis]\n + indices_shape[self._batch_dims :]\n + data_shape[axis + 1 :]\n )\n\n def call(self, inputs):\n \"\"\"\n Args:\n inputs (list): a pair of tensors, corresponding to the ``params`` and ``indices``\n parameters to ``tf.gather``.\n \"\"\"\n data, indices = inputs\n return tf.gather(data, indices, axis=self._axis, batch_dims=self._batch_dims)\n\n\ndef deprecated_model_function(function, old_name):\n def _function_wrapper(*args, **kwargs):\n \"\"\"Deprecated: use :meth:`in_out_tensors`.\"\"\"\n\n warnings.warn(\n f\"The '{old_name}' method is deprecated, use 'in_out_tensors' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return function(*args, **kwargs)\n\n return _function_wrapper\n" ]
[ [ "pandas.DataFrame" ], [ "numpy.all", "numpy.array_equal", "pandas.DataFrame" ], [ "tensorflow.keras.backend.cast", "tensorflow.keras.backend.squeeze", "tensorflow.keras.backend.backend", "tensorflow.SparseTensor", "tensorflow.gather" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Werni2A/galois
[ "97c35afdd1ad38705f2b1e643237fbd2f87bb6e3" ]
[ "tests/fields/test_instantiation.py" ]
[ "\"\"\"\nA pytest module to test instantiation of new Galois field arrays.\n\"\"\"\nimport random\n\nimport pytest\nimport numpy as np\n\nimport galois\n\nfrom ..helper import array_equal\n\n\nDTYPES = galois.dtypes.DTYPES + [np.object_]\n\n\ndef test_cant_instantiate_GF():\n v = [0, 1, 0, 1]\n with pytest.raises(NotImplementedError):\n a = galois.FieldArray(v)\n\n\nclass Test0D:\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array, galois.FieldArray])\n def test_new(self, field, type1):\n v = int(field.Random())\n vt = convert_0d(v, type1, field)\n a = field(vt)\n assert type(a) is field\n assert a == v\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array, galois.FieldArray])\n def test_valid_dtype(self, field, type1):\n v = int(field.Random())\n vt = convert_0d(v, type1, field)\n dtype = valid_dtype(field)\n a = field(vt, dtype=dtype)\n assert type(a) is field\n assert a.dtype == dtype\n assert a == v\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array, galois.FieldArray])\n def test_invalid_dtype(self, field, type1):\n v = int(field.Random())\n vt = convert_0d(v, type1, field)\n dtype = invalid_dtype(field)\n with pytest.raises(TypeError):\n a = field(vt, dtype=dtype)\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array])\n def test_non_integer(self, field, type1):\n v = float(field.order)\n vt = convert_0d(v, type1, field)\n with pytest.raises((TypeError, ValueError)):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array])\n def test_out_of_range_low(self, field, type1):\n v = -1\n vt = convert_0d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array])\n def test_out_of_range_high(self, field, type1):\n v = field.order\n vt = convert_0d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n def test_copy_true(self, field):\n v = int(field.Random(low=1))\n va = np.array(v, dtype=field.dtypes[0])\n a = field(va, copy=True)\n assert type(a) is field\n assert array_equal(a, v)\n va = 1 # Change original array\n assert array_equal(a, v)\n\n def test_default_order_c(self, field):\n v = int(field.Random())\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_default_order_f(self, field):\n v = int(field.Random())\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_c(self, field):\n v = int(field.Random())\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va, order=\"C\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_f(self, field):\n v = int(field.Random())\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va, order=\"F\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_ndmin(self, field):\n v = int(field.Random())\n a = field(v, ndmin=3)\n assert type(a) is field\n assert a.shape == (1,1,1)\n\n\nclass Test1D:\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_new(self, field, type1):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), 
int(field.Random())]\n vt = convert_1d(v, type1, field)\n a = field(vt)\n assert type(a) is field\n assert array_equal(a, v)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_valid_dtype(self, field, type1):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n dtype = valid_dtype(field)\n a = field(vt, dtype=dtype)\n assert type(a) is field\n assert a.dtype == dtype\n assert array_equal(a, v)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_invalid_dtype(self, field, type1):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n dtype = invalid_dtype(field)\n with pytest.raises(TypeError):\n a = field(vt, dtype=dtype)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_non_integer(self, field, type1):\n v = [int(field.Random()), float(field.Random()), int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n with pytest.raises((TypeError, ValueError)):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_out_of_range_low(self, field, type1):\n v = [int(field.Random()), -1, int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_out_of_range_high(self, field, type1):\n v = [int(field.Random()), field.order, int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n def test_copy_true(self, field):\n v = [int(field.Random(low=1)), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, dtype=field.dtypes[0])\n a = field(va, copy=True)\n assert type(a) is field\n assert array_equal(a, v)\n va[0] = 0 # Change original array\n assert array_equal(a, v)\n\n def test_default_order_c(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_default_order_f(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_c(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va, order=\"C\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_f(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va, order=\"F\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_ndmin(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n a = field(v, ndmin=3)\n assert type(a) is field\n assert a.shape == 
(1,1,4)\n\n\nclass Test2D:\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_new(self, field, type1):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n a = field(vt)\n assert type(a) is field\n assert array_equal(a, v)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_valid_dtype(self, field, type1):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n dtype = valid_dtype(field)\n a = field(vt, dtype=dtype)\n assert type(a) is field\n assert a.dtype == dtype\n assert array_equal(a, v)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_invalid_dtype(self, field, type1):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n dtype = invalid_dtype(field)\n with pytest.raises(TypeError):\n a = field(vt, dtype=dtype)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_non_integer(self, field, type1):\n v = [[int(field.Random()), float(field.Random())], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n with pytest.raises((TypeError, ValueError)):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_out_of_range_low(self, field, type1):\n v = [[int(field.Random()), -1], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_out_of_range_high(self, field, type1):\n v = [[int(field.Random()), field.order], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n def test_copy_true(self, field):\n v = [[int(field.Random(low=1)), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, dtype=field.dtypes[0])\n a = field(va, copy=True)\n assert type(a) is field\n assert array_equal(a, v)\n va[0][0] = 0 # Change original array\n assert array_equal(a, v)\n\n def test_default_order_c(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert not a.flags[\"F_CONTIGUOUS\"]\n\n def test_default_order_f(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert not a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_c(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va, order=\"C\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert not a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_f(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va, order=\"F\")\n assert type(a) is field\n 
assert not a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_ndmin(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n a = field(v, ndmin=3)\n assert type(a) is field\n assert a.shape == (1,2,2)\n\n\ndef convert_0d(v, type1, field):\n if type1 is int:\n vt = v\n elif type1 in [list, tuple]:\n vt = type1([v])\n elif type1 is np.array and field.dtypes == [np.object_]:\n vt = np.array(v, dtype=np.object_)\n elif type1 is np.array:\n vt = np.array(v)\n elif type1 is galois.FieldArray:\n vt = field(v)\n else:\n raise NotImplementedError\n return vt\n\n\ndef convert_1d(v, type1, field):\n if type1 is galois.FieldArray:\n vt = field(v)\n elif type1 is np.array and field.dtypes == [np.object_]:\n vt = np.array(v, dtype=np.object_)\n elif type1 is np.array:\n vt = np.array(v)\n else:\n vt = type1(v)\n return vt\n\n\ndef convert_2d(v, type1, field):\n if type1 is galois.FieldArray:\n vt = field(v)\n elif type1 is np.array and field.dtypes == [np.object_]:\n vt = np.array(v, dtype=np.object_)\n elif type1 is np.array:\n vt = np.array(v)\n elif type1 in [list, tuple]:\n vt = type1([type1([b for b in a]) for a in v])\n else:\n raise NotImplementedError\n return vt\n\n\ndef valid_dtype(field):\n return random.choice(field.dtypes)\n\n\ndef invalid_dtype(field):\n return random.choice([dtype for dtype in DTYPES if dtype not in field.dtypes])\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MAWUT0R/PokerRL
[ "95708a5f7a16cb151bc4253132bdfd22ea7a9b25" ]
[ "PokerRL/cfr/_MCCFRBase.py" ]
[ "# Copyright (c) 2019 Eric Steinberger\n\n\nimport copy\n\nimport numpy as np\nimport sys\n\nfrom PokerRL.game._.tree.MCPublicTree import MCPublicTree\nfrom PokerRL.game.wrappers import HistoryEnvBuilder\nfrom PokerRL.rl.rl_util import get_env_cls_from_str\n\n\nclass MCCFRBase:\n \"\"\"\n base class to all full-width (i.e. non MC) tabular CFR methods\n \"\"\"\n\n def __init__(self,\n name,\n chief_handle,\n game_cls,\n agent_bet_set,\n algo_name,\n starting_stack_sizes=None,\n innerloop_epi=None,\n sample_method='eps_greedy'\n ):\n \"\"\"\n Args:\n name (str): Under this name all logs, data, and checkpoints will appear.\n chief_handle (ChiefBase): Reference to chief worker\n game_cls (PokerEnv subclass): Class (not instance) to be trained in.\n agent_bet_set (iterable): Choosing a bet-set from bet_sets.py is recommended. If solving a\n Limit poker game, this value will not be considered, but must still\n be passed. Just set this to any list of floats (e.g. [0.0])\n starting_stack_sizes (list of ints): For each stack size in this list, a CFR strategy will be computed.\n Results are logged individually and averaged (uniform).\n If None, takes the default for the game.\n \"\"\"\n\n self._name = name # name=MC_CFR_EXAMPLE\n self._n_seats = 2\n self.touching_nodes = 0\n self._sample_method = sample_method\n\n self._chief_handle = chief_handle\n\n if starting_stack_sizes is None:\n self._starting_stack_sizes = [game_cls.DEFAULT_STACK_SIZE]\n else:\n self._starting_stack_sizes = copy.deepcopy(starting_stack_sizes)\n # self._starting_stack_sizes = [13]\n\n self._game_cls_str = game_cls.__name__ # StandardLeduc\n\n self._env_args = [\n game_cls.ARGS_CLS(n_seats=self._n_seats,\n starting_stack_sizes_list=[start_chips for _ in range(self._n_seats)], #[13, 13]\n bet_sizes_list_as_frac_of_pot=agent_bet_set,\n )\n for start_chips in self._starting_stack_sizes\n ]\n self._env_bldrs = [\n HistoryEnvBuilder(env_cls=get_env_cls_from_str(self._game_cls_str),\n env_args=self._env_args[s])\n\n for s in range(len(self._starting_stack_sizes))\n ]\n\n self._trees = [\n MCPublicTree(env_bldr=self._env_bldrs[idx],\n stack_size=self._env_args[idx].starting_stack_sizes_list,\n stop_at_street=None,\n sample_method=self._sample_method)\n for idx in range(len(self._env_bldrs))\n ]\n\n for tree in self._trees:\n tree.build_tree()\n print(\"Tree with stack size\", tree.stack_size, \"has\", tree.n_nodes, \"nodes out of which\", tree.n_nonterm,\n \"are non-terminal.\")\n\n self._algo_name = algo_name # MCCFR\n\n self._exps_curr_total = [\n self._chief_handle.create_experiment(\n self._name + \"_Curr_S\" + str(self._starting_stack_sizes[s]) + \"_total_\" + self._algo_name)\n for s in range(len(self._starting_stack_sizes))\n ]\n\n self._exps_avg_total = [\n self._chief_handle.create_experiment(\n self._name + \"_Avg_total_S\" + str(self._starting_stack_sizes[s]) + \"_\" + self._algo_name)\n for s in range(len(self._starting_stack_sizes))\n ]\n\n self._exp_all_averaged_curr_total = self._chief_handle.create_experiment(\n self._name + \"_Curr_total_averaged_\" + self._algo_name)\n\n self._exp_all_averaged_avg_total = self._chief_handle.create_experiment(\n self._name + \"_Avg_total_averaged_\" + self._algo_name)\n\n # self._chief_handle._log_buf._experiments\n # {'MC_CFR_EXAMPLE_Curr_S13_total_MCCFR': {}, \n # 'MC_CFR_EXAMPLE_Avg_total_S13_MCCFR': {}, \n # 'MC_CFR_EXAMPLE_Curr_total_averaged_MCCFR': {}, \n # 'MC_CFR_EXAMPLE_Avg_total_averaged_MCCFR': {}}\n\n self._iter_counter = None\n if innerloop_epi is None:\n 
self._innerloop_epi = self._trees[0]._n_nodes\n else:\n self._innerloop_epi = innerloop_epi\n\n @property\n def name(self):\n return self._name\n\n @property\n def algo_name(self):\n return self._algo_name\n\n @property\n def iter_counter(self):\n return self._iter_counter\n\n def reset(self):\n self._iter_counter = 0\n\n for p in range(self._n_seats):\n self._reset_player(p_id=p)\n\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].fill_uniform_random()\n\n self._compute_cfv()\n self._log_curr_strat_expl()\n\n def iteration(self):\n raise NotImplementedError\n\n def print_tree(self, node):\n print(\"node value\", node.reach_probs)\n for c in node.children:\n print(\"chil value\", c.reach_probs)\n # self.print_tree(c)\n\n def _compute_cfv(self):\n # Compute node.ev_weighted, node.ev_br_weighted, node.epsilon, node.exploitability\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].compute_ev()\n\n def _compute_mc_cfv(self, p_id):\n # Compute node.ev_weighted, node.ev_br_weighted, node.epsilon, node.exploitability\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].compute_mc_ev(p_id)\n\n def _regret_formula_first_it(self, ev_all_actions, strat_ev):\n raise NotImplementedError\n\n def _regret_formula_after_first_it(self, ev_all_actions, strat_ev, last_regrets):\n raise NotImplementedError\n\n def _compute_regrets(self, p_id):\n\n for t_idx in range(len(self._trees)):\n def __compute_evs(_node):\n # EV of each action\n N_ACTIONS = len(_node.children)\n ev_all_actions = np.zeros(shape=(self._env_bldrs[t_idx].rules.RANGE_SIZE, N_ACTIONS), dtype=np.float32)\n for i, child in enumerate(_node.children):\n ev_all_actions[:, i] = child.ev[p_id]\n\n # EV if playing by curr strat\n strat_ev = _node.ev[p_id]\n strat_ev = np.expand_dims(strat_ev, axis=-1).repeat(N_ACTIONS, axis=-1)\n\n self.touching_nodes += len(np.nonzero(_node.reach_probs[0])[0]) * len(np.nonzero(_node.reach_probs[1])[0])\n return strat_ev, ev_all_actions\n\n def _fill_after_first(_node):\n if _node.p_id_acting_next == p_id:\n strat_ev, ev_all_actions = __compute_evs(_node=_node)\n _node.data[\"regret\"] = self._regret_formula_after_first_it(ev_all_actions=ev_all_actions,\n strat_ev=strat_ev,\n last_regrets=_node.data[\"regret\"])\n\n for c in _node.children:\n _fill_after_first(c)\n\n def _fill_first(_node):\n if _node.p_id_acting_next == p_id:\n strat_ev, ev_all_actions = __compute_evs(_node=_node)\n\n _node.data[\"regret\"] = self._regret_formula_first_it(ev_all_actions=ev_all_actions,\n strat_ev=strat_ev)\n\n for c in _node.children:\n _fill_first(c)\n \n\n if self._iter_counter == 0:\n _fill_first(self._trees[t_idx].root)\n else:\n _fill_after_first(self._trees[t_idx].root)\n\n def _compute_new_strategy(self, p_id, inner_loop=False):\n \"\"\" Assumes regrets have been computed for player \"\"p_id\"\" already! 
\"\"\"\n raise NotImplementedError\n\n def _update_reach_probs(self):\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].update_reach_probs()\n\n def _compute_reach_probs(self, p_id):\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].compute_reach_probs(p_id)\n\n def _add_strategy_to_average(self, p_id):\n raise NotImplementedError\n\n def _log_curr_strat_expl(self):\n expl_totals = []\n for t_idx in range(len(self._trees)):\n METRIC = self._env_bldrs[t_idx].env_cls.WIN_METRIC\n expl_p = [\n float(self._trees[t_idx].root.exploitability[p]) * self._env_bldrs[t_idx].env_cls.EV_NORMALIZER\n for p in range(self._n_seats)\n ]\n expl_total = sum(expl_p) / self._n_seats\n expl_totals.append(expl_total)\n\n self._chief_handle.add_scalar(self._exps_curr_total[t_idx],\n \"Evaluation/\" + METRIC, self._iter_counter, expl_total)\n\n self._trees[t_idx].export_to_file(name=self._name + \"_Curr_\" + str(self._iter_counter))\n\n expl_total_averaged = sum(expl_totals) / float(len(expl_totals))\n self._chief_handle.add_scalar(self._exp_all_averaged_curr_total,\n \"Evaluation/\" + METRIC, self._iter_counter, expl_total_averaged)\n\n def _evaluate_avg_strats(self):\n expl_totals = []\n for t_idx in range(len(self._trees)):\n METRIC = self._env_bldrs[t_idx].env_cls.WIN_METRIC\n eval_tree = MCPublicTree(env_bldr=self._env_bldrs[t_idx],\n stack_size=self._env_args[t_idx].starting_stack_sizes_list,\n stop_at_street=None,\n is_debugging=False,\n )\n eval_tree.build_tree()\n\n def _fill(_node_eval, _node_train):\n if _node_eval.p_id_acting_next != eval_tree.CHANCE_ID and (not _node_eval.is_terminal):\n _node_eval.strategy = np.copy(_node_train.data[\"avg_strat\"])\n assert np.allclose(np.sum(_node_eval.strategy, axis=1), 1, atol=0.0001)\n\n for c_eval, c_train in zip(_node_eval.children, _node_train.children):\n _fill(_node_eval=c_eval, _node_train=c_train)\n\n # sets up some stuff; we overwrite strategy afterwards\n eval_tree.fill_uniform_random()\n\n # fill with strat\n _fill(_node_eval=eval_tree.root, _node_train=self._trees[t_idx].root)\n eval_tree.update_reach_probs()\n\n # compute EVs\n eval_tree.compute_ev()\n\n eval_tree.export_to_file(name=self._name + \"_Avg_\" + str(self._iter_counter))\n\n # log\n expl_p = [\n float(eval_tree.root.exploitability[p]) * self._env_bldrs[t_idx].env_cls.EV_NORMALIZER\n for p in range(eval_tree.n_seats)\n ]\n expl_total = sum(expl_p) / eval_tree.n_seats\n expl_totals.append(expl_total)\n\n self._chief_handle.add_scalar(self._exps_avg_total[t_idx],\n \"Evaluation/\" + METRIC, self._iter_counter, expl_total)\n\n expl_total_averaged = sum(expl_totals) / float(len(expl_totals))\n self._chief_handle.add_scalar(self._exp_all_averaged_avg_total,\n \"Evaluation/\" + METRIC, self._iter_counter, expl_total_averaged)\n return expl_total_averaged\n\n def _reset_player(self, p_id):\n def __reset(_node, _p_id):\n if _node.p_id_acting_next == _p_id:\n # regrets and strategies only need to be stored for one player at each node\n _node.data = {\n \"regret\": None,\n \"avg_strat\": None\n }\n _node.strategy = None\n\n for c in _node.children:\n __reset(c, _p_id=_p_id)\n\n for t_idx in range(len(self._trees)):\n __reset(self._trees[t_idx].root, _p_id=p_id)\n\n\n def _generate_samples(self, p_id, player=False, opponent=False, chance_p=False):\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].generate_samples(p_id, player, opponent, chance_p)\n\n def _calcultate_variance(self):\n variances = []\n for t_idx in range(len(self._trees)):\n 
self._trees[t_idx].calcultate_variance()\n v = np.mean((self._trees[t_idx].root.ev - self._trees[t_idx].root.true_ev)**2)\n # v = np.mean((self._trees[t_idx].root.ev - self._trees[t_idx].root.true_ev)**2)\n variances.append(v)\n return variances\n\n" ]
[ [ "numpy.expand_dims", "numpy.nonzero", "numpy.copy", "numpy.mean", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BouchardLab/pylearn2
[ "4cab785b870d22cd9e85a5f536d4cac234b6bf60", "4cab785b870d22cd9e85a5f536d4cac234b6bf60", "4cab785b870d22cd9e85a5f536d4cac234b6bf60" ]
[ "pylearn2/energy_functions/tests/test_rbm_energy.py", "pylearn2/models/tests/test_autoencoder.py", "pylearn2/gui/graph_2D.py" ]
[ "import theano\ntheano.config.compute_test_value = 'off'\nfrom pylearn2.energy_functions.rbm_energy import GRBM_Type_1\nimport numpy as N\nfrom theano.compat.six.moves import xrange\nimport theano.tensor as T\nfrom theano import function\nfrom pylearn2.utils import as_floatX\nfrom pylearn2.utils import sharedX\nfrom pylearn2.linear.matrixmul import MatrixMul\nimport unittest\n\n\nclass TestGRBM_Type_1(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.test_m = 2\n\n cls.rng = N.random.RandomState([1, 2, 3])\n cls.nv = 3\n cls.nh = 4\n\n cls.vW = cls.rng.randn(cls.nv, cls.nh)\n cls.W = sharedX(cls.vW)\n cls.vbv = as_floatX(cls.rng.randn(cls.nv))\n cls.bv = T.as_tensor_variable(cls.vbv)\n cls.bv.tag.test_value = cls.vbv\n cls.vbh = as_floatX(cls.rng.randn(cls.nh))\n cls.bh = T.as_tensor_variable(cls.vbh)\n cls.bh.tag.test_value = cls.bh\n cls.vsigma = as_floatX(cls.rng.uniform(0.1, 5))\n cls.sigma = T.as_tensor_variable(cls.vsigma)\n cls.sigma.tag.test_value = cls.vsigma\n\n cls.E = GRBM_Type_1(transformer=MatrixMul(cls.W), bias_vis=cls.bv,\n bias_hid=cls.bh, sigma=cls.sigma)\n\n cls.V = T.matrix()\n cls.V.tag.test_value = as_floatX(cls.rng.rand(cls.test_m, cls.nv))\n cls.H = T.matrix()\n cls.H.tag.test_value = as_floatX(cls.rng.rand(cls.test_m, cls.nh))\n\n cls.E_func = function([cls.V, cls.H], cls.E([cls.V, cls.H]))\n cls.F_func = function([cls.V], cls.E.free_energy(cls.V))\n cls.log_P_H_given_V_func = \\\n function([cls.H, cls.V], cls.E.log_P_H_given_V(cls.H, cls.V))\n cls.score_func = function([cls.V], cls.E.score(cls.V))\n\n cls.F_of_V = cls.E.free_energy(cls.V)\n cls.dummy = T.sum(cls.F_of_V)\n cls.negscore = T.grad(cls.dummy, cls.V)\n cls.score = - cls.negscore\n\n cls.generic_score_func = function([cls.V], cls.score)\n\n def test_mean_H_given_V(self):\n tol = 1e-6\n\n # P(h_1 | v) / P(h_2 | v) = a\n # => exp(-E(v, h_1)) / exp(-E(v,h_2)) = a\n # => exp(E(v,h_2)-E(v,h_1)) = a\n # E(v,h_2) - E(v,h_1) = log(a)\n # also log P(h_1 | v) - log P(h_2) = log(a)\n\n rng = N.random.RandomState([1, 2, 3])\n\n m = 5\n\n Vv = as_floatX(N.zeros((m, self.nv)) + rng.randn(self.nv))\n\n Hv = as_floatX(rng.randn(m, self.nh) > 0.)\n\n log_Pv = self.log_P_H_given_V_func(Hv, Vv)\n\n Ev = self.E_func(Vv, Hv)\n\n for i in xrange(m):\n for j in xrange(i + 1, m):\n log_a = log_Pv[i] - log_Pv[j]\n e = Ev[j] - Ev[i]\n\n assert abs(e-log_a) < tol\n\n def test_free_energy(self):\n\n rng = N.random.RandomState([1, 2, 3])\n\n m = 2 ** self.nh\n\n Vv = as_floatX(N.zeros((m, self.nv)) + rng.randn(self.nv))\n\n F, = self.F_func(Vv[0:1, :])\n\n Hv = as_floatX(N.zeros((m, self.nh)))\n\n for i in xrange(m):\n for j in xrange(self.nh):\n Hv[i, j] = (i & (2 ** j)) / (2 ** j)\n\n Ev = self.E_func(Vv, Hv)\n\n Fv = -N.log(N.exp(-Ev).sum())\n assert abs(F-Fv) < 1e-6\n\n def test_score(self):\n rng = N.random.RandomState([1, 2, 3])\n\n m = 10\n\n Vv = as_floatX(rng.randn(m, self.nv))\n\n Sv = self.score_func(Vv)\n gSv = self.generic_score_func(Vv)\n\n assert N.allclose(Sv, gSv)\n", "\"\"\"\nTests for the pylearn2 autoencoder module.\n\"\"\"\nimport os.path\n\nimport numpy as np\nimport theano\nimport theano.tensor as tensor\nfrom theano import config\nfrom pylearn2.models.autoencoder import Autoencoder, \\\n HigherOrderContractiveAutoencoder, DeepComposedAutoencoder, \\\n UntiedAutoencoder\nfrom pylearn2.corruption import BinomialCorruptor\nfrom pylearn2.config import yaml_parse\nfrom theano.tensor.basic import _allclose\n\n\nyaml_dir_path = os.path.join(\n 
os.path.abspath(os.path.join(os.path.dirname(__file__))), 'config')\n\n\ndef test_autoencoder_properly_initialized():\n ae = Autoencoder(1, 1, 'sigmoid', 'linear')\n assert hasattr(ae, 'fn'), \"Autoencoder didn't call Block.__init__\"\n assert hasattr(ae, 'extensions'), \"Autoencoder didn't call Model.__init__\"\n\n\ndef test_autoencoder_logistic_linear_tied():\n data = np.random.randn(10, 5).astype(config.floatX)\n ae = Autoencoder(5, 7, act_enc='sigmoid', act_dec='linear',\n tied_weights=True)\n w = ae.weights.get_value()\n ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))\n hb = ae.hidbias.get_value()\n ae.visbias.set_value(np.random.randn(5).astype(config.floatX))\n vb = ae.visbias.get_value()\n d = tensor.matrix()\n result = np.dot(1. / (1 + np.exp(-hb - np.dot(data, w))), w.T) + vb\n ff = theano.function([d], ae.reconstruct(d))\n assert _allclose(ff(data), result)\n\n\ndef test_autoencoder_tanh_cos_untied():\n data = np.random.randn(10, 5).astype(config.floatX)\n ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos',\n tied_weights=False)\n w = ae.weights.get_value()\n w_prime = ae.w_prime.get_value()\n ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))\n hb = ae.hidbias.get_value()\n ae.visbias.set_value(np.random.randn(5).astype(config.floatX))\n vb = ae.visbias.get_value()\n d = tensor.matrix()\n result = np.cos(np.dot(np.tanh(hb + np.dot(data, w)), w_prime) + vb)\n ff = theano.function([d], ae.reconstruct(d))\n assert _allclose(ff(data), result)\n\n\ndef test_high_order_autoencoder_init():\n \"\"\"\n Just test that model initialize and return\n the penalty without error.\n \"\"\"\n corruptor = BinomialCorruptor(corruption_level=0.5)\n model = HigherOrderContractiveAutoencoder(\n corruptor=corruptor,\n num_corruptions=2,\n nvis=5,\n nhid=7,\n act_enc='sigmoid',\n act_dec='sigmoid')\n\n X = tensor.matrix()\n data = np.random.randn(10, 5).astype(config.floatX)\n ff = theano.function([X], model.higher_order_penalty(X))\n assert type(ff(data)) == np.ndarray\n\n\ndef test_cae_basic():\n \"\"\"\n Tests that we can load a contractive autoencoder\n and train it for a few epochs (without saving) on a dummy\n dataset-- tiny model and dataset\n \"\"\"\n with open(os.path.join(yaml_dir_path, 'cae.yaml')) as f:\n yaml_string = f.read()\n train = yaml_parse.load(yaml_string)\n train.main_loop()\n\n\ndef test_hcae_basic():\n \"\"\"\n Tests that we can load a higher order contractive autoencoder\n and train it for a few epochs (without saving) on a dummy\n dataset-- tiny model and dataset\n \"\"\"\n with open(os.path.join(yaml_dir_path, 'hcae.yaml')) as f:\n yaml_string = f.read()\n train = yaml_parse.load(yaml_string)\n train.main_loop()\n\n\ndef test_untied_ae():\n \"\"\"\n Tests that UntiedAutoencoder calls the Model superclass constructor\n \"\"\"\n ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos',\n tied_weights=True)\n model = UntiedAutoencoder(ae)\n model._ensure_extensions()\n\n\ndef test_dcae():\n \"\"\"\n Tests that DeepComposedAutoencoder works correctly\n \"\"\"\n ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos',\n tied_weights=True)\n model = DeepComposedAutoencoder([ae])\n model._ensure_extensions()\n\n data = np.random.randn(10, 5).astype(config.floatX)\n model.perform(data)\n", "\"\"\"\n.. todo::\n\n WRITEME\n\"\"\"\nimport numpy as N\nfrom theano.compat.six.moves import xrange\nfrom theano import config\n\n\nclass Graph2D:\n \"\"\"\n .. 
todo::\n\n WRITEME\n\n Parameters\n ----------\n shape : WRITEME\n xlim : WRITEME\n ycenter : WRITEME\n \"\"\"\n def __init__(self, shape, xlim, ycenter):\n self.xmin = 0.\n self.xmax = 0.\n self.set_shape(shape)\n self.set_xlim(xlim)\n self.set_ycenter(ycenter)\n\n self.components = []\n\n def set_shape(self, shape):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n self.rows = shape[0]\n self.cols = shape[1]\n\n\n\n def set_xlim(self, xlim):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n #x coordinate of center of leftmost pixel\n self.xmin = xlim[0]\n #x coordinate of center of rightmost pixel\n self.xmax = xlim[1]\n self.delta_x = (self.xmax-self.xmin)/float(self.cols-1)\n\n def set_ycenter(self, ycenter):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n self.delta_y = self.delta_x\n self.ymin = ycenter - (self.rows / 2) * self.delta_y\n self.ymax = self.ymin + (self.rows -1) * self.delta_y\n\n def render(self):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n rval = N.zeros((self.rows, self.cols, 3))\n\n for component in self.components:\n rval = component.render( prev_layer = rval, parent = self )\n assert rval is not None\n\n return rval\n\n def get_coords_for_col(self, i):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n X = N.zeros((self.rows,2),dtype=config.floatX)\n X[:,0] = self.xmin + float(i) * self.delta_x\n X[:,1] = self.ymin + N.cast[config.floatX](N.asarray(range(self.rows-1,-1,-1))) * self.delta_y\n\n\n return X\n\nclass HeatMap:\n \"\"\"\n .. todo::\n\n WRITEME\n\n Parameters\n ----------\n f : WRITEME\n A callable that takes a design matrix of 2D coordinates and returns a\n vector containing the function value at those coordinates\n normalizer : WRITEME\n None or a callable that takes a 2D numpy array and returns a 2D numpy\n array\n render_mode : WRITEME\n * 'o' : opaque.\n * 'r' : render only to the (r)ed channel\n \"\"\"\n def __init__(self, f, normalizer, render_mode = 'o'):\n self.f = f\n self.normalizer = normalizer\n self.render_mode = render_mode\n\n def render(self, prev_layer, parent):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n my_img = prev_layer * 0.0\n\n for i in xrange(prev_layer.shape[1]):\n X = parent.get_coords_for_col(i)\n f = self.f(X)\n if len(f.shape) == 1:\n for j in xrange(3):\n my_img[:,i,j] = f\n else:\n my_img[:,i,:] = f\n #end if\n #end for i\n\n if self.normalizer is not None:\n my_img = self.normalizer(my_img)\n assert my_img is not None\n\n if self.render_mode == 'r':\n my_img[:,:,1:] = prev_layer[:,:,1:]\n elif self.render_mode == 'o':\n pass\n else:\n raise NotImplementedError()\n\n return my_img\n" ]
[ [ "numpy.exp", "numpy.random.RandomState", "numpy.zeros", "numpy.allclose" ], [ "numpy.dot", "numpy.random.randn" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SchubertLab/mvTCR
[ "d815749e24650f69ef68054e0078d490af91b71d" ]
[ "tcr_embedding/models/mixture_modules/separate_model.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom tcr_embedding.models.architectures.transformer import TransformerEncoder, TransformerDecoder\nfrom tcr_embedding.models.architectures.mlp import MLP\nfrom tcr_embedding.models.architectures.mlp_scRNA import build_mlp_encoder, build_mlp_decoder\nfrom tcr_embedding.models.vae_base_model import VAEBaseModel\n\n\ndef none_model(hyperparams, hdim, xdim):\n\tpass\n\n\nclass SeparateModelTorch(nn.Module):\n\tdef __init__(self, tcr_params, rna_params, joint_params):\n\t\tsuper(SeparateModelTorch, self).__init__()\n\t\thdim = joint_params['hdim']\n\t\tnum_conditional_labels = joint_params['num_conditional_labels']\n\t\tcond_dim = joint_params['cond_dim']\n\t\tcond_input = joint_params['cond_input']\n\t\tzdim = joint_params['zdim']\n\t\tshared_hidden = joint_params['shared_hidden']\n\t\tactivation = joint_params['activation']\n\t\tdropout = joint_params['dropout']\n\t\tbatch_norm = joint_params['batch_norm']\n\n\t\tnum_seq_labels = tcr_params['num_seq_labels']\n\n\t\tself.use_rna = rna_params is not None\n\t\tnum_modalities = 1 if rna_params is None else 2\n\n\t\tself.alpha_encoder = TransformerEncoder(tcr_params, hdim//2, num_seq_labels)\n\t\tself.alpha_decoder = TransformerDecoder(tcr_params, hdim*num_modalities, num_seq_labels)\n\n\t\tself.beta_encoder = TransformerEncoder(tcr_params, hdim//2, num_seq_labels)\n\t\tself.beta_decoder = TransformerDecoder(tcr_params, hdim*num_modalities, num_seq_labels)\n\n\t\tif not self.use_rna:\n\t\t\txdim = None\n\t\t\tself.gene_encoder = none_model(rna_params, xdim, hdim)\n\t\t\tself.gene_decoder = none_model(rna_params, xdim, hdim)\n\t\telse:\n\t\t\txdim = rna_params['xdim']\n\t\t\tself.gene_encoder = build_mlp_encoder(rna_params, xdim, hdim)\n\t\t\tself.gene_decoder = build_mlp_decoder(rna_params, xdim, hdim*num_modalities)\n\t\t\t# used for NB loss\n\t\t\tself.theta = torch.nn.Parameter(torch.randn(xdim))\n\n\t\tif cond_dim > 0:\n\t\t\tself.cond_emb = torch.nn.Embedding(num_conditional_labels, cond_dim)\n\t\tself.cond_input = cond_input\n\t\tcond_input_dim = cond_dim if cond_input else 0\n\n\t\tself.shared_encoder = MLP(hdim*num_modalities+cond_input_dim, zdim*2, shared_hidden, activation, 'linear',\n\t\t\t\t\t\t\t\t dropout, batch_norm, regularize_last_layer=False)\n\t\tself.shared_decoder = MLP(zdim+cond_dim, hdim*num_modalities, shared_hidden[::-1], activation, activation,\n\t\t\t\t\t\t\t\t dropout, batch_norm, regularize_last_layer=True)\n\n\n\tdef forward(self, rna, tcr, tcr_len, conditional=None):\n\t\t\"\"\"\n\t\tForward pass of autoencoder\n\t\t:param rna: torch.Tensor shape=[batch_size, num_genes]\n\t\t:param tcr: torch.Tensor shape=[batch_size, seq_len, num_seq_labels]\n\t\t:param tcr_len: torch.LongTensor shape=[batch_size] indicating how long the real unpadded length is\n\t\t:param conditional: torch.Tensor shape=[batch_size, n_cond] one-hot-encoded conditional covariates\n\t\t:return: scRNA_pred, tcr_seq_pred\n\t\t\"\"\"\n\t\talpha_seq = tcr[:, :tcr.shape[1]//2]\n\t\talpha_len = tcr_len[:, 0]\n\n\t\tbeta_seq = tcr[:, tcr.shape[1]//2:]\n\t\tbeta_len = tcr_len[:, 1]\n\n\t\th_beta = self.beta_encoder(beta_seq, beta_len) # shape=[batch_size, hdim//2]\n\t\th_alpha = self.alpha_encoder(alpha_seq, alpha_len) # shape=[batch_size, hdim//2]\n\n\t\tif conditional is not None: # more efficient than doing two concatenations\n\t\t\tcond_emb_vec = self.cond_emb(conditional)\n\n\t\tif not self.use_rna:\n\t\t\tif conditional is not None and self.cond_input: # more efficient than doing two 
concatenations\n\t\t\t\tjoint_feature = torch.cat([h_alpha, h_beta, cond_emb_vec], dim=-1) # shape=[batch_size, hdim+cond_dim]\n\t\t\telse:\n\t\t\t\tjoint_feature = torch.cat([h_alpha, h_beta], dim=-1)\n\t\telse:\n\t\t\th_rna = self.gene_encoder(rna) # shape=[batch_size, hdim]\n\t\t\tif conditional is not None and self.cond_input:\n\t\t\t\tjoint_feature = torch.cat([h_rna, h_alpha, h_beta, cond_emb_vec], dim=-1)\n\t\t\telse:\n\t\t\t\tjoint_feature = torch.cat([h_rna, h_alpha, h_beta], dim=-1)\n\n\t\tz_ = self.shared_encoder(joint_feature) # shape=[batch_size, zdim*2]\n\t\tmu, logvar = z_[:, :z_.shape[1]//2], z_[:, z_.shape[1]//2:] # mu.shape = logvar.shape = [batch_size, zdim]\n\t\tz = self.reparameterize(mu, logvar) # shape=[batch_size, zdim]\n\n\t\tif conditional is not None:\n\t\t\tz_input = torch.cat([z, cond_emb_vec], dim=-1) # shape=[batch_size, zdim+cond_dim]\n\t\telse:\n\t\t\tz_input = z\n\t\tjoint_dec_feature = self.shared_decoder(z_input) # shape=[batch_size, hdim*2]\n\t\tif not self.use_rna:\n\t\t\trna_pred = None\n\t\telse:\n\t\t\trna_pred = self.gene_decoder(joint_dec_feature) # shape=[batch_size, num_genes]\n\n\t\talpha_seq_pred = self.alpha_decoder(joint_dec_feature, alpha_seq)\n\t\tbeta_seq_pred = self.beta_decoder(joint_dec_feature, beta_seq)\n\n\t\ttcr_pred = torch.cat([alpha_seq_pred, beta_seq_pred], dim=1) # cat along sequence dim\n\t\treturn z, mu, logvar, rna_pred, tcr_pred\n\n\tdef reparameterize(self, mu, log_var):\n\t\t\"\"\"\n\t\thttps://debuggercafe.com/getting-started-with-variational-autoencoder-using-pytorch/\n\t\t:param mu: mean from the encoder's latent space\n\t\t:param log_var: log variance from the encoder's latent space\n\t\t\"\"\"\n\t\tstd = torch.exp(0.5 * log_var) # standard deviation\n\t\teps = torch.randn_like(std) # `randn_like` as we need the same size\n\t\tz = mu + (eps * std) # sampling as if coming from the input space\n\t\treturn z\n\n\tdef predict_transcriptome(self, z_shared, conditional=None):\n\t\t\"\"\"\n\t\tPredict the transcriptome connected to an shared latent space\n\t\t:param z_shared: torch.tensor, shared latent representation\n\t\t:param conditional:\n\t\t:return: torch.tensor, transcriptome profile\n\t\t\"\"\"\n\t\tif conditional is not None: # more efficient than doing two concatenations\n\t\t\tcond_emb_vec = self.cond_emb(conditional)\n\t\t\tz_shared = torch.cat([z_shared, cond_emb_vec], dim=-1) # shape=[batch_size, zdim+cond_dim]\n\n\t\tjoint_dec_feature = self.shared_decoder(z_shared)\n\t\tif self.scRNA_model_arch == 'None' or self.scRNA_model_arch is None:\n\t\t\traise ValueError('Trying to predict transcriptome with a model without rna')\n\t\telse:\n\t\t\ttranscriptome_pred = self.gene_decoder(joint_dec_feature) # shape=[batch_size, num_genes]\n\t\treturn transcriptome_pred\n\n\tdef get_latent_from_z(self, z):\n\t\treturn z\n\n\nclass SeparateModel(VAEBaseModel):\n\tdef __init__(self,\n\t\t\t\t adata,\n\t\t\t\t params_architecture,\n\t\t\t\t balanced_sampling='clonotype',\n\t\t\t\t metadata=None,\n\t\t\t\t conditional=None,\n\t\t\t\t optimization_mode_params=None,\n\t\t\t\t label_key=None,\n\t\t\t\t device=None\n\t\t\t\t ):\n\t\tsuper(SeparateModel, self).__init__(adata, params_architecture, balanced_sampling, metadata,\n\t\t\t\t\t\t\t\t\t\t\tconditional, optimization_mode_params, label_key, device)\n\t\tself.model_type = 'separate'\n\n\t\tself.params_tcr['max_tcr_length'] = adata.obsm['alpha_seq'].shape[1]\n\t\tself.params_tcr['num_seq_labels'] = len(self.aa_to_id)\n\n\t\tif self.params_rna is not 
None:\n\t\t\tself.params_rna['xdim'] = adata[0].X.shape[1]\n\n\t\tnum_conditional_labels = 0\n\t\tcond_dim = 0\n\t\tif self.conditional is not None:\n\t\t\tif self.conditional in adata.obsm:\n\t\t\t\tnum_conditional_labels = adata.obsm[self.conditional].shape[1]\n\t\t\telse:\n\t\t\t\tnum_conditional_labels = len(adata.obs[self.conditional].unique())\n\t\t\tif 'c_embedding_dim' not in self.params_joint:\n\t\t\t\tcond_dim = 20\n\t\t\telse:\n\t\t\t\tcond_dim = self.params_joint['c_embedding_dim']\n\t\tself.params_joint['num_conditional_labels'] = num_conditional_labels\n\t\tself.params_joint['cond_dim'] = cond_dim\n\t\tself.params_joint['cond_input'] = conditional is not None\n\n\t\tself.model = SeparateModelTorch(self.params_tcr, self.params_rna, self.params_joint)\n\n\tdef calculate_loss(self, rna_pred, rna, tcr_pred, tcr):\n\t\t# For GRU and Transformer, as they don't predict start token for alpha and beta chain, so -2\n\t\tif tcr_pred.shape[1] == tcr.shape[1] - 2:\n\t\t\tmask = torch.ones_like(tcr).bool()\n\t\t\tmask[:, [0, mask.shape[1] // 2]] = False\n\t\t\ttcr_loss = self.loss_weights[1] * self.loss_function_tcr(tcr_pred.flatten(end_dim=1), tcr[mask].flatten())\n\t\telse: # For CNN, as it predicts start token\n\t\t\ttcr_loss = self.loss_weights[1] * self.loss_function_tcr(tcr_pred.flatten(end_dim=1), tcr.flatten())\n\n\t\trna_loss = torch.FloatTensor([0]).to(self.device)\n\t\tif rna_pred is not None:\n\t\t\trna_loss = self.loss_weights[0] * self.loss_function_rna(rna_pred, rna)\n\t\treturn rna_loss, tcr_loss\n\n\tdef calculate_kld_loss(self, mu, logvar, epoch):\n\t\tkld_loss = self.loss_function_kld(mu, logvar)\n\t\tkld_loss *= self.loss_weights[2] * self.get_kl_annealing_factor(epoch)\n\t\tz = mu # make z deterministic by using the mean\n\t\treturn kld_loss, z\n" ]
[ [ "torch.randn_like", "torch.cat", "torch.randn", "torch.nn.Embedding", "torch.exp", "torch.FloatTensor", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Qin-Ming/nilearn
[ "82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f", "82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f", "82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f" ]
[ "nilearn/plotting/tests/test_html_document.py", "nilearn/glm/tests/test_contrasts.py", "nilearn/glm/tests/test_hemodynamic_models.py" ]
[ "import os\nimport time\nimport pytest\nimport tempfile\nimport webbrowser\nfrom nilearn.plotting import html_document\n\nfrom numpy.testing import assert_no_warnings\n\n# Note: html output by nilearn view_* functions\n# should validate as html5 using https://validator.w3.org/nu/ with no\n# warnings\n\n\ndef _open_mock(f):\n print('opened {}'.format(f))\n\n\ndef test_temp_file_removing():\n html = html_document.HTMLDocument('hello')\n wb_open = webbrowser.open\n webbrowser.open = _open_mock\n fd, tmpfile = tempfile.mkstemp()\n try:\n os.close(fd)\n with pytest.warns(None) as record:\n html.open_in_browser(file_name=tmpfile, temp_file_lifetime=None)\n for warning in record:\n assert \"Saved HTML in temporary file\" not in str(warning.message)\n html.open_in_browser(temp_file_lifetime=0.5)\n assert os.path.isfile(html._temp_file)\n time.sleep(1.5)\n assert not os.path.isfile(html._temp_file)\n with pytest.warns(UserWarning, match=\"Saved HTML in temporary file\"):\n html.open_in_browser(temp_file_lifetime=None)\n html.open_in_browser(temp_file_lifetime=None)\n assert os.path.isfile(html._temp_file)\n time.sleep(1.5)\n assert os.path.isfile(html._temp_file)\n finally:\n webbrowser.open = wb_open\n try:\n os.remove(html._temp_file)\n except Exception:\n pass\n try:\n os.remove(tmpfile)\n except Exception:\n pass\n\n\ndef _open_views():\n return [html_document.HTMLDocument('') for i in range(12)]\n\n\ndef _open_one_view():\n for i in range(12):\n v = html_document.HTMLDocument('')\n return v\n\n\ndef test_open_view_warning():\n # opening many views (without deleting the SurfaceView objects)\n # should raise a warning about memory usage\n pytest.warns(UserWarning, _open_views)\n assert_no_warnings(_open_one_view)\n html_document.set_max_img_views_before_warning(15)\n assert_no_warnings(_open_views)\n html_document.set_max_img_views_before_warning(-1)\n assert_no_warnings(_open_views)\n html_document.set_max_img_views_before_warning(None)\n assert_no_warnings(_open_views)\n html_document.set_max_img_views_before_warning(6)\n pytest.warns(UserWarning, _open_views)\n", "import numpy as np\n\nfrom numpy.testing import assert_almost_equal\nfrom sklearn.datasets import make_regression\nfrom sklearn.linear_model import LinearRegression\n\nfrom nilearn.glm.contrasts import (_compute_fixed_effect_contrast,\n _compute_fixed_effects_params,\n compute_contrast,\n expression_to_contrast_vector)\nfrom nilearn.glm.first_level import run_glm\n\n\ndef test_expression_to_contrast_vector():\n cols = \"a face xy_z house window\".split()\n contrast = expression_to_contrast_vector(\n \"face / 10 + (window - face) * 2 - house\", cols)\n assert np.allclose(contrast, [0., -1.9, 0., -1., 2.])\n contrast = expression_to_contrast_vector(\"xy_z\", cols)\n assert np.allclose(contrast, [0., 0., 1., 0., 0.])\n cols = [\"a\", \"b\", \"a - b\"]\n contrast = expression_to_contrast_vector(\"a - b\", cols)\n assert np.allclose(contrast, [0., 0., 1.])\n cols = [\"column_1\"]\n contrast = expression_to_contrast_vector(\"column_1\", cols)\n assert np.allclose(contrast, [1.])\n\n\ndef test_Tcontrast():\n rng = np.random.RandomState(42)\n n, p, q = 100, 80, 10\n X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))\n labels, results = run_glm(Y, X, 'ar1')\n con_val = np.eye(q)[0]\n z_vals = compute_contrast(labels, results, con_val).z_score()\n assert_almost_equal(z_vals.mean(), 0, 0)\n assert_almost_equal(z_vals.std(), 1, 0)\n\n\ndef test_Fcontrast():\n rng = np.random.RandomState(42)\n n, p, q = 100, 80, 10\n X, 
Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))\n for model in ['ols', 'ar1']:\n labels, results = run_glm(Y, X, model)\n for con_val in [np.eye(q)[0], np.eye(q)[:3]]:\n z_vals = compute_contrast(\n labels, results, con_val, contrast_type='F').z_score()\n assert_almost_equal(z_vals.mean(), 0, 0)\n assert_almost_equal(z_vals.std(), 1, 0)\n\n\ndef test_t_contrast_add():\n rng = np.random.RandomState(42)\n n, p, q = 100, 80, 10\n X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))\n lab, res = run_glm(Y, X, 'ols')\n c1, c2 = np.eye(q)[0], np.eye(q)[1]\n con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)\n z_vals = con.z_score()\n assert_almost_equal(z_vals.mean(), 0, 0)\n assert_almost_equal(z_vals.std(), 1, 0)\n\n\ndef test_fixed_effect_contrast():\n rng = np.random.RandomState(42)\n n, p, q = 100, 80, 10\n X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))\n lab, res = run_glm(Y, X, 'ols')\n c1, c2 = np.eye(q)[0], np.eye(q)[1]\n con = _compute_fixed_effect_contrast([lab, lab], [res, res], [c1, c2])\n z_vals = con.z_score()\n assert_almost_equal(z_vals.mean(), 0, 0)\n assert_almost_equal(z_vals.std(), 1, 0)\n\n\ndef test_fixed_effect_contrast_nonzero_effect():\n X, y = make_regression(n_features=5, n_samples=20, random_state=0)\n y = y[:, None]\n labels, results = run_glm(y, X, 'ols')\n coef = LinearRegression(fit_intercept=False).fit(X, y).coef_\n for i in range(X.shape[1]):\n contrast = np.zeros(X.shape[1])\n contrast[i] = 1.\n fixed_effect = _compute_fixed_effect_contrast([labels],\n [results],\n [contrast],\n )\n assert_almost_equal(fixed_effect.effect_size(), coef.ravel()[i])\n fixed_effect = _compute_fixed_effect_contrast(\n [labels] * 3, [results] * 3, [contrast] * 3)\n assert_almost_equal(fixed_effect.effect_size(), coef.ravel()[i])\n\n\ndef test_F_contrast_add():\n rng = np.random.RandomState(42)\n n, p, q = 100, 80, 10\n X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))\n lab, res = run_glm(Y, X, 'ar1')\n c1, c2 = np.eye(q)[:2], np.eye(q)[2:4]\n con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)\n z_vals = con.z_score()\n assert_almost_equal(z_vals.mean(), 0, 0)\n assert_almost_equal(z_vals.std(), 1, 0)\n\n # first test with dependent contrast\n con1 = compute_contrast(lab, res, c1)\n con2 = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c1)\n assert_almost_equal(con1.effect * 2, con2.effect)\n assert_almost_equal(con1.variance * 2, con2.variance)\n assert_almost_equal(con1.stat() * 2, con2.stat())\n\n\ndef test_contrast_mul():\n rng = np.random.RandomState(42)\n n, p, q = 100, 80, 10\n X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))\n lab, res = run_glm(Y, X, 'ar1')\n for c1 in [np.eye(q)[0], np.eye(q)[:3]]:\n con1 = compute_contrast(lab, res, c1)\n con2 = con1 * 2\n assert_almost_equal(con1.effect * 2, con2.effect)\n assert_almost_equal(con1.z_score(), con2.z_score())\n\n\ndef test_contrast_values():\n # but this test is circular and should be removed\n rng = np.random.RandomState(42)\n n, p, q = 100, 80, 10\n X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))\n lab, res = run_glm(Y, X, 'ar1', bins=1)\n # t test\n cval = np.eye(q)[0]\n con = compute_contrast(lab, res, cval)\n t_ref = list(res.values())[0].Tcontrast(cval).t\n assert_almost_equal(np.ravel(con.stat()), t_ref)\n # F test\n cval = np.eye(q)[:3]\n con = compute_contrast(lab, res, cval)\n F_ref = 
list(res.values())[0].Fcontrast(cval).F\n # Note that the values are not strictly equal,\n # this seems to be related to a bug in Mahalanobis\n assert_almost_equal(np.ravel(con.stat()), F_ref, 3)\n\n\ndef test_low_level_fixed_effects():\n rng = np.random.RandomState(42)\n p = 100\n # X1 is some effects estimate, V1 their variance for \"session 1\"\n X1, V1 = rng.standard_normal(size=p), np.ones(p)\n # same thing for a \"session 2\"\n X2, V2 = 2 * X1, 4 * V1\n # compute the fixed effects estimate, Xf, their variance Vf,\n # and the corresponding t statistic tf\n Xf, Vf, tf = _compute_fixed_effects_params([X1, X2], [V1, V2],\n precision_weighted=False)\n # check that the values are correct\n assert_almost_equal(Xf, 1.5 * X1)\n assert_almost_equal(Vf, 1.25 * V1)\n assert_almost_equal(tf, Xf / np.sqrt(Vf))\n\n # Same thing, but now there is no precision weighting\n Xw, Vw, tw = _compute_fixed_effects_params([X1, X2], [V1, V2],\n precision_weighted=True)\n assert_almost_equal(Xw, 1.2 * X1)\n assert_almost_equal(Vw, .8 * V1)\n", "import warnings\n\nimport numpy as np\nimport pytest\n\nfrom numpy.testing import (assert_almost_equal,\n assert_array_equal,\n assert_array_almost_equal,\n )\n\nfrom nilearn.glm.first_level.hemodynamic_models import (_hrf_kernel,\n _orthogonalize,\n _regressor_names,\n _resample_regressor,\n _sample_condition,\n compute_regressor,\n spm_dispersion_derivative,\n spm_hrf,\n spm_time_derivative,\n glover_dispersion_derivative,\n glover_hrf,\n glover_time_derivative,\n )\n\n\ndef test_spm_hrf():\n \"\"\" test that the spm_hrf is correctly normalized and has correct length\n \"\"\"\n h = spm_hrf(2.0)\n assert_almost_equal(h.sum(), 1)\n assert len(h) == 800\n\n\ndef test_spm_hrf_derivative():\n \"\"\" test that the spm_hrf is correctly normalized and has correct length\n \"\"\"\n h = spm_time_derivative(2.0)\n assert_almost_equal(h.sum(), 0)\n assert len(h) == 800\n h = spm_dispersion_derivative(2.0)\n assert_almost_equal(h.sum(), 0)\n assert len(h) == 800\n\n\ndef test_glover_hrf():\n \"\"\" test that the spm_hrf is correctly normalized and has correct length\n \"\"\"\n h = glover_hrf(2.0)\n assert_almost_equal(h.sum(), 1)\n assert len(h) == 800\n h = glover_dispersion_derivative(2.0)\n assert_almost_equal(h.sum(), 0)\n assert len(h) == 800\n\n\ndef test_glover_time_derivative():\n \"\"\" test that the spm_hrf is correctly normalized and has correct length\n \"\"\"\n h = glover_time_derivative(2.0)\n assert_almost_equal(h.sum(), 0)\n assert len(h) == 800\n\n\ndef test_resample_regressor():\n \"\"\" test regressor resampling on a linear function\n \"\"\"\n x = np.linspace(0, 1, 200)\n y = np.linspace(0, 1, 30)\n z = _resample_regressor(x, x, y)\n assert_almost_equal(z, y)\n\n\ndef test_resample_regressor_nl():\n \"\"\" test regressor resampling on a sine function\n \"\"\"\n x = np.linspace(0, 10, 1000)\n y = np.linspace(0, 10, 30)\n z = _resample_regressor(np.cos(x), x, y)\n assert_almost_equal(z, np.cos(y), decimal=2)\n\n\ndef test_orthogonalize():\n \"\"\" test that the orthogonalization is OK \"\"\"\n rng = np.random.RandomState(42)\n X = rng.standard_normal(size=(100, 5))\n X = _orthogonalize(X)\n K = np.dot(X.T, X)\n K -= np.diag(np.diag(K))\n assert_almost_equal((K ** 2).sum(), 0, 15)\n\n\ndef test_orthogonalize_trivial():\n \"\"\" test that the orthogonalization is OK \"\"\"\n rng = np.random.RandomState(42)\n X = rng.standard_normal(size=100)\n Y = X.copy()\n X = _orthogonalize(X)\n assert_array_equal(Y, X)\n\n\ndef test_sample_condition_1():\n \"\"\" Test that the 
experimental condition is correctly sampled\n \"\"\"\n condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1])\n frame_times = np.linspace(0, 49, 50)\n reg, rf = _sample_condition(condition, frame_times, oversampling=1,\n min_onset=0)\n assert reg.sum() == 3\n assert reg[1] == 1\n assert reg[20] == 1\n assert reg[37] == 1\n\n reg, rf = _sample_condition(condition, frame_times, oversampling=1)\n assert reg.sum() == 3\n assert reg[25] == 1\n assert reg[44] == 1\n assert reg[61] == 1\n\n\ndef test_sample_condition_2():\n \"\"\" Test the experimental condition sampling -- onset = 0\n \"\"\"\n condition = ([0, 20, 36.5], [2, 2, 2], [1, 1, 1])\n frame_times = np.linspace(0, 49, 50)\n reg, rf = _sample_condition(condition, frame_times, oversampling=1,\n min_onset=- 10)\n assert reg.sum() == 6\n assert reg[10] == 1\n assert reg[48] == 1\n assert reg[31] == 1\n\n\ndef test_sample_condition_3():\n \"\"\" Test the experimental condition sampling -- oversampling=10\n \"\"\"\n condition = ([1, 20, 36.5], [2, 2, 2], [1, 1, 1])\n frame_times = np.linspace(0, 49, 50)\n reg, rf = _sample_condition(condition, frame_times, oversampling=10,\n min_onset=0)\n assert_almost_equal(reg.sum(), 60.)\n assert reg[10] == 1\n assert reg[380] == 1\n assert reg[210] == 1\n assert np.sum(reg > 0) == 60\n # check robustness to non-int oversampling\n reg_, rf_ = _sample_condition(condition, frame_times, oversampling=10.,\n min_onset=0)\n assert_almost_equal(reg, reg_)\n\n\ndef test_sample_condition_4():\n \"\"\" Test the experimental condition sampling -- negative amplitude\n \"\"\"\n condition = ([1, 20, 36.5], [2, 2, 2], [1., -1., 5.])\n frame_times = np.linspace(0, 49, 50)\n reg, rf = _sample_condition(condition, frame_times, oversampling=1)\n assert reg.sum() == 10\n assert reg[25] == 1.\n assert reg[44] == -1.\n assert reg[61] == 5.\n\n\ndef test_sample_condition_5():\n \"\"\" Test the experimental condition sampling -- negative onset\n \"\"\"\n condition = ([-10, 0, 36.5], [2, 2, 2], [1., -1., 5.])\n frame_times = np.linspace(0, 49, 50)\n reg, rf = _sample_condition(condition, frame_times, oversampling=1)\n assert reg.sum() == 10\n assert reg[14] == 1.\n assert reg[24] == -1.\n assert reg[61] == 5.\n\n\ndef test_names():\n \"\"\" Test the regressor naming function\n \"\"\"\n name = 'con'\n assert _regressor_names(name, 'spm') == ['con']\n assert _regressor_names(\n name, 'spm + derivative') == ['con', 'con_derivative']\n assert _regressor_names(\n name, 'spm + derivative + dispersion') == ['con',\n 'con_derivative',\n 'con_dispersion']\n assert _regressor_names(name, 'glover') == ['con']\n assert _regressor_names(\n name, 'glover + derivative') == ['con', 'con_derivative']\n assert _regressor_names(\n name, 'glover + derivative + dispersion') == ['con',\n 'con_derivative',\n 'con_dispersion']\n\n\ndef test_hkernel():\n \"\"\" test the hrf computation\n \"\"\"\n tr = 2.0\n h = _hrf_kernel('spm', tr)\n assert_almost_equal(h[0], spm_hrf(tr))\n assert len(h) == 1\n h = _hrf_kernel('spm + derivative', tr)\n assert_almost_equal(h[1], spm_time_derivative(tr))\n assert len(h) == 2\n h = _hrf_kernel('spm + derivative + dispersion', tr)\n assert_almost_equal(h[2], spm_dispersion_derivative(tr))\n assert len(h) == 3\n h = _hrf_kernel('glover', tr)\n assert_almost_equal(h[0], glover_hrf(tr))\n assert len(h) == 1\n h = _hrf_kernel('glover + derivative', tr)\n assert_almost_equal(h[1], glover_time_derivative(tr))\n assert_almost_equal(h[0], glover_hrf(tr))\n assert len(h) == 2\n h = _hrf_kernel('fir', tr, fir_delays=np.arange(4))\n 
assert len(h) == 4\n for dh in h:\n assert_almost_equal(dh.sum(), 1.)\n\n h = _hrf_kernel(None, tr)\n assert len(h) == 1\n assert_almost_equal(h[0], np.hstack((1, np.zeros(49))))\n\n\ndef test_make_regressor_1():\n \"\"\" test the generated regressor\n \"\"\"\n condition = ([1, 20, 36.5], [2, 2, 2], [1, 1, 1])\n frame_times = np.linspace(0, 69, 70)\n hrf_model = 'spm'\n reg, reg_names = compute_regressor(condition, hrf_model, frame_times)\n assert_almost_equal(reg.sum(), 6, 1)\n assert reg_names[0] == 'cond'\n\n\ndef test_make_regressor_2():\n \"\"\" test the generated regressor\n \"\"\"\n condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1])\n frame_times = np.linspace(0, 69, 70)\n hrf_model = 'spm'\n reg, reg_names = compute_regressor(condition, hrf_model, frame_times)\n assert_almost_equal(reg.sum() * 50, 3, 1)\n assert reg_names[0] == 'cond'\n\n\ndef test_make_regressor_3():\n \"\"\" test the generated regressor\n \"\"\"\n condition = ([1, 20, 36.5], [2, 2, 2], [1, 1, 1])\n frame_times = np.linspace(0, 138, 70)\n hrf_model = 'fir'\n reg, reg_names = compute_regressor(condition, hrf_model, frame_times,\n fir_delays=np.arange(4))\n assert_array_almost_equal(np.sum(reg, 0), np.array([3, 3, 3, 3]))\n assert len(reg_names) == 4\n reg_, reg_names_ = compute_regressor(condition, hrf_model, frame_times,\n fir_delays=np.arange(4),\n oversampling=50.)\n assert_array_equal(reg, reg_)\n\n\ndef test_design_warnings():\n \"\"\"\n test that warnings are correctly raised upon weird design specification\n \"\"\"\n condition = ([-25, 20, 36.5], [0, 0, 0], [1, 1, 1])\n frame_times = np.linspace(0, 69, 70)\n hrf_model = 'spm'\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n with pytest.warns(UserWarning):\n compute_regressor(condition, hrf_model, frame_times)\n condition = ([-25, -25, 36.5], [0, 0, 0], [1, 1, 1])\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n with pytest.warns(UserWarning):\n compute_regressor(condition, hrf_model, frame_times)\n" ]
[ [ "numpy.testing.assert_no_warnings" ], [ "numpy.allclose", "numpy.sqrt", "numpy.eye", "numpy.ones", "sklearn.datasets.make_regression", "numpy.testing.assert_almost_equal", "sklearn.linear_model.LinearRegression", "numpy.random.RandomState", "numpy.zeros" ], [ "numpy.diag", "numpy.dot", "numpy.linspace", "numpy.arange", "numpy.cos", "numpy.testing.assert_array_equal", "numpy.testing.assert_almost_equal", "numpy.array", "numpy.random.RandomState", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
brechtvl/embree
[ "ae029e2ff83bebbbe8742c88aba5b0521aba1a23" ]
[ "scripts/generate_motion_derivative_coefficients.py" ]
[ "#!/usr/bin/python\n\n## Copyright 2009-2021 Intel Corporation\n## SPDX-License-Identifier: Apache-2.0\n\nimport sympy as sp\nimport numpy as np\nimport math\n\n\n################################################################################\n#### Utils\n################################################################################\n\ndef getTerms(map, key):\n if key in map.keys():\n return map[key]\n return 0\n\n# simple linear interpolation wrapper\ndef lerp(v0,v1,t):\n return v0*(1-t)+v1*t\n\n# custom quaternion to matrix conversion\ndef to_rotation_matrix(q):\n return sp.Matrix([[q.a*q.a + q.b*q.b - q.c*q.c - q.d*q.d, 2*(q.b*q.c - q.a*q.d), 2*(q.b*q.d + q.a*q.c), 0],\n [2*(q.b*q.c + q.a*q.d), q.a*q.a - q.b*q.b + q.c*q.c - q.d*q.d, 2*(q.c*q.d - q.a*q.b), 0],\n [2*(q.b*q.d - q.a*q.c), 2*(q.c*q.d + q.a*q.b), q.a*q.a - q.b*q.b - q.c*q.c + q.d*q.d, 0],\n [0, 0, 0, 1]])\n\n\n################################################################################\n#### Set up symbolic objects\n################################################################################\n\nt, theta = sp.symbols(\"t, theta\", real = True)\n\npx0, py0, pz0 = sp.symbols(\"px0, py0, pz0\", real=True) # vertex position at t=0\npx1, py1, pz1 = sp.symbols(\"px1, py1, pz1\", real=True) # vertex position at t=1\n\ntx0, ty0, tz0 = sp.symbols(\"tx0, ty0, tz0\", real=True) # translation at t=0\ntx1, ty1, tz1 = sp.symbols(\"tx1, ty1, tz1\", real=True) # translation at t=1\n\nqx0, qy0, qz0, qw0 = sp.symbols(\"qx0, qy0, qz0, qw0\", real=True) # quaternion at t=0 \nqx1, qy1, qz1, qw1 = sp.symbols(\"qx1, qy1, qz1, qw1\", real=True) # quaternion at t=1\n\n# coefficients for upper triangular matrices\ns000, s001, s002, s003, s011, s012, s013, s022, s023 = sp.symbols(\"s000, s001, s002, s003, s011, s012, s013, s022, s023\", real=True)\ns100, s101, s102, s103, s111, s112, s113, s122, s123 = sp.symbols(\"s100, s101, s102, s103, s111, s112, s113, s122, s123\", real=True)\n\nq0 = sp.Quaternion(qw0, qx0, qy0, qz0)\nq1 = sp.Quaternion(qw1, qx1, qy1, qz1)\n\n# assuming that q1 is qperp = normalize(q1-q0*cosTheta), where cosTheta=dot(q0, q1) and theta = acos(cosTheta).\n# this simplifies the terms of the symbolic expressions later\nqt = q0 * sp.cos(t*theta) + q1 * sp.sin(t*theta)\n\nS0 = sp.Matrix([[s000, s001, s002, s003],\n [ 0, s011, s012, s013],\n [ 0, 0, s022, s023],\n [ 0, 0, 0, 1]])\nS1 = sp.Matrix([[s100, s101, s102, s103],\n [ 0, s111, s112, s113],\n [ 0, 0, s122, s123],\n [ 0, 0, 0, 1]])\nD0 = sp.Matrix([[1, 0, 0, tx0],\n [0, 1, 0, ty0],\n [0, 0, 1, tz0],\n [0, 0, 0, 1]])\nD1 = sp.Matrix([[1, 0, 0, tx1],\n [0, 1, 0, ty1],\n [0, 0, 1, tz1],\n [0, 0, 0, 1]])\np0 = sp.Matrix([px0, py0, pz0, 1])\np1 = sp.Matrix([px1, py1, pz1, 1])\n\nGamma = lerp(D0, D1, t)*to_rotation_matrix(qt)*lerp(S0, S1, t)*lerp(p0, p1, t)\nC = sp.Matrix(np.empty(8)) # 8 coefficients\nK = sp.Matrix(np.empty(7)) # 7 inputs\nA = sp.Matrix(np.empty(8*7*3)) # 8 coefficients, 7 inputs (1, px0, py0, pz0, px1, py1, pz1), 3 dimensions (x, y, z)\ndGamma = sp.diff(Gamma, t)\n\n\n################################################################################\n#### Group the coefficients (this might time a couple of seconds)\n################################################################################\n\n# loop over dimensions (x, y, z)\nfor dim in range(3):\n dm = sp.expand(dGamma[dim])\n dm = dm.subs(sp.sin(t*theta)*sp.sin(t*theta),(1-sp.cos(2*t*theta))/2) # remove sin(t*theta)^2\n dm = dm.subs(sp.cos(t*theta)*sp.cos(t*theta),(1+sp.cos(2*t*theta))/2) # remove 
cos(t*theta)^2\n dm = dm.subs(sp.sin(t*theta)*sp.cos(t*theta),sp.sin(2*t*theta)/2) # remove sin(t*theta)*cos(t*theta)\n dm = sp.expand(dm)\n\n # group all terms in the form a + b * cos(2*t*theta) + c * sin(2*t*theta)\n dm_cos_sin = sp.collect(dm, (sp.cos(2*t*theta), sp.sin(2*t*theta)), evaluate=False)\n\n # get the terms\n coeff_cos = getTerms(dm_cos_sin, sp.cos(2*t*theta))\n coeff_sin = getTerms(dm_cos_sin, sp.sin(2*t*theta))\n coeff_const = getTerms(dm_cos_sin, 1)\n\n # group the term in the form a + b * t \n coeff_const_t = sp.collect(coeff_const, t, evaluate=False)\n C[0] = getTerms(coeff_const_t, 1)\n C[1] = getTerms(coeff_const_t, t)\n\n # group the term in the form a + b * t + c * t^2 \n coeff_cos_t = sp.collect(coeff_cos, t, evaluate=False)\n C[2] = getTerms(coeff_cos_t, 1)\n C[3] = getTerms(coeff_cos_t, t)\n C[4] = getTerms(coeff_cos_t, t*t)\n\n # group the term in the form a + b * t + c * t^2 \n coeff_sin_t = sp.collect(coeff_sin, t, evaluate=False)\n C[5] = getTerms(coeff_sin_t, 1)\n C[6] = getTerms(coeff_sin_t, t)\n C[7] = getTerms(coeff_sin_t, t*t)\n\n for c in range(8):\n kc = sp.collect(C[c], (px0, py0, pz0, px1, py1, pz1), evaluate=False)\n K[0] = getTerms(kc, 1)\n K[1] = getTerms(kc, px0)\n K[2] = getTerms(kc, py0)\n K[3] = getTerms(kc, pz0)\n K[4] = getTerms(kc, px1)\n K[5] = getTerms(kc, py1)\n K[6] = getTerms(kc, pz1)\n\n for k in range(7):\n K[k] = sp.expand(K[k])\n K[k] = K[k].subs(qw0*qw0, 1-qx0*qx0-qy0*qy0-qz0*qz0) # clean up substitutions\n K[k] = K[k].subs(qw1*qw1, 1-qx1*qx1-qy1*qy1-qz1*qz1) # clean up substitutions\n K[k] = sp.simplify(K[k])\n A[8*7*dim + c*7 + k] = K[k]\n\n\n################################################################################\n#### Write code to file\n################################################################################\n\nfrom sympy.utilities.codegen import codegen, default_datatypes\nfrom sympy.codegen.ast import real, float32\nfrom sympy.printing.ccode import C99CodePrinter\nprinter = C99CodePrinter()\n\n# custom code printer that will not generate such nonesene as x^2 -> pow(x, 2)\nclass CustomCodePrinter(C99CodePrinter):\n def _print_Pow(self, expr):\n if expr.exp.is_integer and expr.exp > 0 and expr.exp < 5:\n return '*'.join([self._print(expr.base) for i in range(expr.exp)])\n else:\n return super()._print_Pow(expr)\n\ncustomprinter = CustomCodePrinter()\ncustomprinter.type_aliases[real] = float32 # cosf instead of cos\ndefault_datatypes[\"float\"].cname = \"float\" # float instead of double\nparams = [\n theta,\n tx0, ty0, tz0,\n tx1, ty1, tz1,\n qw0, qx0, qy0, qz0,\n qw1, qx1, qy1, qz1,\n s000, s001, s002, s003, s011, s012, s013, s022, s023,\n s100, s101, s102, s103, s111, s112, s113, s122, s123]\nR = sp.MatrixSymbol(\"coeff\", A.shape[0], A.shape[1])\nP = sp.MatrixSymbol('p', len(params), 1)\nparam_map = dict(zip(params, P))\nB = A.xreplace(param_map)\ncodegen(('motion_derivative_coefficients', sp.Eq(R,B)), language='c', printer=customprinter, prefix='motion_derivative_coefficients', to_files=True)" ]
[ [ "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mingzheng01/pointnet
[ "401692e08441ff459b63786b9c65c11f78ea599e" ]
[ "sample_from_mesh.py" ]
[ "import open3d as o3d\nimport os\nimport numpy as np\nimport h5py\n\npoint_clouds = []\nfor dirpath, dirnames, filenames in os.walk(\"F:\\\\cases\\\\tooth_11_stls\"):\n for filename in filenames:\n print(os.path.splitext(filename)[-1])\n if os.path.splitext(filename)[-1] != \".stl\":\n continue\n\n full_filename = os.path.join(dirpath, filename)\n mesh = o3d.io.read_triangle_mesh(full_filename)\n mesh.remove_duplicated_vertices()\n mesh.compute_vertex_normals()\n print(mesh)\n pcd = mesh.sample_points_poisson_disk(1024)\n print(pcd)\n #o3d.visualization.draw_geometries([mesh, pcd], mesh_show_wireframe=True)\n #base_name = os.path.splitext(os.path.basename(filename))[0]\n #o3d.io.write_point_cloud(os.path.join(dirpath, base_name) + \".ply\", pcd)\n point_clouds.append(np.array(pcd.points))\n\nf = h5py.File(\"F:\\\\cases\\\\tooth_11_stls\\\\point_clouds.hdf5\", mode='w')\nf[\"point_clouds\"] = point_clouds\nf.close()\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rkansal47/weaver
[ "7e9d3d8c9ee43acb2a95f2d3f76c384822e04699" ]
[ "utils/data/tools.py" ]
[ "import numpy as np\nimport math\n\ntry:\n import awkward0 as awkward\nexcept ImportError:\n import awkward\n if awkward.__version__[0] == '1':\n raise ImportError('Please install awkward0 with `pip install awkward0`.')\n\n\ndef _concat(arrays, axis=0):\n if len(arrays) == 0:\n return np.array([])\n if isinstance(arrays[0], np.ndarray):\n return np.concatenate(arrays, axis=axis)\n else:\n return awkward.concatenate(arrays, axis=axis)\n\n\ndef _stack(arrays, axis=1):\n if len(arrays) == 0:\n return np.array([])\n if isinstance(arrays[0], np.ndarray):\n return np.stack(arrays, axis=axis)\n else:\n content = np.stack([a.content for a in arrays], axis=axis)\n return awkward.JaggedArray.fromcounts(arrays[0].counts, content)\n\n\ndef _pad(a, maxlen, value=0, dtype='float32'):\n if isinstance(a, np.ndarray) and a.ndim >= 2 and a.shape[1] == maxlen:\n return a\n elif isinstance(a, awkward.JaggedArray):\n return a.pad(maxlen, clip=True).fillna(value).regular().astype(dtype)\n else:\n x = (np.ones((len(a), maxlen)) * value).astype(dtype)\n for idx, s in enumerate(a):\n if not len(s):\n continue\n trunc = s[:maxlen].astype(dtype)\n x[idx, :len(trunc)] = trunc\n return x\n\n\ndef _repeat_pad(a, maxlen, shuffle=False, dtype='float32'):\n x = a.flatten()\n x = np.tile(x, int(np.ceil(len(a) * maxlen / len(x))))\n if shuffle:\n np.random.shuffle(x)\n x = x[:len(a) * maxlen].reshape((len(a), maxlen))\n mask = _pad(awkward.JaggedArray.zeros_like(a), maxlen, value=1)\n x = _pad(a, maxlen) + mask * x\n return x.astype(dtype)\n\n\ndef _clip(a, a_min, a_max):\n if isinstance(a, np.ndarray):\n return np.clip(a, a_min, a_max)\n else:\n return awkward.JaggedArray.fromcounts(a.counts, np.clip(a.content, a_min, a_max))\n\n\ndef _knn(support, query, k, n_jobs=1):\n from scipy.spatial import cKDTree\n kdtree = cKDTree(support)\n d, idx = kdtree.query(query, k, n_jobs=n_jobs)\n return idx\n\n\ndef _batch_knn(supports, queries, k, maxlen_s, maxlen_q=None, n_jobs=1):\n assert (len(supports) == len(queries))\n if maxlen_q is None:\n maxlen_q = maxlen_s\n batch_knn_idx = np.ones((len(supports), maxlen_q, k), dtype='int32') * (maxlen_s - 1)\n for i, (s, q) in enumerate(zip(supports, queries)):\n batch_knn_idx[i, :len(q[:maxlen_q]), :] = _knn(\n s[:maxlen_s], q[:maxlen_q], k, n_jobs=n_jobs).reshape((-1, k)) # (len(q), k)\n return batch_knn_idx\n\n\ndef _batch_permute_indices(array, maxlen):\n batch_permute_idx = np.tile(np.arange(maxlen), (len(array), 1))\n for i, a in enumerate(array):\n batch_permute_idx[i, :len(a)] = np.random.permutation(len(a[:maxlen]))\n return batch_permute_idx\n\n\ndef _batch_argsort(array, maxlen):\n batch_argsort_idx = np.tile(np.arange(maxlen), (len(array), 1))\n for i, a in enumerate(array):\n batch_argsort_idx[i, :len(a)] = np.argsort(a[:maxlen])\n return batch_argsort_idx\n\n\ndef _batch_gather(array, indices):\n out = array.zeros_like()\n for i, (a, idx) in enumerate(zip(array, indices)):\n maxlen = min(len(a), len(idx))\n out[i][:maxlen] = a[idx[:maxlen]]\n return out\n\n\ndef _get_variable_names(expr, exclude=['awkward', 'np', 'numpy', 'math']):\n import ast\n root = ast.parse(expr)\n return sorted({node.id for node in ast.walk(root) if isinstance(\n node, ast.Name) and not node.id.startswith('_')} - set(exclude))\n\n\ndef _eval_expr(expr, table):\n tmp = {k: table[k] for k in _get_variable_names(expr)}\n tmp.update(\n {'math': math, 'np': np, 'awkward': awkward, '_concat': _concat, '_stack': _stack, '_pad': _pad,\n '_repeat_pad': _repeat_pad, '_clip': _clip, '_batch_knn': _batch_knn,\n 
'_batch_permute_indices': _batch_permute_indices, '_batch_argsort': _batch_argsort,\n '_batch_gather': _batch_gather})\n return eval(expr, tmp)\n" ]
[ [ "numpy.clip", "numpy.arange", "numpy.random.shuffle", "numpy.stack", "numpy.concatenate", "numpy.argsort", "numpy.array", "scipy.spatial.cKDTree" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VXallset/deep-high-resolution-net.TensorFlow
[ "d885abc6f8699f5dfd09b270170f3c68fbf32ac2" ]
[ "src/utils.py" ]
[ "\"\"\"\nThis is the utils for deep learning, implemented with TensorFlow.\n\n@ Author: Yu Sun. [email protected]\n\n@ Date created: Jun 04, 2019\n\n@ Last modified: Jun 06, 2019\n\n\"\"\"\nimport tensorflow as tf\n\n\ndef leaky_Relu(input, name=''):\n return tf.nn.leaky_relu(input, alpha=0.1, name=name + '_relu')\n\n\ndef conv_2d(inputs, channels, kernel_size=3, strides=1, batch_normalization=True, activation=None,\n name='', padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), is_training=True):\n\n output = tf.layers.conv2d(inputs=inputs, filters=channels, kernel_size=kernel_size, strides=strides,\n padding=padding, name=name + '_conv', kernel_initializer=kernel_initializer)\n name = name + '_conv'\n\n if batch_normalization:\n output = tf.layers.batch_normalization(output, axis=-1, momentum=0.9, name=name+'_bn', training=is_training)\n name = name + '_bn'\n\n if activation:\n output = activation(output, name=name)\n\n return output\n\n\ndef down_sampling(input, method='strided_convolution', rate=2, name='', activation=leaky_Relu, is_training=True):\n assert method == 'max_pooling' or method == 'strided_convolution', \\\n 'Unknown type of down_sample method! \"strided_convolution\" and \"' \\\n 'max_pooling\" are expected, but \"' + method + '\" is provided!'\n output = input\n\n if method == 'strided_convolution':\n _, _, _, channels = input.get_shape()\n channels = channels.value\n output = input\n loop_index = 1\n new_rate = rate\n while new_rate > 1:\n assert new_rate % 2 == 0, 'The rate of down_sampling (using \"strided_convolution\") must be the power of ' \\\n '2, but \"{}\" is provided!'.format(rate)\n output = conv_2d(output, channels=channels * (2 ** loop_index), strides=2, activation=activation,\n name=name + 'down_sampling' + '_x' + str(loop_index * 2), is_training=is_training)\n loop_index += 1\n new_rate = int(new_rate / 2)\n\n elif method == 'max_pooling':\n output = tf.layers.max_pooling2d(input, pool_size=rate, strides=rate, name=name+'_max_pooling')\n\n return output\n\n\ndef up_sampling(input, channels, method='nearest_neighbor', rate=2, name='', activation=leaky_Relu, is_training=True):\n assert method == 'nearest_neighbor', 'Only \"nearest_neighbor\" method is supported now! 
' \\\n 'However, \"' + method + '\" is provided.'\n output = input\n if method == 'nearest_neighbor':\n _, x, y, _= input.get_shape()\n x = x.value\n y = y.value\n\n output = tf.image.resize_nearest_neighbor(input, size=(x*rate, y*rate), name=name + '_upsampling')\n name += '_upsampling'\n output = conv_2d(output, channels=channels, kernel_size=1, activation=activation,\n name=name + '_align_channels', is_training=is_training)\n\n return output\n\n\n# Repeated multi-scale fusion (namely the exchange block) within a stage (the input and the output has the same number\n# of sub-networks)\ndef exchange_within_stage(inputs, name='exchange_within_stage', is_training=True):\n with tf.variable_scope(name):\n subnetworks_number = len(inputs)\n outputs = []\n\n # suppose i is the index of the input sub-network, o is the index of the output sub-network\n for o in range(subnetworks_number):\n one_subnetwork = 0\n for i in range(subnetworks_number):\n if i == o:\n # if in the same resolution\n temp_subnetwork = inputs[i]\n elif i - o < 0:\n # if the input resolution is greater the output resolution, down-sampling with rate\n # of 2 ** (o - i)\n temp_subnetwork = down_sampling(inputs[i], rate=2 ** (o - i), name='i_{}_o_{}'.format(i, o),\n is_training=is_training)\n else:\n # if the input resolution is smaller the output resolution, up-sampling with rate of\n # 2 ** (o - i)\n _, _, _, c = inputs[o].get_shape()\n temp_subnetwork = up_sampling(inputs[i], channels=c, rate=2 ** (i - o),\n name='i_{}_o_{}'.format(i, o), is_training=is_training)\n one_subnetwork = tf.add(temp_subnetwork, one_subnetwork, name='add_i_{}_o_{}'.format(i, o))\n outputs.append(one_subnetwork)\n return outputs\n\n\n# Repeated multi-scale fusion (namely the exchange block) between two stages (the input and the output has the same\n# number of sub-networks)\ndef exchange_between_stage(inputs, name='exchange_between_stage', is_training=True):\n subnetworks_number = len(inputs)\n outputs = []\n\n # suppose i is the index of the input sub-network, o is the index of the output sub-network\n for o in range(subnetworks_number):\n one_subnetwork = 0\n for i in range(subnetworks_number):\n if i == o:\n # if in the same resolution\n temp_subnetwork = inputs[i]\n elif i - o < 0:\n # if the input resolution is greater the output resolution, down-sampling with rate\n # of 2 ** (o - i)\n temp_subnetwork = down_sampling(inputs[i], rate=2 ** (o - i), name='i_{}_o_{}'.format(i, o),\n is_training=is_training)\n else:\n # if the input resolution is smaller the output resolution, up-sampling with rate of\n # 2 ** (o - i)\n _, _, _, c = inputs[o].get_shape()\n temp_subnetwork = up_sampling(inputs[i], channels=c, rate=2 ** (i - o),\n name='i_{}_o_{}'.format(i, o), is_training=is_training)\n one_subnetwork = tf.add(temp_subnetwork, one_subnetwork, name='add_i_{}_o_{}'.format(i, o))\n outputs.append(one_subnetwork)\n one_subnetwork = down_sampling(inputs[-1], rate=2, name='new_resolution', is_training=is_training)\n outputs.append(one_subnetwork)\n return outputs\n\n\ndef residual_unit_bottleneck(input, name='RU_bottleneck', channels=64, is_training=True):\n \"\"\"\n Residual unit with bottleneck design, default width is 64.\n :param input:\n :param name:\n :return:\n \"\"\"\n _, _, _, c = input.get_shape()\n conv_1x1_1 = conv_2d(input, channels=channels, kernel_size=1, activation=leaky_Relu, name=name + '_conv1x1_1',\n is_training = is_training)\n conv_3x3 = conv_2d(conv_1x1_1, channels=channels, activation=leaky_Relu, name=name + '_conv3x3',\n 
is_training=is_training)\n conv_1x1_2 = conv_2d(conv_3x3, channels=c, kernel_size=1, name=name + '_conv1x1_2', is_training=is_training)\n _output = tf.add(input, conv_1x1_2, name=name + '_add')\n output = leaky_Relu(_output, name=name + '_out')\n return output\n\n\ndef residual_unit(input, name='RU', is_training=True):\n \"\"\"\n Residual unit with two 3 x 3 convolution layers.\n :param input:\n :param name:\n :return:\n \"\"\"\n _, _, _, channels = input.get_shape()\n conv3x3_1 = conv_2d(inputs=input, channels=channels, activation=leaky_Relu, name=name + '_conv3x3_1',\n is_training=is_training)\n conv3x3_2 = conv_2d(inputs=conv3x3_1, channels=channels, name=name + '_conv3x3_2', is_training=is_training)\n _output = tf.add(input, conv3x3_2, name=name + '_add')\n output = leaky_Relu(_output, name=name + '_out')\n return output\n\n\ndef exchange_block(inputs, name='exchange_block', is_training=True):\n with tf.variable_scope(name):\n output = []\n level = 0\n for input in inputs:\n sub_network = residual_unit(input, name='level{}RU1'.format(level), is_training=is_training)\n sub_network = residual_unit(sub_network, name='level{}RU2'.format(level), is_training=is_training)\n sub_network = residual_unit(sub_network, name='level{}RU3'.format(level), is_training=is_training)\n sub_network = residual_unit(sub_network, name='level{}RU4'.format(level), is_training=is_training)\n output.append(sub_network)\n level += 1\n outputs = exchange_within_stage(output, is_training=is_training)\n return outputs" ]
[ [ "tensorflow.layers.conv2d", "tensorflow.layers.batch_normalization", "tensorflow.image.resize_nearest_neighbor", "tensorflow.layers.max_pooling2d", "tensorflow.add", "tensorflow.variable_scope", "tensorflow.random_normal_initializer", "tensorflow.nn.leaky_relu" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
cgyqu/python_learning
[ "55c8df4a963c40ace050d3454b72538190cb0517" ]
[ "ml/time_series02.py" ]
[ "#%%\nfrom numpy import array\nfrom numpy import hstack\nfrom keras.models import Sequential\nfrom keras.layers import LSTM\nfrom keras.layers import Dense\nfrom keras.layers import RepeatVector\nfrom keras.layers import TimeDistributed\n#%%\n# split a multivariate sequence into samples\ndef split_sequences(sequences, n_steps_in, n_steps_out):\n\tX, y = list(), list()\n\tfor i in range(len(sequences)):\n\t\t# find the end of this pattern\n\t\tend_ix = i + n_steps_in\n\t\tout_end_ix = end_ix + n_steps_out\n\t\t# check if we are beyond the dataset\n\t\tif out_end_ix > len(sequences):\n\t\t\tbreak\n\t\t# gather input and output parts of the pattern\n\t\tseq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :]\n\t\tX.append(seq_x)\n\t\ty.append(seq_y)\n\treturn array(X), array(y)\n#%%\n# define input sequence\nin_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90])\nin_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95])\nout_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))])\n# convert to [rows, columns] structure\nin_seq1 = in_seq1.reshape((len(in_seq1), 1))\nin_seq2 = in_seq2.reshape((len(in_seq2), 1))\nout_seq = out_seq.reshape((len(out_seq), 1))\n# horizontally stack columns\ndataset = hstack((in_seq1, in_seq2, out_seq))\ndataset\n#%%\n# choose a number of time steps\nn_steps_in, n_steps_out = 3, 2\n# covert into input/output\nX, y = split_sequences(dataset, n_steps_in, n_steps_out)\nX,y\n#%%\n# the dataset knows the number of features, e.g. 2\nn_features = X.shape[2]\n# define model\nmodel = Sequential()\nmodel.add(LSTM(200, activation='relu', input_shape=(n_steps_in, n_features)))\nmodel.add(RepeatVector(n_steps_out))\nmodel.add(LSTM(200, activation='relu', return_sequences=True))\nmodel.add(TimeDistributed(Dense(n_features))) \n'''\n这里输入3D为(样本量,return_sequences数量(=n_steps_out),200)\n输出为(样本量,return_sequences数量(=n_steps_out),n_features)\n就是每个输出是(3,2)维度的\n'''\nmodel.compile(optimizer='adam', loss='mse')\n# fit model\nmodel.fit(X, y, epochs=300, verbose=0,batch_size=2)\n# demonstrate prediction\nx_input = array([[60, 65, 125], [70, 75, 145], [80, 85, 165]])\nx_input = x_input.reshape((1, n_steps_in, n_features))\nyhat = model.predict(x_input, verbose=0)\nprint(yhat)\n\n# %%\n" ]
[ [ "numpy.hstack", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aacsspkt/autodealerappliation
[ "c7ab3ae8e57e91c797129e87a13bd00d41bc4753" ]
[ "app/customer/views.py" ]
[ "from django.views.generic import ListView, DetailView, FormView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.urls.base import reverse\n\nfrom .models import Customer\nfrom .forms import CustomerForm, CustomerImportForm\n\n\ndef get_filtered_queryset(searchkey, searchvalue):\n return {\n \"citizenship_no\": Customer.objects.filter(citizenship_no__contains=searchvalue),\n \"pan_no\": Customer.objects.filter(pan_no__contains=searchvalue),\n \"fullname\": Customer.objects.filter(fullname__contains=searchvalue),\n \"dob\": Customer.objects.filter(dob__contains=searchvalue),\n \"gender\": Customer.objects.filter(gender__contains=searchvalue),\n \"email\": Customer.objects.filter(email__contains=searchvalue),\n \"phone\": Customer.objects.filter(phone__contains=searchvalue),\n \"occupation\": Customer.objects.filter(occupation__contains=searchvalue),\n \"city\": Customer.objects.filter(city__contains=searchvalue),\n \"district\": Customer.objects.filter(district__contains=searchvalue),\n \"state\": Customer.objects.filter(state__contains=searchvalue),\n \"country\": Customer.objects.filter(country__contains=searchvalue),\n \"state\": Customer.objects.filter(state__contains=searchvalue),\n }.get(searchkey, Customer.objects.all())\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerListView(ListView):\n model = Customer\n template_name = \"customer/index.html\"\n paginate_by = 12\n\n def get_context_data(self, **kwargs):\n context = super(CustomerListView, self).get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"fields\"] = Customer._meta.get_fields(include_parents=False)\n return context\n\n def get_queryset(self):\n searchkey = self.request.GET.get(\"searchkey\", None)\n searchvalue = self.request.GET.get(\"searchvalue\", None)\n if searchkey != None:\n return get_filtered_queryset(searchkey, searchvalue)\n else:\n return Customer.objects.all()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerDetailView(DetailView):\n model = Customer\n template_name = \"customer/detail.html\"\n pk_url_kwarg = \"id\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"detail\"\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerCreateView(SuccessMessageMixin, CreateView):\n model = Customer\n template_name = \"customer/create.html\"\n form_class = CustomerForm\n\n def get_success_message(self, cleaned_data):\n return \"Customer successfully created\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"create\"\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerUpdateView(SuccessMessageMixin, UpdateView):\n model = Customer\n template_name = \"customer/edit.html\"\n form_class = CustomerForm\n pk_url_kwarg = \"id\"\n\n def get_success_message(self, cleaned_data):\n return \"Customer successfully updated\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"edit\"\n return context\n\n\n@method_decorator(login_required, 
name=\"dispatch\")\nclass CustomerDeleteView(SuccessMessageMixin, DeleteView):\n model = Customer\n template_name = \"customer/delete.html\"\n pk_url_kwarg = \"id\"\n success_url = \"customer:customer-index\"\n\n def get_success_message(self, cleaned_data):\n return \"Customer successfully deleted\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"delete\"\n return context\n\n\nimport pandas as pd\nfrom pprint import pprint\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerImportView(SuccessMessageMixin, FormView):\n form_class = CustomerImportForm\n template_name = \"customer/import.html\"\n success_url = \"/customer/import/\"\n success_message = \"Customer successfully imported\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"import\"\n return context\n\n def form_valid(self, form):\n csvfile = form.cleaned_data.get(\"csvfile\", None)\n if csvfile is not None:\n df = pd.read_csv(csvfile)\n df_dict = df.to_dict(orient=\"index\")\n try:\n customer_list = [\n Customer(\n citizenship_no=v[\"citizenship number\"],\n pan_no=v[\"pan number\"],\n fullname=v[\"full name\"],\n dob=v[\"date of birth\"],\n gender=v[\"gender\"],\n email=v[\"email\"],\n phone=v[\"phone\"],\n occupation=v[\"occupation\"],\n city=v[\"city\"],\n district=v[\"district\"],\n state=v[\"state\"],\n country=v[\"country\"],\n address=v[\"address\"],\n )\n for i, v in df_dict.items()\n ]\n except KeyError:\n form.add_error(\n None,\n \"\"\"Column name in file doesn't match! \n Columns: \n 'citizenship number', 'pan number', 'full name', 'date of birth', 'gender', 'email',\n 'phone', 'occupation', 'city', 'district', 'state', 'country', 'address'.\"\"\",\n )\n return self.render_to_response(self.get_context_data(form=form))\n\n objs = Customer.objects.bulk_create(customer_list)\n pprint(objs)\n\n return super().form_valid(form)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Ascend-Huawei/AVOD
[ "ea62372517bbfa9d4020bc5ab2739ee182c63c56" ]
[ "avod/datasets/kitti/kitti_aug.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom npu_bridge.npu_init import *\nimport copy\n\nimport numpy as np\n\nAUG_FLIPPING = 'flipping'\nAUG_PCA_JITTER = 'pca_jitter'\n\n\ndef flip_image(image):\n \"\"\"Flips an image horizontally\n \"\"\"\n flipped_image = np.fliplr(image)\n return flipped_image\n\n\ndef flip_points(points):\n \"\"\"Flips a list of points (N, 3)\n \"\"\"\n flipped_points = np.copy(points)\n flipped_points[:, 0] = -points[:, 0]\n return flipped_points\n\n\ndef flip_point_cloud(point_cloud):\n \"\"\"Flips a point cloud (3, N)\n \"\"\"\n flipped_point_cloud = np.copy(point_cloud)\n flipped_point_cloud[0] = -point_cloud[0]\n return flipped_point_cloud\n\n\ndef flip_label_in_3d_only(obj_label):\n \"\"\"Flips only the 3D position of an object label. 
The 2D bounding box is\n not flipped to save time since it is not used.\n\n Args:\n obj_label: ObjectLabel\n\n Returns:\n A flipped object\n \"\"\"\n\n flipped_label = copy.deepcopy(obj_label)\n\n # Flip the rotation\n if obj_label.ry >= 0:\n flipped_label.ry = np.pi - obj_label.ry\n else:\n flipped_label.ry = -np.pi - obj_label.ry\n\n # Flip the t.x sign, t.y and t.z remains the unchanged\n flipped_t = (-flipped_label.t[0], flipped_label.t[1], flipped_label.t[2])\n flipped_label.t = flipped_t\n\n return flipped_label\n\n\ndef flip_boxes_3d(boxes_3d, flip_ry=True):\n \"\"\"Flips boxes_3d\n\n Args:\n boxes_3d: List of boxes in box_3d format\n flip_ry bool: (optional) if False, rotation is not flipped to save on\n computation (useful for flipping anchors)\n\n Returns:\n flipped_boxes_3d: Flipped boxes in box_3d format\n \"\"\"\n\n flipped_boxes_3d = np.copy(boxes_3d)\n\n if flip_ry:\n # Flip the rotation\n above_zero = boxes_3d[:, 6] >= 0\n below_zero = np.logical_not(above_zero)\n flipped_boxes_3d[above_zero, 6] = np.pi - boxes_3d[above_zero, 6]\n flipped_boxes_3d[below_zero, 6] = -np.pi - boxes_3d[below_zero, 6]\n\n # Flip the t.x sign, t.y and t.z remains the unchanged\n flipped_boxes_3d[:, 0] = -boxes_3d[:, 0]\n\n return flipped_boxes_3d\n\n\ndef flip_ground_plane(ground_plane):\n \"\"\"Flips the ground plane by negating the x coefficient\n (ax + by + cz + d = 0)\n\n Args:\n ground_plane: ground plane coefficients\n\n Returns:\n Flipped ground plane coefficients\n \"\"\"\n flipped_ground_plane = np.copy(ground_plane)\n flipped_ground_plane[0] = -ground_plane[0]\n return flipped_ground_plane\n\n\ndef flip_stereo_calib_p2(calib_p2, image_shape):\n \"\"\"Flips the stereo calibration matrix to correct the projection back to\n image space. Flipping the image can be seen as a movement of both the\n camera plane, and the camera itself. 
To account for this, the instrinsic\n matrix x0 value is flipped with respect to the image width, and the\n extrinsic matrix t1 value is negated.\n\n Args:\n calib_p2: 3 x 4 stereo camera calibration matrix\n image_shape: (h, w) image shape\n\n Returns:\n 'Flipped' calibration p2 matrix with shape (3, 4)\n \"\"\"\n flipped_p2 = np.copy(calib_p2)\n flipped_p2[0, 2] = image_shape[1] - calib_p2[0, 2]\n flipped_p2[0, 3] = -calib_p2[0, 3]\n\n return flipped_p2\n\n\ndef compute_pca(image_set):\n \"\"\"Calculates and returns PCA of a set of images\n\n Args:\n image_set: List of images read with cv2.imread in np.uint8 format\n\n Returns:\n PCA for the set of images\n \"\"\"\n\n # Check for valid input\n assert(image_set[0].dtype == np.uint8)\n\n # Reshape data into single array\n reshaped_data = np.concatenate([image\n for pixels in image_set for image in\n pixels])\n\n # Convert to float and normalize the data between [0, 1]\n reshaped_data = (reshaped_data / 255.0).astype(np.float32)\n\n # Calculate covariance, eigenvalues, and eigenvectors\n # np.cov calculates covariance around the mean, so no need to shift the\n # data\n covariance = np.cov(reshaped_data.T)\n e_vals, e_vecs = np.linalg.eigh(covariance)\n\n # svd can also be used instead\n # U, S, V = np.linalg.svd(mean_data)\n\n pca = np.sqrt(e_vals) * e_vecs\n\n return pca\n\n\ndef add_pca_jitter(img_data, pca):\n \"\"\"Adds a multiple of the principle components,\n with magnitude from a Gaussian distribution with mean 0 and stdev 0.1\n\n\n Args:\n img_data: Original image in read with cv2.imread in np.uint8 format\n pca: PCA calculated with compute_PCA for the image set\n\n Returns:\n Image with added noise\n \"\"\"\n\n # Check for valid input\n assert (img_data.dtype == np.uint8)\n\n # Make a copy of the image data\n new_img_data = np.copy(img_data).astype(np.float32) / 255.0\n\n # Calculate noise by multiplying pca with magnitude,\n # then sum horizontally since eigenvectors are in columns\n magnitude = np.random.randn(3) * 0.1\n noise = (pca * magnitude).sum(axis=1)\n\n # Add the noise to the image, and clip to valid range [0, 1]\n new_img_data = new_img_data + noise\n np.clip(new_img_data, 0.0, 1.0, out=new_img_data)\n\n # Change back to np.uint8\n new_img_data = (new_img_data * 255).astype(np.uint8)\n\n return new_img_data\n\n\ndef apply_pca_jitter(image_in):\n \"\"\"Applies PCA jitter or random noise to a single image\n\n Args:\n image_in: Image to modify\n\n Returns:\n Modified image\n \"\"\"\n image_in = np.asarray([image_in], dtype=np.uint8)\n\n pca = compute_pca(image_in)\n image_out = add_pca_jitter(image_in, pca)\n\n return image_out\n\n" ]
[ [ "numpy.logical_not", "numpy.sqrt", "numpy.clip", "numpy.fliplr", "numpy.asarray", "numpy.concatenate", "numpy.copy", "numpy.cov", "numpy.linalg.eigh", "numpy.random.randn" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
streamlit-badge-bot/a3-thanos
[ "3c673ce6a1321a9229a34a09e4e5f29313825e4f" ]
[ "streamlit_app.py" ]
[ "import streamlit as st\nimport pandas as pd\nimport altair as alt\nfrom vega_datasets import data\n\nst.title(\"Let's analyze some CO2 emission data &#x1f30e\")\nMAX_WIDTH = 1000\[email protected] # add caching so we load the data only once\ndef load_data():\n climate_url = \"https://raw.githubusercontent.com/ZeningQu/World-Bank-Data-by-Indicators/master/climate-change/climate-change.csv\"\n country_url = \"https://raw.githubusercontent.com/ZeningQu/World-Bank-Data-by-Indicators/master/climate-change/Metadata_Country_API_19_DS2_en_csv_v2_10137883.csv\"\n climate_df = pd.read_csv(climate_url)\n climate_df.set_index(\"Country Code\")\n country_df = pd.read_csv(country_url)\n country_df.set_index(\"Country Code\")\n climate = climate_df.merge(country_df, on='Country Code', how='inner')\n\n country_loc = \"https://gist.githubusercontent.com/tadast/8827699/raw/3cd639fa34eec5067080a61c69e3ae25e3076abb/countries_codes_and_coordinates.csv\"\n country_loc_df = pd.read_csv(country_loc)\n country_loc_df.replace('\"', '', regex=True, inplace=True)\n climate = climate.merge(country_loc_df, left_on=\"Country Name\", right_on=\"Country\", how=\"inner\")\n return climate\n\n\ndf = load_data()\ncountry_map_df = pd.read_table(\n \"https://raw.githubusercontent.com/KoGor/Map-Icons-Generator/master/data/world-110m-country-names.tsv\")\ncountries = alt.topo_feature(data.world_110m.url, 'countries')\n\nalt.data_transformers.disable_max_rows()\n\n\ndef preprocess_data(df):\n df = df.rename(columns={\"CO2 emissions from gaseous fuel consumption (% of total)\": \"gaseous fuel % of total\",\n \"CO2 emissions from liquid fuel consumption (% of total)\": \"liquid fuel % of total\",\n \"CO2 emissions from solid fuel consumption (% of total)\": \"solid fuel % of total\",\n \"CO2 emissions (metric tons per capita)\": \"CO2 emissions per capita\",\n \"CO2 emissions (kg per PPP $ of GDP)\" : \"CO2 emissions per GDP\",\n \"Renewable electricity output (% of total electricity output)\":\"Renewable electricity output % of total\",\n \"Terrestrial protected areas (% of total land area)\": \"Terrestrial protected areas % of total\"})\n df = df[df['Year'] <= 2011]\n df = df[df['Year'] > 1990]\n df = df[df['CO2 emissions (kt)'] > 0]\n # df = df[df[\"Population growth (annual %)\"] >= 0]\n return df\n\n\ndef world_map(highlight, highlight2):\n slider = alt.binding_range(min=1991, max=2011, step=1)\n select_year = alt.selection_single(name=\"Year\", fields=['Year'],\n bind=slider, init={'Year': 2011})\n\n map = alt.Chart(df).mark_geoshape(\n stroke='#aaa', strokeWidth=0.25\n ).encode(\n color=alt.condition(highlight2 | highlight, 'CO2 emissions (kt):Q', alt.value('lightgrey'), scale=alt.Scale(scheme='redyellowblue', reverse=True)),\n tooltip=[\"Country Name\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\"]\n ).transform_lookup(\n lookup='Country Name',\n from_=alt.LookupData(\n \"https://raw.githubusercontent.com/KoGor/Map-Icons-Generator/master/data/world-110m-country-names.tsv\",\n 'name', ['id', \"name\"])\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(countries, 'id', fields=[\"id\", \"type\", \"properties\", \"geometry\"])\n ).project(\n type=\"equirectangular\"\n ).properties(\n width=1100,\n height=650,\n title='worldwide CO2 total emissions and emissions per capita'\n ).add_selection(highlight, highlight2)\n\n percapita = alt.Chart(df).mark_circle(\n opacity=0.4 ,\n ).encode(\n size=alt.Size('CO2 emissions per capita:Q', scale=alt.Scale(range=[10, 3000])),\n color=alt.condition(highlight2 | 
highlight, alt.value('red'), alt.value('lightgrey')),\n longitude='Longitude (average):Q',\n latitude='Latitude (average):Q',\n tooltip = [\"Country Name\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\"]\n ).transform_lookup(\n lookup='Country Name',\n from_=alt.LookupData(\n \"https://raw.githubusercontent.com/KoGor/Map-Icons-Generator/master/data/world-110m-country-names.tsv\",\n 'name', ['id', \"name\"])\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(countries, 'id', fields=[\"id\", \"type\", \"properties\", \"geometry\"])\n ).project(\n type=\"equirectangular\"\n ).properties(\n width=900,\n height=400,\n )\n return alt.layer(map, percapita) \\\n .add_selection(select_year) \\\n .transform_filter(select_year)\n\n\ndef next_block():\n st.write(\"\\n\")\n st.write(\"\\n\")\n st.write(\"\\n\")\n st.write(\"\\n\")\n st.write(\"\\n\")\n st.write(\"\\n\")\n\n\ndef process_data(df,dataset):\n df2 = df\n\n df2 = df2[df2[\"Country Name\"].isin(dataset)]\n\n\n return df2.melt(id_vars=[\"Country Name\", \"Year\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\"],\n value_vars=[\"solid fuel % of total\", \"liquid fuel % of total\", \"gaseous fuel % of total\"],\n var_name=\"type\",\n value_name=\"CO2 emissions from different consumptions (%)\")\n\n\ndef scatter_plot(df,picked_interval, single_select):\n point = alt.Chart(df).mark_circle().encode(\n x=alt.X('Year:O', title=\"Year\"),\n y=alt.Y('CO2 emissions (kt)', title='Total CO2 emissions (kt)', scale=alt.Scale(zero=False, padding=1)),\n # color = alt.Color('Country Name:N',scale=alt.Scale(domain=dataset,type='ordinal'))\n # color = alt.Color('CO2 emissions (kt):Q')\n color=alt.condition(picked_interval|single_select, \"CO2 emissions (kt):Q\", alt.value(\"lightgray\"),\n scale=alt.Scale(scheme='redyellowblue', reverse=True), title=\"Total CO2 emissions (kt)\")\n , size=alt.Size('CO2 emissions per capita:Q',\n scale=alt.Scale(range=[300, 1000])),\n tooltip=[\"Country Name\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\", \"Year\"]\n ).add_selection(\n single_select\n )\n\n line = alt.Chart(df).mark_line(\n # strokeWidth=0.7\n ).encode(\n x=alt.X('Year:N', title=\"Year\"),\n y=alt.Y('CO2 emissions (kt):Q', title='Total CO2 emissions (kt)'),\n color = alt.condition(picked_interval | single_select, \"Country Name\", alt.value(\"lightgray\"), legend=None),\n # color = alt.Color('CO2 emissions (kt):Q')\n # color=alt.condition(picked_interval, \"CO2 emissions (kt):Q\", alt.value(\"lightgray\"),\n # scale=alt.Scale(scheme='redyellowblue', reverse=True), title=\"Total CO2 emissions (kt)\")\n size=alt.condition(~(picked_interval | single_select), alt.value(1), alt.value(3)),\n tooltip=[\"Country Name\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\", \"Year\"]\n ).properties(\n width=650,\n height=500,\n title='CO2 total emission and emission per capita overtime'\n )\n\n labels = alt.Chart(df).mark_text(align='center', dx=-20, dy=-25).encode(\n alt.X('Year:O', aggregate='max'),\n alt.Y('CO2 emissions (kt)', aggregate={'argmax': 'Year'}),\n alt.Text('Country Name'),\n alt.Color('CO2 emissions (kt):Q', aggregate={'argmax': 'Year'},\n scale=alt.Scale(scheme='redyellowblue', reverse=True), legend=None),\n size=alt.condition(~(single_select), alt.value(17), alt.value(20)),\n ).properties(title='CO2 total emission and emission per capita', width=600)\n\n points = line+point+labels\n return points\n\n\ndef shape_plot(df, single_select):\n shape = alt.Chart(df).mark_circle(\n opacity=0.35\n ).encode(\n alt.X('CO2 emissions 
(kt):Q'),\n alt.Y('CO2 emissions per capita:Q'),\n color=alt.condition(single_select, 'Country Name:N', alt.value('lightgrey'), scale=alt.Scale(scheme=\"tableau10\")),\n shape=alt.Shape('Country Name:N', legend=None),\n size=alt.value(250),\n ).properties(\n width=300,\n height=250,\n title='CO2 total emissions and emissions per capita'\n )\n\n shape_labels = shape.mark_text(\n align='center',\n baseline='middle',\n dy=-25\n ).encode(\n text='Country Name',\n size=alt.value(15)\n )\n shapes = shape # + shape_labels\n return shapes\n\n\n\ndef world_map_for_factors(highlight, dataset, select_year):\n\n cols=alt.hconcat()\n for val in dataset:\n map = alt.Chart(df).mark_geoshape(\n stroke='#aaa', strokeWidth=0.25\n ).encode(\n x = alt.X(\"Country Name\"),\n color=alt.condition(highlight, val, alt.value('lightgrey'), scale=alt.Scale(scheme='yelloworangered'), title=\"\"),\n tooltip=[\"Country Name\"] + dataset\n ).transform_lookup(\n lookup='Country Name',\n from_=alt.LookupData(\n \"https://raw.githubusercontent.com/KoGor/Map-Icons-Generator/master/data/world-110m-country-names.tsv\",\n 'name', ['id', \"name\"])\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(countries, 'id', fields=[\"id\", \"type\", \"properties\", \"geometry\"])\n ).project(\n type=\"equirectangular\"\n ).properties(\n width=500,\n height=200,\n title=val,\n ).add_selection(select_year, highlight) \\\n .transform_filter(select_year)\n\n cols &= map\n return cols.resolve_scale(color='independent')\n\ndef total_trend(highlight, highlight2):\n total = alt.Chart(df).mark_bar().encode(\n alt.X('Year:N', title=\"Year\"),\n alt.Y('CO2 emissions (kt)', title='Total CO2 emissions (kt)'),\n color=alt.Color('Country Name', scale=alt.Scale(scheme=\"set3\"), title='Countries'),\n order=alt.Order(\n # Sort the segments of the bars by this field\n 'CO2 emissions (kt)',\n sort='ascending'\n ),\n tooltip=[\"Country Name\", 'CO2 emissions (kt)']\n ).properties(\n width=530,\n height=350,\n title='Total CO2 emissions world trend'\n ).transform_filter(highlight | highlight2)\n return total\n\ndef percapita_trend(highlight, highlight2):\n total = alt.Chart(df).mark_bar().encode(\n alt.X('Year:N', title=\"Year\"),\n alt.Y('CO2 emissions per capita', title='CO2 emissions per capita (kt)'),\n color=alt.Color('Country Name', scale=alt.Scale(scheme=\"set3\"), title='Countries'),\n order=alt.Order(\n # Sort the segments of the bars by this field\n 'CO2 emissions per capita',\n sort='ascending'\n ),\n tooltip=[\"Country Name\", 'CO2 emissions per capita']\n ).properties(\n width=530,\n height=350,\n title='CO2 emissions per capita world trend'\n ).transform_filter(highlight | highlight2)\n return total\n\n\ndef step1_introduction():\n st.header(\"Step1: What is CO2 Emissions?\")\n st.write(\"What is CO2 Emissions? Why is it important? Let's watch a short introduction video from BBC!\")\n st.video(\"https://www.youtube.com/watch?v=rFw8MopzXdI&ab_channel=BBCNews\")\n next_block()\n st.header(\"Dataset Description\")\n st.write('''\n This dataset comes from [WorldBank]\n (https://github.com/ZeningQu/World-Bank-Data-by-Indicators) It covers over 174 countries and 50 indicators.\n ''')\n if st.checkbox(\"Show Raw Data\"):\n st.write(df)\n\n\ndef step2_wordwide_trend():\n # next_block()\n st.header(\"Step2: Explore the worldwide CO2 emissions trend!\")\n st.write(\"Tips:\")\n st.write(\n \"1. Put your mouse on the country for detailed information. 
The stacked bar plots below show total CO2 emissions and emissions per capita for all years.\")\n st.write(\"2. Press shift and select multiple countries for comparison.\")\n st.write(\"3. Try to play with the year slide bar below!\")\n highlight1 = alt.selection_multi(on='click', fields=['Country Name'], empty='all')\n highlight2 = alt.selection_single(on='mouseover', fields=['Country Name'], empty='all')\n st.write((world_map(highlight1, highlight2) & alt.hconcat(total_trend(highlight1, highlight2),\n percapita_trend(highlight1, highlight2))).resolve_scale(\n y='independent',\n size='independent'\n ))\n\ndef step3_co2_emissions_sources():\n # next_block()\n st.header(\"Step3: CO2 emissions from different consumptions\")\n st.write(\"Tips:\")\n st.write(\"1. Add countries to the plot and compare!\")\n st.write(\n \"2. Hover your mouse over points on the left plot for detailed information and corresponding CO2 emissions sources.\")\n st.write(\n \"3. Select year interval on the left plot and explore the change of CO2 emissions sources overtime for each country!\")\n\n dataset = st.multiselect(\"Choose countries you want to explore!\", country_map_df[\"name\"].to_list(),\n [\"China\", \"United States\", \"India\", \"Qatar\"])\n df2 = process_data(df,dataset)\n picked_interval = alt.selection_interval(encodings=[\"x\"])\n single_select = alt.selection(type=\"single\",on='mouseover', fields=['Country Name'])\n points = scatter_plot(df2,picked_interval, single_select)\n shapes = shape_plot(df2, single_select)\n\n concat = alt.vconcat(\n alt.Chart(df2).mark_bar(\n opacity=0.9\n ).encode(\n alt.X(\"mean(CO2 emissions from different consumptions (%))\"),\n alt.Y('Country Name', title=\"\"),\n color = alt.condition(single_select, 'type', alt.value('lightgrey'),scale=alt.Scale(scheme=\"tableau10\"), title=\"type\"),\n # color = single_select)\n # alt.Color('type', scale=alt.Scale(scheme=\"tableau10\"), title='type'),\n ).properties(\n width=300,\n height=150,\n title='CO2 emissions from different consumptions'\n ).transform_filter(picked_interval), shapes.transform_filter(picked_interval)\n ).resolve_scale(\n color='independent'\n )\n\n vconcat = alt.hconcat(\n points.add_selection(picked_interval), concat\n ).resolve_scale(\n color='independent'\n , size='independent'\n )\n\n st.write(vconcat)\n\ndef step4_related_factors():\n # next_block()\n st.header(\"Step4: Factors that may affect CO2 emissions\")\n st.write(\"Tips:\")\n st.write(\"1. Add indicators you want to compare with CO2 emissions!\")\n st.write(\"2. Put your mouse on a country and compare across indicators!\")\n st.write(\"3. 
Remember to play with the year slide bar :)\")\n\n slider = alt.binding_range(min=1991, max=2011, step=1)\n select_year = alt.selection_single(name=\"Year\", fields=['Year'],\n bind=slider, init={'Year': 2011})\n highlight = alt.selection_single(on='mouseover', fields=['Country Name'],\n empty='all') # init={\"Country Name\": \"United States\"})\n\n dataset2 = st.multiselect(\"Choose factors to compare!\",\n [\"CO2 emissions per GDP\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\",\n \"Urban population (% of total)\",\n \"Renewable energy consumption (% of total final energy consumption)\",\n \"Forest area (% of land area)\", \"Marine protected areas (% of territorial waters)\",\n \"Population growth (annual %)\", \"Renewable electricity output % of total\",\n \"Terrestrial protected areas % of total\",\n \"Total greenhouse gas emissions (kt of CO2 equivalent)\"],\n [\"CO2 emissions (kt)\", \"CO2 emissions per GDP\",\n \"Renewable electricity output % of total\"])\n\n st.write(alt.hconcat(world_map_for_factors(highlight, dataset2, select_year), alt.Chart(df).mark_point().encode(\n alt.X(alt.repeat(\"column\"), type='quantitative'),\n alt.Y(alt.repeat(\"row\"), type='quantitative'),\n color='Country Name:N',\n ).properties(\n width=160,\n height=160,\n ).repeat(\n row=dataset2,\n column=dataset2,\n ).transform_filter(select_year).interactive()))\n\n\n################# main plots ############\n\n\ndf = preprocess_data(df)\n\n\nst.sidebar.write('Follow the steps below and begin to explore!')\n\nfunctions = {\n 'Step1: What is CO2 emissions': lambda: step1_introduction(),\n 'Step2: Worldwide CO2 emissions': lambda: step2_wordwide_trend(),\n 'Step3: CO2 emissions sources': lambda: step3_co2_emissions_sources(),\n 'Step4: CO2 emissions related factors': lambda: step4_related_factors()\n}\nmenu = st.sidebar.selectbox(\n \"Menu\", list(functions.keys())\n)\nfunctions[menu]()\n\n\n" ]
[ [ "pandas.read_table", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
minhhoccode/Interpolate-with-flask
[ "7f8cb8f551e9bd36beca911e0987b6c1bc168356" ]
[ "NoiSuy.py" ]
[ "from sympy import *\r\nimport numpy as np\r\nimport array as arr\r\n\r\ndef TongQuat(X, Y):\r\n list ( zip(X , Y ) )\r\n x = symbols('x')\r\n m = len(X)\r\n A = [[X[i] ** j for j in range (m) ] for i in range (m) ] \r\n kq = np.linalg.solve(A,Y)\r\n hamSo = ''\r\n for i in range (len(kq)):\r\n hamSo += '+%d*(x ** %d)' %(kq[i], i)\r\n P = lambda x: eval(hamSo )\r\n f1 = str(P(x))\r\n f1 = eval(f1)\r\n f1 = latex(f1)\r\n return f1, A\r\n\r\n\r\n\r\ndef Newton(X, Y, pp):\r\n X = [0.0,0.5,1.0,1.5,2.0] #mốc nội suy\r\n Y = [-1.0,0.125,1.0,2.375,5.0]\r\n n = len(X)\r\n h = X[1]-X[0]\r\n x , t = symbols ('x t')\r\n sp = [ [d(k, i, Y) for i in range(n-k)] for k in range (n)]\r\n if pp == 'Newton':\r\n P = Y[0]\r\n for k in range(1, n): # k chạy từ 1 tới n-1\r\n prod = d(k, 0,Y)/factorial(k)\r\n for i in range(k):\r\n prod *= t - i\r\n P += prod\r\n P = P . subs (t , ( x - X [0]) / h) . expand()\r\n if pp == 'Newton Lùi':\r\n m = n-1\r\n P = Y[m]\r\n for k in range(1, n): \r\n prod = d(k, m-k, Y)/factorial(k)\r\n for i in range(k):\r\n prod *= t + i\r\n P += prod\r\n P = P.subs(t, (x - X[m]) / h).expand()\r\n print(P)\r\n f1 = latex(P)\r\n return f1, sp\r\n\r\ndef d (k , i, Y ) :\r\n if k == 0:\r\n return Y[i]\r\n return d (k -1,i +1, Y ) - d (k -1 , i, Y )\r\n\r\n\r\ndef checkCondition(X, Y):\r\n n = len(X)\r\n h = X[1]-X[0]\r\n if(len(X) != len(Y)):\r\n return False\r\n for i in range(0,n-1):\r\n if(X[i+1] - X[i] != h):\r\n return False\r\n return True\r\n\r\ndef Lagrange(X,Y):\r\n n = len(X)\r\n x = symbols('x')\r\n P = 0\r\n for i in range (n) :\r\n P += Y [i ] * L (i , x, n , X )\r\n P = P.expand()\r\n f1 = latex(P)\r\n print(f1)\r\n s = []\r\n s1 = [] \r\n for i in range(n):\r\n a, b = L(i, x, n, X), L(i, x, n , X).expand()\r\n s.append( latex(a))\r\n s1.append( latex(b))\r\n return f1, s, s1\r\ndef L (i , x, n, X ) :\r\n prod = 1\r\n for j in range (n) :\r\n if j != i :\r\n prod *= ( x - X[ j ]) / ( X [ i ] - X [ j ])\r\n return prod\r\n\r\n" ]
[ [ "numpy.linalg.solve" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
Xi-L/PMOCO
[ "81dc7c66e5bee34f401d16c29cc39b5e2c3a62e6" ]
[ "MOKP/POMO/test_mokp_n100.py" ]
[ "##########################################################################################\r\n# Machine Environment Config\r\nDEBUG_MODE = False\r\nUSE_CUDA = not DEBUG_MODE\r\nCUDA_DEVICE_NUM = 0\r\n\r\n##########################################################################################\r\n# Path Config\r\nimport os\r\nimport sys\r\nimport torch\r\nimport numpy as np\r\n\r\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\r\nsys.path.insert(0, \"..\") # for problem_def\r\nsys.path.insert(0, \"../..\") # for utils\r\n\r\n##########################################################################################\r\n# import\r\nimport logging\r\nfrom utils.utils import create_logger, copy_all_src\r\n\r\n\r\nfrom MOKPTester import KPTester as Tester\r\nfrom MOKProblemDef import get_random_problems\r\n##########################################################################################\r\nimport time\r\nimport hvwfg\r\nimport pickle\r\n\r\nfrom matplotlib import pyplot as plt\r\nimport matplotlib as mpl\r\nmpl.style.use('default')\r\n##########################################################################################\r\n# parameters\r\nenv_params = {\r\n 'problem_size': 100,\r\n 'pomo_size': 100,\r\n}\r\n\r\nmodel_params = {\r\n 'embedding_dim': 128,\r\n 'sqrt_embedding_dim': 128**(1/2),\r\n 'encoder_layer_num': 6,\r\n 'qkv_dim': 16,\r\n 'head_num': 8,\r\n 'logit_clipping': 10,\r\n 'ff_hidden_dim': 512,\r\n 'eval_type': 'argmax',\r\n}\r\n\r\ntester_params = {\r\n 'use_cuda': USE_CUDA,\r\n 'cuda_device_num': CUDA_DEVICE_NUM,\r\n 'model_load': {\r\n 'path': './result/saved_kp100_model', # directory path of pre-trained model and log files saved.\r\n 'epoch': 200 # epoch version of pre-trained model to laod.\r\n },\r\n 'test_episodes': 100, \r\n 'test_batch_size': 100,\r\n 'augmentation_enable': True,\r\n 'aug_factor': 1, \r\n 'aug_batch_size': 100 \r\n}\r\nif tester_params['augmentation_enable']:\r\n tester_params['test_batch_size'] = tester_params['aug_batch_size']\r\n\r\nlogger_params = {\r\n 'log_file': {\r\n 'desc': 'test_kp_n100',\r\n 'filename': 'run_log'\r\n }\r\n}\r\n\r\n##########################################################################################\r\ndef _set_debug_mode():\r\n global tester_params\r\n tester_params['test_episodes'] = 100\r\n\r\n\r\ndef _print_config():\r\n logger = logging.getLogger('root')\r\n logger.info('DEBUG_MODE: {}'.format(DEBUG_MODE))\r\n logger.info('USE_CUDA: {}, CUDA_DEVICE_NUM: {}'.format(USE_CUDA, CUDA_DEVICE_NUM))\r\n [logger.info(g_key + \"{}\".format(globals()[g_key])) for g_key in globals().keys() if g_key.endswith('params')]\r\n \r\n##########################################################################################\r\ndef main(n_sols = 101):\r\n \r\n timer_start = time.time()\r\n logger_start = time.time()\r\n \r\n if DEBUG_MODE:\r\n _set_debug_mode()\r\n \r\n create_logger(**logger_params)\r\n _print_config()\r\n \r\n tester = Tester(env_params=env_params,\r\n model_params=model_params,\r\n tester_params=tester_params)\r\n \r\n copy_all_src(tester.result_folder)\r\n \r\n sols = np.zeros([n_sols, 2])\r\n \r\n shared_problem = get_random_problems(tester_params['test_episodes'], env_params['problem_size'])\r\n \r\n for i in range(n_sols):\r\n pref = torch.zeros(2).cuda()\r\n pref[0] = 1 - 0.01 * i\r\n pref[1] = 0.01 * i\r\n pref = pref / torch.sum(pref)\r\n \r\n score = tester.run(shared_problem,pref)\r\n sols[i] = np.array(score)\r\n \r\n timer_end = time.time()\r\n \r\n total_time = timer_end - timer_start\r\n 
\r\n # MOKP 50\r\n #single_task = [20.12, 20.12]\r\n \r\n # MOKP 100\r\n single_task = [40.45, 40.45]\r\n \r\n # MOKP 200\r\n #single_task = [57.62, 57.62]\r\n \r\n fig = plt.figure()\r\n \r\n plt.axvline(single_task[0],linewidth=3 , alpha = 0.25)\r\n plt.axhline(single_task[1],linewidth=3,alpha = 0.25, label = 'Single Objective KP (DP)')\r\n \r\n plt.plot(sols[:,0],sols[:,1], marker = 'o', c = 'C1',ms = 3, label='Pareto MOCO (Ours)')\r\n \r\n plt.legend()\r\n \r\n #ref = np.array([-15.5,-15.5]) # refpoint: [20.5,20.5] e.g., divide by (20.5 - 15.5) * (20 - 15.5)\r\n ref = np.array([-30,-30]) # refpoint: [40,40] e.g., divide by (40 - 30) * (40 - 30)\r\n #ref = np.array([-40,-40]) # refpoint: [60,60] e.g., divide by (60 - 40) * (60 - 40)\r\n \r\n hv = hvwfg.wfg(-sols.astype(float), ref.astype(float))\r\n \r\n hv_ratio = hv / ((40 - 30) * (40 - 30))\r\n\r\n print('Run Time(s): {:.4f}'.format(total_time))\r\n print('HV Ratio: {:.4f}'.format(hv_ratio))\r\n\r\n##########################################################################################\r\nif __name__ == \"__main__\":\r\n main()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.axvline", "matplotlib.pyplot.axhline", "matplotlib.style.use", "torch.zeros", "torch.sum", "matplotlib.pyplot.plot", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AntonMu/albert
[ "f928a488380d359520e407e7862b031ffa5bb603" ]
[ "run_classifier.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning on classification tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nfrom albert import classifier_utils\nfrom albert import fine_tuning_utils\nfrom albert import modeling\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import cluster_resolver as contrib_cluster_resolver\nfrom tensorflow.contrib import tpu as contrib_tpu\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\",\n None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\",\n)\n\nflags.DEFINE_string(\n \"albert_config_file\",\n None,\n \"The config json file corresponding to the pre-trained ALBERT model. \"\n \"This specifies the model architecture.\",\n)\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\n \"vocab_file\", None, \"The vocabulary file that the ALBERT model was trained on.\"\n)\n\nflags.DEFINE_string(\n \"spm_model_file\", None, \"The model file for sentence piece tokenization.\"\n)\n\nflags.DEFINE_string(\n \"output_dir\",\n None,\n \"The output directory where the model checkpoints will be written.\",\n)\n\nflags.DEFINE_string(\n \"cached_dir\",\n None,\n \"Path to cached training and dev tfrecord file. \"\n \"The file will be generated if not exist.\",\n)\n\n## Other parameters\nflags.DEFINE_float(\"train_subset\", 1.0, \"The subset of the train set.\")\nflags.DEFINE_float(\"dev_subset\", 1.0, \"The subset of the dev set.\")\nflags.DEFINE_float(\"test_subset\", 1.0, \"The subset of the test set.\")\n\nflags.DEFINE_string(\n \"init_checkpoint\",\n None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\",\n)\n\nflags.DEFINE_string(\n \"albert_hub_module_handle\", None, \"If set, the ALBERT hub module to use.\"\n)\n\nflags.DEFINE_bool(\n \"do_lower_case\",\n True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\",\n)\n\nflags.DEFINE_integer(\n \"max_seq_length\",\n 512,\n \"The maximum total input sequence length after WordPiece tokenization. 
\"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\",\n)\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False, \"Whether to run the model in inference mode on the test set.\"\n)\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_integer(\"train_step\", 1000, \"Total number of training steps to perform.\")\n\nflags.DEFINE_integer(\n \"warmup_step\", 0, \"number of steps to perform linear learning rate warmup for.\"\n)\n\nflags.DEFINE_integer(\n \"save_checkpoints_steps\", 1000, \"How often to save the model checkpoint.\"\n)\n\nflags.DEFINE_integer(\"keep_checkpoint_max\", 5, \"How many checkpoints to keep.\")\n\nflags.DEFINE_integer(\n \"iterations_per_loop\", 1000, \"How many steps to make in each estimator call.\"\n)\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\"optimizer\", \"adamw\", \"Optimizer to use\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\",\n None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\",\n)\n\ntf.flags.DEFINE_string(\n \"tpu_zone\",\n None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\",\n)\n\ntf.flags.DEFINE_string(\n \"gcp_project\",\n None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\",\n)\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\",\n 8,\n \"Only used if `use_tpu` is True. 
Total number of TPU cores to use.\",\n)\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": classifier_utils.ColaProcessor,\n \"mnli\": classifier_utils.MnliProcessor,\n \"mismnli\": classifier_utils.MisMnliProcessor,\n \"mrpc\": classifier_utils.MrpcProcessor,\n \"rte\": classifier_utils.RteProcessor,\n \"sst-2\": classifier_utils.Sst2Processor,\n \"sts-b\": classifier_utils.StsbProcessor,\n \"qqp\": classifier_utils.QqpProcessor,\n \"qnli\": classifier_utils.QnliProcessor,\n \"wnli\": classifier_utils.WnliProcessor,\n }\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\"\n )\n\n if not FLAGS.albert_config_file and not FLAGS.albert_hub_module_handle:\n raise ValueError(\n \"At least one of `--albert_config_file` and \"\n \"`--albert_hub_module_handle` must be set\"\n )\n\n if FLAGS.albert_config_file:\n albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)\n if FLAGS.max_seq_length > albert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the ALBERT model \"\n \"was only trained up to sequence length %d\"\n % (FLAGS.max_seq_length, albert_config.max_position_embeddings)\n )\n else:\n albert_config = None # Get the config from TF-Hub.\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name](\n use_spm=True if FLAGS.spm_model_file else False,\n do_lower_case=FLAGS.do_lower_case,\n )\n\n label_list = processor.get_labels()\n\n tokenizer = fine_tuning_utils.create_vocab(\n vocab_file=FLAGS.vocab_file,\n do_lower_case=FLAGS.do_lower_case,\n spm_model_file=FLAGS.spm_model_file,\n hub_module=FLAGS.albert_hub_module_handle,\n )\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project\n )\n\n is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2\n if FLAGS.do_train:\n iterations_per_loop = int(\n min(FLAGS.iterations_per_loop, FLAGS.save_checkpoints_steps)\n )\n else:\n iterations_per_loop = FLAGS.iterations_per_loop\n run_config = contrib_tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=int(FLAGS.save_checkpoints_steps),\n keep_checkpoint_max=0,\n tpu_config=contrib_tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host,\n ),\n )\n\n train_examples = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(\n FLAGS.data_dir, FLAGS.train_subset\n )\n model_fn = classifier_utils.model_fn_builder(\n albert_config=albert_config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=FLAGS.train_step,\n num_warmup_steps=FLAGS.warmup_step,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu,\n task_name=task_name,\n hub_module=FLAGS.albert_hub_module_handle,\n optimizer=FLAGS.optimizer,\n )\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = contrib_tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n 
eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size,\n )\n\n if FLAGS.do_train:\n cached_dir = FLAGS.cached_dir\n if not cached_dir:\n cached_dir = FLAGS.output_dir\n train_file = os.path.join(cached_dir, task_name + \"_train.tf_record\")\n if not tf.gfile.Exists(train_file):\n classifier_utils.file_based_convert_examples_to_features(\n train_examples,\n label_list,\n FLAGS.max_seq_length,\n tokenizer,\n train_file,\n task_name,\n )\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", FLAGS.train_step)\n train_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.train_batch_size,\n )\n estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_step)\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir, FLAGS.dev_subset)\n num_actual_eval_examples = len(eval_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. So we pad with fake examples which are ignored\n # later on. These do NOT count towards the metric (all tf.metrics\n # support a per-instance weight, and these get a weight of 0.0).\n while len(eval_examples) % FLAGS.eval_batch_size != 0:\n eval_examples.append(classifier_utils.PaddingInputExample())\n\n cached_dir = FLAGS.cached_dir\n if not cached_dir:\n cached_dir = FLAGS.output_dir\n eval_file = os.path.join(cached_dir, task_name + \"_eval.tf_record\")\n if not tf.gfile.Exists(eval_file):\n classifier_utils.file_based_convert_examples_to_features(\n eval_examples,\n label_list,\n FLAGS.max_seq_length,\n tokenizer,\n eval_file,\n task_name,\n )\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\n \" Num examples = %d (%d actual, %d padding)\",\n len(eval_examples),\n num_actual_eval_examples,\n len(eval_examples) - num_actual_eval_examples,\n )\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n assert len(eval_examples) % FLAGS.eval_batch_size == 0\n eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.eval_batch_size,\n )\n\n best_trial_info_file = os.path.join(FLAGS.output_dir, \"best_trial.txt\")\n\n def _best_trial_info():\n \"\"\"Returns information about which checkpoints have been evaled so far.\"\"\"\n if tf.gfile.Exists(best_trial_info_file):\n with tf.gfile.GFile(best_trial_info_file, \"r\") as best_info:\n (\n global_step,\n best_metric_global_step,\n metric_value,\n ) = best_info.read().split(\":\")\n global_step = int(global_step)\n best_metric_global_step = int(best_metric_global_step)\n metric_value = float(metric_value)\n else:\n metric_value = -1\n best_metric_global_step = -1\n global_step = -1\n 
tf.logging.info(\n \"Best trial info: Step: %s, Best Value Step: %s, \" \"Best Value: %s\",\n global_step,\n best_metric_global_step,\n metric_value,\n )\n return global_step, best_metric_global_step, metric_value\n\n def _remove_checkpoint(checkpoint_path):\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = checkpoint_path + \".{}\".format(ext)\n tf.logging.info(\"removing {}\".format(src_ckpt))\n tf.gfile.Remove(src_ckpt)\n\n def _find_valid_cands(curr_step):\n filenames = tf.gfile.ListDirectory(FLAGS.output_dir)\n candidates = []\n for filename in filenames:\n if filename.endswith(\".index\"):\n ckpt_name = filename[:-6]\n idx = ckpt_name.split(\"-\")[-1]\n if int(idx) > curr_step:\n candidates.append(filename)\n return candidates\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n\n if task_name == \"sts-b\":\n key_name = \"pearson\"\n elif task_name == \"cola\":\n key_name = \"matthew_corr\"\n else:\n key_name = \"eval_accuracy\"\n\n global_step, best_perf_global_step, best_perf = _best_trial_info()\n writer = tf.gfile.GFile(output_eval_file, \"w\")\n while global_step < FLAGS.train_step:\n steps_and_files = {}\n filenames = tf.gfile.ListDirectory(FLAGS.output_dir)\n for filename in filenames:\n if filename.endswith(\".index\"):\n ckpt_name = filename[:-6]\n cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)\n if cur_filename.split(\"-\")[-1] == \"best\":\n continue\n gstep = int(cur_filename.split(\"-\")[-1])\n if gstep not in steps_and_files:\n tf.logging.info(\"Add {} to eval list.\".format(cur_filename))\n steps_and_files[gstep] = cur_filename\n tf.logging.info(\"found {} files.\".format(len(steps_and_files)))\n if not steps_and_files:\n tf.logging.info(\n \"found 0 file, global step: {}. 
Sleeping.\".format(global_step)\n )\n time.sleep(60)\n else:\n for checkpoint in sorted(steps_and_files.items()):\n step, checkpoint_path = checkpoint\n if global_step >= step:\n if (\n best_perf_global_step != step\n and len(_find_valid_cands(step)) > 1\n ):\n _remove_checkpoint(checkpoint_path)\n continue\n result = estimator.evaluate(\n input_fn=eval_input_fn,\n steps=eval_steps,\n checkpoint_path=checkpoint_path,\n )\n global_step = result[\"global_step\"]\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n writer.write(\"best = {}\\n\".format(best_perf))\n if result[key_name] > best_perf:\n best_perf = result[key_name]\n best_perf_global_step = global_step\n elif len(_find_valid_cands(global_step)) > 1:\n _remove_checkpoint(checkpoint_path)\n writer.write(\"=\" * 50 + \"\\n\")\n writer.flush()\n with tf.gfile.GFile(best_trial_info_file, \"w\") as best_info:\n best_info.write(\n \"{}:{}:{}\".format(\n global_step, best_perf_global_step, best_perf\n )\n )\n writer.close()\n\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = \"model.ckpt-{}.{}\".format(best_perf_global_step, ext)\n tgt_ckpt = \"model.ckpt-best.{}\".format(ext)\n tf.logging.info(\"saving {} to {}\".format(src_ckpt, tgt_ckpt))\n tf.io.gfile.rename(\n os.path.join(FLAGS.output_dir, src_ckpt),\n os.path.join(FLAGS.output_dir, tgt_ckpt),\n overwrite=True,\n )\n\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(\n FLAGS.data_dir, FLAGS.test_subset\n )\n num_actual_predict_examples = len(predict_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. 
So we pad with fake examples which are ignored\n # later on.\n while len(predict_examples) % FLAGS.predict_batch_size != 0:\n predict_examples.append(classifier_utils.PaddingInputExample())\n\n predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n classifier_utils.file_based_convert_examples_to_features(\n predict_examples,\n label_list,\n FLAGS.max_seq_length,\n tokenizer,\n predict_file,\n task_name,\n )\n\n tf.logging.info(\"***** Running prediction*****\")\n tf.logging.info(\n \" Num examples = %d (%d actual, %d padding)\",\n len(predict_examples),\n num_actual_predict_examples,\n len(predict_examples) - num_actual_predict_examples,\n )\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n predict_drop_remainder = True if FLAGS.use_tpu else False\n predict_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.predict_batch_size,\n )\n\n checkpoint_path = os.path.join(FLAGS.output_dir, \"model.ckpt-best\")\n result = estimator.predict(\n input_fn=predict_input_fn, checkpoint_path=checkpoint_path\n )\n\n output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n output_submit_file = os.path.join(FLAGS.output_dir, \"submit_results.tsv\")\n with tf.gfile.GFile(output_predict_file, \"w\") as pred_writer, tf.gfile.GFile(\n output_submit_file, \"w\"\n ) as sub_writer:\n sub_writer.write(\"index\" + \"\\t\" + \"prediction\\n\")\n num_written_lines = 0\n tf.logging.info(\"***** Predict results *****\")\n for (i, (example, prediction)) in enumerate(zip(predict_examples, result)):\n probabilities = prediction[\"probabilities\"]\n if i >= num_actual_predict_examples:\n break\n output_line = (\n \"\\t\".join(\n str(class_probability) for class_probability in probabilities\n )\n + \"\\n\"\n )\n pred_writer.write(output_line)\n\n if task_name != \"sts-b\":\n actual_label = label_list[int(prediction[\"predictions\"])]\n else:\n actual_label = str(prediction[\"predictions\"])\n sub_writer.write(example.guid + \"\\t\" + actual_label + \"\\n\")\n num_written_lines += 1\n assert num_written_lines == num_actual_predict_examples\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"spm_model_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n" ]
[ [ "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.compat.v1.gfile.Remove", "tensorflow.compat.v1.gfile.ListDirectory", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.logging.set_verbosity", "tensorflow.compat.v1.flags.DEFINE_string", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.gfile.GFile", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CsatiZoltan/imagepy
[ "9a60ad3b1e8f79f2dcc47e4f246a4f31a96f99f5", "df44caef2822f2c543b9fa4ef6132a7b1014623e" ]
[ "imagepy/tools/Draw/floodfill_tol.py", "imagepy/tools/Measure/profile_tol.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 19 17:35:09 2016\n\n@author: yxl\n\"\"\"\n\nfrom imagepy.core.engine import Tool\nimport numpy as np\nfrom imagepy.core.manager import ColorManager\n# from imagepy.core.draw.fill import floodfill\nfrom skimage.morphology import flood_fill, flood\n\nclass Plugin(Tool):\n title = 'Flood Fill'\n para = {'tor':10, 'con':'8-connect'}\n view = [(int, 'tor', (0,1000), 0, 'torlorance', 'value'),\n (list, 'con', ['4-connect', '8-connect'], str, 'fill', 'pix')]\n \n def mouse_down(self, ips, x, y, btn, **key):\n \n img, color = ips.img, ColorManager.get_front()\n if int(y)<0 or int(x)<0: return\n if int(y)>=img.shape[0] or int(x)>=img.shape[1]: return \n\n ips.snapshot()\n connectivity=(self.para['con']=='8-connect')+1\n img = ips.img.reshape((ips.img.shape+(1,))[:3])\n msk = np.ones(img.shape[:2], dtype=np.bool)\n for i in range(img.shape[2]):\n msk &= flood(img[:,:,i], (int(y),int(x)), \n connectivity=connectivity, tolerance=self.para['tor'])\n img[msk] = np.mean(color) if img.shape[2]==1 else color\n ips.update()\n \n def mouse_up(self, ips, x, y, btn, **key):\n pass\n \n def mouse_move(self, ips, x, y, btn, **key):\n pass\n \n def mouse_wheel(self, ips, x, y, d, **key):\n pass\n\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 3 22:21:32 2017\n\n@author: yxl\n\"\"\"\n\nimport wx\nfrom imagepy import IPy\nfrom imagepy.core.engine import Tool\nimport numpy as np\nimport pandas as pd\nfrom numpy.linalg import norm\nfrom .setting import Setting\nfrom math import ceil\n\nclass Profile:\n \"\"\"Define the profile class\"\"\"\n dtype = 'distance'\n def __init__(self, body=None, unit=None):\n self.body = body if body!=None else []\n self.buf, self.unit = [], unit\n\n def addline(self):\n line = self.buf\n if len(line)!=2 or line[0] !=line[-1]:\n self.body.append(line)\n self.buf = []\n\n def snap(self, x, y, lim):\n minl, idx = 1000, None\n for i in self.body:\n for j in i:\n d = (j[0]-x)**2+(j[1]-y)**2\n if d < minl:minl,idx = d,(i, i.index(j))\n return idx if minl**0.5<lim else None\n\n def pick(self, x, y, lim):\n return self.snap(x, y, lim)\n\n def draged(self, ox, oy, nx, ny, i):\n i[0][i[1]] = (nx, ny)\n\n def draw(self, dc, f, **key):\n dc.SetPen(wx.Pen(Setting['color'], width=1, style=wx.SOLID))\n dc.SetTextForeground(Setting['tcolor'])\n linefont = wx.Font(10, wx.FONTFAMILY_DEFAULT, \n wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False)\n dc.SetFont(linefont)\n if len(self.buf)>1:\n dc.DrawLines([f(*i) for i in self.buf])\n for i in self.buf:dc.DrawCircle(f(*i),2)\n for line in self.body:\n dc.DrawLines([f(*i) for i in line])\n for i in line:dc.DrawCircle(f(*i),2)\n pts = np.array(line)\n mid = (pts[:-1]+pts[1:])/2\n\n dxy = (pts[:-1]-pts[1:])\n dis = norm(dxy, axis=1)\n unit = 1 if self.unit is None else self.unit[0]\n for i,j in zip(dis, mid):\n dc.DrawText('%.2f'%(i*unit), f(*j))\n\n def report(self, title):\n rst, titles = [], ['K']\n for line in self.body:\n pts = np.array(line)\n mid = (pts[:-1]+pts[1:])/2\n\n dxy = (pts[:-1]-pts[1:])\n dxy[:,1][dxy[:,1]==0] = 1\n l = norm(dxy, axis=1)*-np.sign(dxy[:,1])\n rst.append(np.round(np.arccos(dxy[:,0]/l)/np.pi*180,1))\n IPy.show_table(pd.DataFrame(rst, columns=titles), title)\n\nclass Plugin(Tool):\n \"\"\"Define the profile class plugin with the event callback functions\"\"\"\n title = 'Profile'\n def __init__(self):\n self.curobj = None\n self.doing = False\n self.odx,self.ody = 0, 0\n\n def mouse_down(self, ips, x, y, btn, **key):\n if key['ctrl'] and key['alt']:\n if 
isinstance(ips.mark, Profile):\n ips.mark.report(ips.title)\n return\n lim = 5.0/key['canvas'].scale\n if btn==1:\n if not self.doing:\n if isinstance(ips.mark, Profile):\n self.curobj = ips.mark.pick(x, y, lim)\n if self.curobj!=None:return\n\n if not isinstance(ips.mark, Profile):\n ips.mark = Profile(unit=ips.unit)\n self.doing = True\n else: ips.mark = None\n if self.doing:\n ips.mark.buf.append((x,y))\n ips.mark.buf.append((x,y))\n self.curobj = (ips.mark.buf, -1)\n self.odx, self.ody = x,y\n ips.update()\n\n def mouse_up(self, ips, x, y, btn, **key):\n self.curobj = None\n if self.doing:\n ips.mark.addline()\n self.doing = False\n if ips.mark!=None and len(ips.mark.body)==1:\n self.profile(ips.mark.body, ips.img)\n ips.update()\n\n def mouse_move(self, ips, x, y, btn, **key):\n if not isinstance(ips.mark, Profile):return\n lim = 5.0/key['canvas'].scale \n if btn==None:\n self.cursor = wx.CURSOR_CROSS\n if ips.mark.snap(x, y, lim)!=None:\n self.cursor = wx.CURSOR_HAND\n elif btn==1:\n ips.mark.draged(self.odx, self.ody, x, y, self.curobj)\n ips.update()\n #PlotFrame.plot(np.random.rand(100))\n self.odx, self.ody = x, y\n\n def profile(self, body, img):\n (x1, y1), (x2, y2) = body[0]\n dx, dy = x2-x1, y2-y1\n n = max(abs(dx), abs(dy)) + 1\n xs = np.linspace(x1, x2, int(n)).round().astype(np.int16)\n ys = np.linspace(y1, y2, int(n)).round().astype(np.int16)\n msk = (xs>=0) * (xs<img.shape[1])\n msk*= (ys>=0) * (ys<img.shape[0])\n ix = np.arange(len(xs))\n\n frame = IPy.plot('Profile', 'Profile - Line', 'pixels coordinate', 'value of pixcels')\n frame.clear()\n if len(img.shape) == 3:\n vs = np.zeros((3,len(xs)), dtype=np.int16)\n vs[:,msk] = img[ys[msk], xs[msk]].T\n frame.add_data(ix, vs[0], (255,0,0), 1)\n frame.add_data(ix, vs[1], (0,255,0), 1)\n frame.add_data(ix, vs[2], (0,0,255), 1)\n else: \n vs = np.zeros(len(xs), dtype=np.int16)\n vs[msk] = img[ys[msk], xs[msk]]\n frame.add_data(ix, vs, (0,0,0), 1)\n frame.draw()\n\n def mouse_wheel(self, ips, x, y, d, **key):\n pass\n\nif __name__ == '__main__':\n app = wx.App(False)\n frame = PlotFrame(None)\n frame.Show()\n app.MainLoop()" ]
[ [ "numpy.mean", "numpy.ones" ], [ "numpy.arccos", "numpy.linalg.norm", "pandas.DataFrame", "numpy.sign", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
neonbjb/tortoise-tts
[ "a9e64e216d871f52c091465f2a2a8e503737a69c", "a9e64e216d871f52c091465f2a2a8e503737a69c" ]
[ "tortoise/utils/wav2vec_alignment.py", "tortoise/utils/typical_sampling.py" ]
[ "import re\n\nimport torch\nimport torchaudio\nfrom transformers import Wav2Vec2ForCTC, Wav2Vec2FeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor\n\nfrom tortoise.utils.audio import load_audio\n\n\ndef max_alignment(s1, s2, skip_character='~', record=None):\n \"\"\"\n A clever function that aligns s1 to s2 as best it can. Wherever a character from s1 is not found in s2, a '~' is\n used to replace that character.\n\n Finally got to use my DP skills!\n \"\"\"\n if record is None:\n record = {}\n assert skip_character not in s1, f\"Found the skip character {skip_character} in the provided string, {s1}\"\n if len(s1) == 0:\n return ''\n if len(s2) == 0:\n return skip_character * len(s1)\n if s1 == s2:\n return s1\n if s1[0] == s2[0]:\n return s1[0] + max_alignment(s1[1:], s2[1:], skip_character, record)\n\n take_s1_key = (len(s1), len(s2) - 1)\n if take_s1_key in record:\n take_s1, take_s1_score = record[take_s1_key]\n else:\n take_s1 = max_alignment(s1, s2[1:], skip_character, record)\n take_s1_score = len(take_s1.replace(skip_character, ''))\n record[take_s1_key] = (take_s1, take_s1_score)\n\n take_s2_key = (len(s1) - 1, len(s2))\n if take_s2_key in record:\n take_s2, take_s2_score = record[take_s2_key]\n else:\n take_s2 = max_alignment(s1[1:], s2, skip_character, record)\n take_s2_score = len(take_s2.replace(skip_character, ''))\n record[take_s2_key] = (take_s2, take_s2_score)\n\n return take_s1 if take_s1_score > take_s2_score else skip_character + take_s2\n\n\nclass Wav2VecAlignment:\n \"\"\"\n Uses wav2vec2 to perform audio<->text alignment.\n \"\"\"\n def __init__(self):\n self.model = Wav2Vec2ForCTC.from_pretrained(\"jbetker/wav2vec2-large-robust-ft-libritts-voxpopuli\").cpu()\n self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f\"facebook/wav2vec2-large-960h\")\n self.tokenizer = Wav2Vec2CTCTokenizer.from_pretrained('jbetker/tacotron-symbols')\n\n def align(self, audio, expected_text, audio_sample_rate=24000):\n orig_len = audio.shape[-1]\n\n with torch.no_grad():\n self.model = self.model.cuda()\n audio = audio.to('cuda')\n audio = torchaudio.functional.resample(audio, audio_sample_rate, 16000)\n clip_norm = (audio - audio.mean()) / torch.sqrt(audio.var() + 1e-7)\n logits = self.model(clip_norm).logits\n self.model = self.model.cpu()\n\n logits = logits[0]\n pred_string = self.tokenizer.decode(logits.argmax(-1).tolist())\n\n fixed_expectation = max_alignment(expected_text.lower(), pred_string)\n w2v_compression = orig_len // logits.shape[0]\n expected_tokens = self.tokenizer.encode(fixed_expectation)\n expected_chars = list(fixed_expectation)\n if len(expected_tokens) == 1:\n return [0] # The alignment is simple; there is only one token.\n expected_tokens.pop(0) # The first token is a given.\n expected_chars.pop(0)\n\n alignments = [0]\n def pop_till_you_win():\n if len(expected_tokens) == 0:\n return None\n popped = expected_tokens.pop(0)\n popped_char = expected_chars.pop(0)\n while popped_char == '~':\n alignments.append(-1)\n if len(expected_tokens) == 0:\n return None\n popped = expected_tokens.pop(0)\n popped_char = expected_chars.pop(0)\n return popped\n\n next_expected_token = pop_till_you_win()\n for i, logit in enumerate(logits):\n top = logit.argmax()\n if next_expected_token == top:\n alignments.append(i * w2v_compression)\n if len(expected_tokens) > 0:\n next_expected_token = pop_till_you_win()\n else:\n break\n\n pop_till_you_win()\n if not (len(expected_tokens) == 0 and len(alignments) == len(expected_text)):\n torch.save([audio, 
expected_text], 'alignment_debug.pth')\n assert False, \"Something went wrong with the alignment algorithm. I've dumped a file, 'alignment_debug.pth' to\" \\\n \"your current working directory. Please report this along with the file so it can get fixed.\"\n\n # Now fix up alignments. Anything with -1 should be interpolated.\n alignments.append(orig_len) # This'll get removed but makes the algorithm below more readable.\n for i in range(len(alignments)):\n if alignments[i] == -1:\n for j in range(i+1, len(alignments)):\n if alignments[j] != -1:\n next_found_token = j\n break\n for j in range(i, next_found_token):\n gap = alignments[next_found_token] - alignments[i-1]\n alignments[j] = (j-i+1) * gap // (next_found_token-i+1) + alignments[i-1]\n\n return alignments[:-1]\n\n def redact(self, audio, expected_text, audio_sample_rate=24000):\n if '[' not in expected_text:\n return audio\n splitted = expected_text.split('[')\n fully_split = [splitted[0]]\n for spl in splitted[1:]:\n assert ']' in spl, 'Every \"[\" character must be paired with a \"]\" with no nesting.'\n fully_split.extend(spl.split(']'))\n\n # At this point, fully_split is a list of strings, with every other string being something that should be redacted.\n non_redacted_intervals = []\n last_point = 0\n for i in range(len(fully_split)):\n if i % 2 == 0:\n end_interval = max(0, last_point + len(fully_split[i]) - 1)\n non_redacted_intervals.append((last_point, end_interval))\n last_point += len(fully_split[i])\n\n bare_text = ''.join(fully_split)\n alignments = self.align(audio, bare_text, audio_sample_rate)\n\n output_audio = []\n for nri in non_redacted_intervals:\n start, stop = nri\n output_audio.append(audio[:, alignments[start]:alignments[stop]])\n return torch.cat(output_audio, dim=-1)\n", "import torch\nfrom transformers import LogitsWarper\n\n\nclass TypicalLogitsWarper(LogitsWarper):\n def __init__(self, mass: float = 0.9, filter_value: float = -float(\"Inf\"), min_tokens_to_keep: int = 1):\n self.filter_value = filter_value\n self.mass = mass\n self.min_tokens_to_keep = min_tokens_to_keep\n\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n # calculate entropy\n normalized = torch.nn.functional.log_softmax(scores, dim=-1)\n p = torch.exp(normalized)\n ent = -(normalized * p).nansum(-1, keepdim=True)\n\n # shift and sort\n shifted_scores = torch.abs((-normalized) - ent)\n sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False)\n sorted_logits = scores.gather(-1, sorted_indices)\n cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)\n\n # Remove tokens with cumulative mass above the threshold\n last_ind = (cumulative_probs < self.mass).sum(dim=1)\n last_ind[last_ind < 0] = 0\n sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1))\n if self.min_tokens_to_keep > 1:\n # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)\n sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0\n indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)\n\n scores = scores.masked_fill(indices_to_remove, self.filter_value)\n return scores" ]
[ [ "torch.save", "torch.no_grad", "torch.cat" ], [ "torch.exp", "torch.abs", "torch.sort", "torch.nn.functional.log_softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thunlp/EntityDuetNeuralRanking
[ "3efbc1f6ccffb5b149d08030fde8dd059fce0fa1" ]
[ "data/preprocess.py" ]
[ "''' Handling the data io '''\nimport argparse\nimport torch\nimport sys\n\n\n\ndef read_vocab_idx(vocab_path):\n ''' build vocab '''\n\n word2idx = {\"_PAD\" : 0}\n\n with open(vocab_path) as f:\n for line in f:\n tokens = line.strip(\"\\n\").split(\"\\t\")\n no = int(tokens[1])\n word2idx[tokens[0]] = no\n\n print('[Info] Trimmed vocabulary size = {},'.format(len(word2idx)))\n return word2idx\n\ndef read_ent_des(inst_file):\n ent_des_dict = dict()\n ent_des = list()\n ent_des.append([0] * 20)\n with open(inst_file) as f:\n for step, line in enumerate(f):\n tokens = line.strip().split()\n ent_des_dict[tokens[0]] = step + 1\n ent_des.append([int(token) for token in tokens[1:]][:20])\n return ent_des, ent_des_dict\n\ndef read_ent_car(inst_file):\n ent_wrd_dict = dict()\n ent_wrd = list()\n ent_wrd.append([0] * 10)\n with open(inst_file) as f:\n for step, line in enumerate(f):\n tokens = line.strip().split()\n ent_wrd_dict[tokens[0]] = step + 1\n ent_wrd.append([int(token) for token in tokens[1:]][:10])\n return ent_wrd, ent_wrd_dict\n\n\ndef main():\n ''' Main function '''\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-ent_des', required=True)\n parser.add_argument('-ent_car', required=True)\n parser.add_argument('-save_data', required=True)\n parser.add_argument('-wrd_vocab', required=True)\n parser.add_argument('-ent_vocab', required=True)\n parser.add_argument('-car_vocab', required=True)\n\n opt = parser.parse_args()\n wrd2idx = read_vocab_idx(opt.wrd_vocab)\n ent2idx = read_vocab_idx(opt.ent_vocab)\n car2idx = read_vocab_idx(opt.car_vocab)\n ent_des, ent_des_dict = read_ent_des(opt.ent_des)\n ent_wrd, ent_wrd_dict = read_ent_car(opt.ent_car)\n data = {\n 'settings': opt,\n 'wrd2idx': wrd2idx,\n 'ent2idx': ent2idx,\n 'car2idx': car2idx,\n 'ent_des_dict' : ent_des_dict,\n 'ent_des' : ent_des,\n 'ent_wrd_dict': ent_wrd_dict,\n 'ent_wrd': ent_wrd}\n\n print('[Info] Dumping the processed data to pickle file', opt.save_data)\n torch.save(data, opt.save_data)\n print('[Info] Finish.')\n\nif __name__ == '__main__':\n #reload(sys)\n #sys.setdefaultencoding('utf-8')\n main()" ]
[ [ "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fantamat/gtrain
[ "9538697768deb0f88c3efdbd617e882d0ecc2bc4" ]
[ "gtrain/utils.py" ]
[ "import numpy as np\nimport os\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n\n\ndef get_loss_and_accuracy(save_dir):\n \"\"\"\n loads scalars from training procedure saved in summaries created by gtrain\n :param save_dir: same as save_dir in gtrain method\n :return: dict with\n \"val_acc\": vector of validation accuracies\n \"val_loss\": vector of loss\n \"val_stem\": step in which the record was made\n \"val_timestamp\": time in which the record was made\n \"train_acc\": vector of validation accuracies\n \"train_loss\": vector of loss\n \"train_stem\": step in which the record was made\n \"train_timestamp\": time in which the record was made\n \"\"\"\n def scallarEvent_list_2_dict(sel):\n wall_time = list()\n step = list()\n value = list()\n for se in sel:\n wall_time.append(se.wall_time)\n step.append(se.step)\n value.append(se.value)\n return {\n \"wall_time\": wall_time,\n \"step\": step,\n \"value\": value,\n }\n\n event_acc = EventAccumulator(os.path.join(save_dir, \"summaries\", \"train\"))\n event_acc.Reload()\n train_loss = scallarEvent_list_2_dict(event_acc.Scalars(\"loss\"))\n train_acc = scallarEvent_list_2_dict(event_acc.Scalars(\"accuracy\"))\n\n event_acc = EventAccumulator(os.path.join(save_dir, \"summaries\", \"dev\"))\n event_acc.Reload()\n val_loss = scallarEvent_list_2_dict(event_acc.Scalars(\"loss\"))\n val_acc = scallarEvent_list_2_dict(event_acc.Scalars(\"accuracy\"))\n return {\n \"train_loss\": train_loss[\"value\"],\n \"train_acc\": train_acc[\"value\"],\n \"train_step\": train_loss[\"step\"],\n \"train_timestamp\": train_loss[\"wall_time\"],\n \"val_loss\": val_loss[\"value\"],\n \"val_acc\": val_acc[\"value\"],\n \"val_step\": val_loss[\"step\"],\n \"val_timestamp\": val_loss[\"wall_time\"],\n }\n\n\ndef confmat(y0, y1, num_classes=None):\n \"\"\"\n compute confusion matrix for y1 and y2 does not meter if either of them is in vector or integer form\n :param y0: list of - labels or vector of probabilities\n :param y1: list of - labels or vector of probabilities\n :param num_classes: number of classes if is not defined takes maximal value in labels as the highest class\n :return: confusion matrix\n \"\"\"\n if not isinstance(y0[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):\n y0 = np.argmax(y0, axis=1)\n elif isinstance(y0, list):\n y0 = np.array(y0)\n if not isinstance(y1[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):\n y1 = np.argmax(y1, axis=1)\n elif isinstance(y1, list):\n y1 = np.array(y1)\n labels_num = max(max(y0), max(y1)) + 1 if num_classes is None else num_classes\n out = np.zeros((labels_num, labels_num))\n for i in range(labels_num):\n for j in range(labels_num):\n out[i, j] = np.sum(y1[y0==i]==j)\n return out\n\n\ndef accuracy(y0, y1):\n \"\"\"\n compute accuracy for y1 and y2 does not meter if either of them is in vector or integer form\n :param y0: list of - labels or vector of probabilities\n :param y1: list of - labels or vector of probabilities\n :return: accuracy\n \"\"\"\n if not isinstance(y0[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):\n y0 = np.argmax(y0, axis=1)\n elif isinstance(y0, list):\n y0 = np.array(y0)\n if not isinstance(y1[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):\n y1 = np.argmax(y1, axis=1)\n elif isinstance(y1, list):\n y1 = np.array(y1)\n\n out = np.sum(y0==y1)/len(y0)\n return out\n\n\ndef labels2probabilities(labels):\n \"\"\"\n transforms 
labels into the 1-hod encoded vectors\n :param labels: list of integer values 0..(number_of_classes - 1), size n\n :return: matrix size (n, num_of_classes), ones are on the indexes defined by param labels\n \"\"\"\n num_of_classes = max(labels)+1\n return np.apply_along_axis(lambda x: np.eye(num_of_classes)[x], 0, labels)\n\n\ndef save_weights(list_of_numpy_arrays, file_name):\n \"\"\"\n saves list of numpy arrays into the file\n if the file have other than npz extension or no extension at all the .npz is added at the end of file_name\n (uses nympy function savez_compressed)\n :param list_of_numpy_arrays: list of numpy arrays\n :param file_name: filename with format npz\n \"\"\"\n if os.path.dirname(file_name):\n check_dir(os.path.dirname(file_name))\n if not str(file_name).endswith(\".npz\"):\n file_name = file_name + \".npz\"\n\n np.savez_compressed(file_name, *list_of_numpy_arrays)\n\n\ndef load_weights(file_name):\n \"\"\"\n loads weights saved by save_weights, so the extension npz of the file is necessary\n :param file_name: filename with format npz\n :return: list of loaded numpy arrays\n \"\"\"\n if not str(file_name).endswith(\".npz\"):\n raise IOError(\"file_name has bad format use .npz file insted.\")\n l = np.load(file_name)\n files = l.files\n output = list()\n for file in files:\n output += [l[file]]\n return output\n\n\ndef check_dir(directory):\n \"\"\"\n Checks if the path exists and if not it creates all missing folders\n \"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef join_weights_and_biases(weights, biases):\n \"\"\"\n joins two arrays into one\n :param weights: list of numpy arrays.\n :param biases: list of numpy arrays with same length as weights.\n :return: list of list with two numpy arrays for weights and biases, respectively.\n - the first index is defines layer and the second weight (0) or bias (1)\n \"\"\"\n out = list()\n for i, _ in enumerate(weights):\n out.append([weights[i], biases[i]])\n return out\n\n\ndef get_class_vs_others_indexes(class_index, labels, return_new_labels=False):\n \"\"\"\n Generate a indexes that contains the same number of samples from the specified class (class_index) and\n other remaining classes. It also can return new labels, i.e., 0 for samples with class_index and 1 for the others.\n The returned indexes are randomly shuffled.\n :param class_index: index of the base class\n :param labels: list of labels for given dataset\n :param return_new_labels: a flag\n :return: indexes of samples with all samples from the class (class_index) and the same number of other classes\n - if return_new_labels is True then also new labels are returned\n \"\"\"\n ar = np.arange(len(labels))\n indexes0 = ar[labels==class_index]\n indexes1 = np.random.choice(ar[labels!=class_index], len(indexes0))\n out = indexes0 + indexes1\n out_lables = np.zeros((2*len(indexes0)), dtype=np.int)\n out_lables[-len(indexes0):] = 1\n rp = np.random.permutation(len(out))\n if return_new_labels:\n return out[rp], out_lables[rp]\n else:\n return out[rp]\n\n" ]
[ [ "numpy.eye", "numpy.savez_compressed", "numpy.argmax", "numpy.load", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mariesig/privacy-evaluator
[ "4e6ced65cc71bb661aef4518192517e23e22595e", "4e6ced65cc71bb661aef4518192517e23e22595e" ]
[ "privacy_evaluator/metrics/basics.py", "privacy_evaluator/models/tf/dcti.py" ]
[ "import numpy as np\n\n\ndef accuracy(y: np.ndarray, y_prediction: np.ndarray) -> np.float32:\n \"\"\"Calculates accuracy for true labels and predicted labels.\n\n :params y: True labels.\n :params y_prediction: Predicted labels.\n :return: Accuracy\n :raises ValueError: If true labels and predicted labels are not of the same shape.\n \"\"\"\n if y.shape != y_prediction.shape:\n raise ValueError(\n f\"Expected true labels and predicted labels to be of same shape, received true labels with shape {str(y.shape)} and predicted labels with shape {str(y_prediction.shape)} instead.\"\n )\n return (np.argmax(y, axis=1) == np.argmax(y_prediction, axis=1)).sum() / y.shape[0]\n\n\ndef train_to_test_accuracy_gap(\n train_accuracy: np.float32, test_accuracy: np.float32\n) -> np.float32:\n \"\"\"Calculates the gap between the train and test accuracy of a classifier.\n\n The gap is calculated by subtracting the test accuracy from the train accuracy.\n\n :params train_accuracy: The train accuracy.\n :params test_accuracy: The test accuracy.\n :return: The gap between the train and test accuracy.\n \"\"\"\n return train_accuracy - test_accuracy\n\n\ndef train_to_test_accuracy_ratio(\n train_accuracy: np.float32, test_accuracy: np.float32\n) -> np.float32:\n \"\"\"Calculates the ratio between the train and test accuracy of a classifier.\n\n The ratio is calculated by dividing the test accuracy by the train accuracy.\n\n :params train_accuracy: The train accuracy.\n :params test_accuracy: The test accuracy.\n :return: The ratio between the train and test accuracy.\n \"\"\"\n return train_accuracy / test_accuracy\n", "import os\n\nimport tensorflow as tf\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import (\n Conv2D,\n MaxPooling2D,\n Flatten,\n Dense,\n BatchNormalization,\n ReLU,\n SpatialDropout2D,\n GlobalAveragePooling2D,\n Dropout,\n)\n\n\n__all__ = [\"DCTI\", \"dcti\"]\n\n\nclass Block(tf.keras.Model):\n def __init__(self, filters: int):\n super().__init__()\n\n self.model = Sequential(\n [\n Conv2D(filters, kernel_size=3, padding=\"same\"),\n BatchNormalization(),\n ReLU(),\n ]\n )\n\n def call(self, x, training=False):\n return self.model(x, training=training)\n\n\nclass DCTI(tf.keras.Model):\n def __init__(self):\n super().__init__()\n\n self.model = Sequential(\n [\n Block(64),\n Block(64),\n SpatialDropout2D(0.3),\n MaxPooling2D((2, 2)),\n Block(128),\n Block(128),\n SpatialDropout2D(0.3),\n MaxPooling2D((2, 2)),\n Block(256),\n Block(256),\n Block(256),\n SpatialDropout2D(0.4),\n MaxPooling2D((2, 2)),\n Block(512),\n Block(512),\n SpatialDropout2D(0.4),\n MaxPooling2D((2, 2)),\n Block(512),\n GlobalAveragePooling2D(),\n Flatten(),\n Dropout(0.5),\n Dense(10, activation=\"softmax\"),\n ]\n )\n\n def call(self, x, training=False):\n return self.model(x, training=training)\n\n\ndef dcti(pretrained: bool = True) -> DCTI:\n \"\"\"\n DCTI model from\n `\"Lightweight Deep Convolutional Network for Tiny Object Recognition\"\n <https://www.scitepress.org/Papers/2018/67520/67520.pdf>`_.\n\n Note:\n The pre-trained model expects inputs to be first scaled to [0, 1] and\n then normalized with tensorflow.image.per_image_standardization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on CIFAR-10\n \"\"\"\n if pretrained:\n here = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(here, \"dcti\", \"model\")\n model = tf.keras.models.load_model(path)\n else:\n model = DCTI()\n\n return model\n" ]
[ [ "numpy.argmax" ], [ "tensorflow.keras.models.load_model", "tensorflow.keras.layers.ReLU", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.SpatialDropout2D" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
glassroom/torch_train_test_loop
[ "fbd575c59cbf2823d13eb1df86b5d90f29febd23" ]
[ "torch_train_test_loop.py" ]
[ "import torch\nimport contextlib\n\nTRAIN_DESC, VALID_DESC, TEST_DESC = ('train', 'valid', 'test')\n\nclass EarlyStopException(Exception):\n pass\n\nclass LoopComponent():\n r\"\"\"\n Base class for loop components. Each method is a callback to be\n invoked by a `TrainTestLoop` instance, which is passed as an input.\n If the loop instance has multiple components, on each iteration their\n callbacks will be invoked in the following order:\n `\n Iteration\n +------->-------+\n | |\n | +-----------v-----------+-----------------------+--\n | | Loop component #1 | Loop component #2 | ...\n | +-----------------------+-----------------------+--\n | | on_train_begin -----+-> on_train_begin -----+-> ...\n | | on_epoch_begin ---+---> on_epoch_begin ---+---> ...\n | | on_batch_begin -+-----> on_batch_begin -+-----> ...\n | | : | : |\n | | on_batch_end ---+-----> on_batch_end --+-----> ...\n | | on_epoch_end -----+---> on_epoch_end -----+---> ...\n | | on_train_end -------+-> on_train_end -------+-> ... :\n | +-----------------------+-----------------------+-- |\n | v\n +-------------------------------<------------------------------+\n `\n \"\"\"\n def on_train_begin(self, loop): pass # called by loop at start of training\n def on_epoch_begin(self, loop): pass # called by loop at start of each epoch\n def on_batch_begin(self, loop): pass # called by loop at start of each batch\n def on_grads_reset(self, loop): pass # called by loop to zero out gradients, if training\n def on_forward_pass(self, loop): pass # called by loop to compute forward pass\n def on_loss_compute(self, loop): pass # called by loop to compute model loss\n def on_backward_pass(self, loop): pass # called by loop to compute backward pass, if training\n def on_optim_step(self, loop): pass # called by loop to compute/schedule optim, if training\n def on_batch_end(self, loop): pass # called by loop at end of each batch\n def on_epoch_end(self, loop): pass # called by loop at end of each epoch\n def on_train_end(self, loop): pass # called by loop at end of training\n\nclass TrainTestLoop():\n r\"\"\"\n Composable loop for training and testing PyTorch models. On each\n iteration of the loop, computations are performed by one or more\n `LoopComponent` instances that access and modify loop state. The\n number and order of loop components can be modified at any time.\n\n Args:\n model: `torch.nn.Module` object containing the model.\n components: iterable of `LoopComponent` instances that perform\n computations on each iteration, in order of invocation.\n train_data: iterable for which len() returns length.\n valid_data: iterable for which len() returns length.\n\n Methods:\n train(n_epochs): train/validate model for n_epochs: int.\n test(test_data): test model for one epoch on previously unseen\n test_data, an iterable for which len() returns length.\n stop(): stop early and, if training and validating, invoke the\n 'on_train_end' callbacks of all loop components. 
Any\n component of the loop can call stop() at any time.\n\n Sample usage:\n >>> loop = TrainTestLoop(model, components, train_data, valid_data)\n >>> loop.train(n_epochs)\n >>> loop.test(test_data)\n >>> print(*vars(loop), sep='\\n') # vars holding loop state\n \"\"\"\n def __init__(self, model, components, train_data, valid_data):\n self.model, self.components, self.train_data, self.valid_data = (model, list(components), train_data, valid_data)\n self.epoch_num = 0\n\n def _components_do(self, *args):\n for callback in [getattr(comp, arg) for arg in args for comp in self.components]:\n callback(self)\n\n def _run_epoch(self, data, desc):\n self.n_batches, self.epoch_desc = (len(data), desc)\n self.is_training, self.is_validating, self.is_testing = [desc == s for s in (TRAIN_DESC, VALID_DESC, TEST_DESC)]\n assert [self.is_training, self.is_validating, self.is_testing].count(True) == 1\n self.model.train() if self.is_training else self.model.eval()\n with torch.no_grad() if not self.is_training else contextlib.suppress():\n self._components_do('on_epoch_begin')\n for self.batch_num, self.batch in enumerate(iter(data)):\n if self.is_training: self._components_do('on_grads_reset')\n self._components_do('on_batch_begin', 'on_forward_pass', 'on_loss_compute')\n if self.is_training:\n self._components_do('on_backward_pass', 'on_optim_step')\n self.optim_step_num += 1\n self._components_do('on_batch_end')\n self._components_do('on_epoch_end')\n\n def train(self, n_epochs):\n self.n_epochs = n_epochs\n self.n_optim_steps, self.optim_step_num = (self.n_epochs * len(self.train_data), 0)\n self._components_do('on_train_begin')\n for _ in range(n_epochs):\n try:\n self._run_epoch(self.train_data, TRAIN_DESC)\n self._run_epoch(self.valid_data, VALID_DESC)\n self.epoch_num += 1\n except EarlyStopException: break\n self._components_do('on_train_end')\n \n def test(self, test_data):\n try:\n self.n_epochs = 1\n self._run_epoch(test_data, TEST_DESC)\n except EarlyStopException: pass\n\n def stop(self):\n raise EarlyStopException\n" ]
[ [ "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JulianKu/megastep
[ "21ac85510d03f20600d438618a02569c6f1e34e1", "21ac85510d03f20600d438618a02569c6f1e34e1" ]
[ "rebar/stats/gpu.py", "megastep/demo/heads.py" ]
[ "import torch\nimport pandas as pd\nfrom io import BytesIO\nfrom subprocess import check_output\nfrom . import writing\nimport time\n\n\ndef memory(device=0):\n total_mem = torch.cuda.get_device_properties(f'cuda:{device}').total_memory\n writing.max(f'gpu-memory/cache/{device}', torch.cuda.max_memory_cached(device)/total_mem)\n torch.cuda.reset_max_memory_cached()\n writing.max(f'gpu-memory/alloc/{device}', torch.cuda.max_memory_allocated(device)/total_mem)\n torch.cuda.reset_max_memory_allocated()\n torch.cuda.reset_max_memory_cached()\n\ndef dataframe():\n \"\"\"Use `nvidia-smi --help-query-gpu` to get a list of query params\"\"\"\n params = {\n 'device': 'index', \n 'compute': 'utilization.gpu', 'access': 'utilization.memory', \n 'memused': 'memory.used', 'memtotal': 'memory.total',\n 'fan': 'fan.speed', 'power': 'power.draw', 'temp': 'temperature.gpu'}\n command = f\"\"\"nvidia-smi --format=csv,nounits,noheader --query-gpu={','.join(params.values())}\"\"\"\n df = pd.read_csv(BytesIO(check_output(command, shell=True)), header=None)\n df.columns = list(params.keys())\n df = df.set_index('device')\n df = df.apply(pd.to_numeric, errors='coerce')\n return df\n\n_last = -1\ndef vitals(device=None, throttle=0):\n # This is a fairly expensive op, so let's avoid doing it too often\n global _last\n if time.time() - _last < throttle:\n return\n _last = time.time()\n\n df = dataframe()\n if device is None:\n pass\n elif isinstance(device, int):\n df = df.loc[[device]]\n else:\n df = df.loc[device]\n\n fields = ['compute', 'access', 'fan', 'power', 'temp']\n for (device, field), value in df[fields].stack().iteritems():\n writing.mean(f'gpu/{field}/{device}', value)\n\n for device in df.index:\n writing.mean(f'gpu/memory/{device}', 100*df.loc[device, 'memused']/df.loc[device, 'memtotal'])", "\"\"\"\nTODO-DOCS Heads docs\n\"\"\"\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nclass MultiVectorIntake(nn.Module):\n\n def __init__(self, space, width):\n super().__init__()\n A, C = space.shape\n\n self.core = nn.Sequential(\n nn.Linear(C, width), nn.ReLU(),)\n self.proj = nn.Sequential(\n nn.Linear(A*width, width), nn.ReLU(),)\n \n def forward(self, obs, **kwargs):\n if obs.ndim == 3:\n return self.forward(obs[None], **kwargs).squeeze(0)\n\n T, B, A, C = obs.shape\n x = self.core(obs.reshape(T*B*A, C)).reshape(T, B, -1)\n return self.proj(x)\n\nclass MultiImageIntake(nn.Module):\n\n def __init__(self, space, width):\n super().__init__()\n A, C, H, W = space.shape\n\n self.conv = nn.Sequential(\n nn.Conv2d(C, 32, (1, 8), stride=(1, 4)), nn.ReLU(),\n nn.Conv2d(32, 64, (1, 4), stride=(1, 2)), nn.ReLU(),\n nn.Conv2d(64, 128, (1, 3), stride=(1, 2)), nn.ReLU())\n\n zeros = torch.zeros((A, C, H, W))\n convwidth = self.conv(zeros).nelement()\n\n self.proj = nn.Sequential(\n nn.Linear(convwidth, width), nn.ReLU(),\n nn.Linear(width, width), nn.ReLU())\n\n def forward(self, obs, **kwargs):\n if obs.ndim == 5:\n return self.forward(obs[None], **kwargs).squeeze(0)\n\n T, B, A, C, H, W = obs.shape\n if obs.dtype == torch.uint8:\n obs = obs/255.\n x = self.conv(obs.reshape(T*B*A, C, H, W)).reshape(T, B, -1)\n return self.proj(x)\n\nclass ConcatIntake(nn.Module):\n\n def __init__(self, space, width):\n super().__init__()\n\n intakes = type(space)({k: intake(v, width) for k, v in space.items()})\n self.core = nn.Linear(len(intakes)*width, width)\n self.intakes = nn.ModuleDict(intakes)\n\n def forward(self, x, **kwargs):\n ys = [self.intakes[k](x[k]) for k in 
self.intakes]\n return self.core(torch.cat(ys, -1))\n\ndef intake(space, width):\n if isinstance(space, dict):\n return ConcatIntake(space, width)\n name = f'{type(space).__name__}Intake'\n if name in globals():\n return globals()[name](space, width)\n raise ValueError(f'Can\\'t handle {space}')\n\nclass MultiDiscreteOutput(nn.Module):\n\n def __init__(self, space, width):\n super().__init__()\n shape = space.shape\n self.core = nn.Linear(width, int(np.prod(shape)))\n self.shape = shape\n \n def forward(self, x, **kwargs):\n y = self.core(x).reshape(*x.shape[:-1], *self.shape)\n return F.log_softmax(y, -1)\n\n def sample(self, logits, test=False):\n if test:\n return logits.argmax(-1)\n else:\n return torch.distributions.Categorical(logits=logits).sample()\n\nclass DictOutput(nn.Module):\n\n def __init__(self, space, width):\n super().__init__()\n self.core = nn.Linear(width, width*len(space))\n\n self._dtype = type(space)\n self.outputs = nn.ModuleDict({k: output(v, width) for k, v in space.items()})\n\n def forward(self, x, **kwargs):\n ys = torch.chunk(self.core(x), len(self.outputs), -1)\n return self._dtype({k: v(ys[i]) for i, (k, v) in enumerate(self.outputs.items())})\n \n def sample(self, l):\n return self._dtype({k: v.sample(l[k]) for k, v in self.outputs.items()})\n\nclass ValueOutput(nn.Module):\n\n def __init__(self, width):\n super().__init__()\n self.core = nn.Linear(width, 1)\n\n def forward(self, x, **kwargs):\n return self.core.forward(x).squeeze(-1)\n\ndef output(space, width):\n if isinstance(space, dict):\n return DictOutput(space, width)\n name = f'{type(space).__name__}Output'\n if name in globals():\n return globals()[name](space, width)\n raise ValueError(f'Can\\'t handle {space}')\n\n" ]
[ [ "torch.cuda.get_device_properties", "torch.cuda.reset_max_memory_cached", "torch.cuda.max_memory_allocated", "torch.cuda.max_memory_cached", "torch.cuda.reset_max_memory_allocated" ], [ "torch.nn.functional.log_softmax", "torch.zeros", "torch.cat", "torch.nn.ModuleDict", "torch.nn.Conv2d", "torch.nn.Linear", "torch.distributions.Categorical", "numpy.prod", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GeWu-Lab/MUSIC-AVQA_CVPR2022
[ "f704130f37a342b5ff861780282c75cc875221b2" ]
[ "net_grd_baseline/nets_qa_grd_baseline.py" ]
[ "import torch\nimport torchvision.models as models\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom visual_net import resnet18\n\n\ndef batch_organize(audio_data, posi_img_data, nega_img_data):\n\n # print(\"audio data: \", audio_data.shape)\n (B, T, C) = audio_data.size()\n audio_data_batch=audio_data.view(B*T,C)\n batch_audio_data = torch.zeros(audio_data_batch.shape[0] * 2, audio_data_batch.shape[1])\n\n (B, T, C, H, W) = posi_img_data.size()\n posi_img_data_batch=posi_img_data.view(B*T,C,H,W)\n nega_img_data_batch=nega_img_data.view(B*T,C,H,W)\n\n batch_image_data = torch.zeros(posi_img_data_batch.shape[0] * 2, posi_img_data_batch.shape[1], posi_img_data_batch.shape[2],posi_img_data_batch.shape[3])\n batch_labels = torch.zeros(audio_data_batch.shape[0] * 2)\n for i in range(audio_data_batch.shape[0]):\n batch_audio_data[i * 2, :] = audio_data_batch[i, :]\n batch_audio_data[i * 2 + 1, :] = audio_data_batch[i, :]\n batch_image_data[i * 2, :] = posi_img_data_batch[i, :]\n batch_image_data[i * 2 + 1, :] = nega_img_data_batch[i, :]\n batch_labels[i * 2] = 1\n batch_labels[i * 2 + 1] = 0\n \n return batch_audio_data, batch_image_data, batch_labels\n\n# Question\nclass QstEncoder(nn.Module):\n\n def __init__(self, qst_vocab_size, word_embed_size, embed_size, num_layers, hidden_size):\n\n super(QstEncoder, self).__init__()\n self.word2vec = nn.Embedding(qst_vocab_size, word_embed_size)\n self.tanh = nn.Tanh()\n self.lstm = nn.LSTM(word_embed_size, hidden_size, num_layers)\n self.fc = nn.Linear(2*num_layers*hidden_size, embed_size) # 2 for hidden and cell states\n\n def forward(self, question):\n\n qst_vec = self.word2vec(question) # [batch_size, max_qst_length=30, word_embed_size=300]\n qst_vec = self.tanh(qst_vec)\n qst_vec = qst_vec.transpose(0, 1) # [max_qst_length=30, batch_size, word_embed_size=300]\n self.lstm.flatten_parameters()\n _, (hidden, cell) = self.lstm(qst_vec) # [num_layers=2, batch_size, hidden_size=512]\n qst_feature = torch.cat((hidden, cell), 2) # [num_layers=2, batch_size, 2*hidden_size=1024]\n qst_feature = qst_feature.transpose(0, 1) # [batch_size, num_layers=2, 2*hidden_size=1024]\n qst_feature = qst_feature.reshape(qst_feature.size()[0], -1) # [batch_size, 2*num_layers*hidden_size=2048]\n qst_feature = self.tanh(qst_feature)\n qst_feature = self.fc(qst_feature) # [batch_size, embed_size]\n\n return qst_feature\n\n\nclass AVQA_Fusion_Net(nn.Module):\n\n def __init__(self):\n super(AVQA_Fusion_Net, self).__init__()\n\n # for features\n self.fc_a1 = nn.Linear(128, 512)\n self.fc_a2=nn.Linear(512,512)\n\n self.visual_net = resnet18(pretrained=True)\n\n self.fc_v = nn.Linear(2048, 512)\n self.fc_st = nn.Linear(512, 512)\n self.fc_fusion = nn.Linear(1024, 512)\n self.fc = nn.Linear(1024, 512)\n self.fc_aq = nn.Linear(512, 512)\n self.fc_vq = nn.Linear(512, 512)\n\n self.linear11 = nn.Linear(512, 512)\n self.dropout1 = nn.Dropout(0.1)\n self.linear12 = nn.Linear(512, 512)\n\n self.linear21 = nn.Linear(512, 512)\n self.dropout2 = nn.Dropout(0.1)\n self.linear22 = nn.Linear(512, 512)\n self.norm1 = nn.LayerNorm(512)\n self.norm2 = nn.LayerNorm(512)\n self.dropout3 = nn.Dropout(0.1)\n self.dropout4 = nn.Dropout(0.1)\n self.norm3 = nn.LayerNorm(512)\n\n self.attn_a = nn.MultiheadAttention(512, 4, dropout=0.1)\n self.attn_v = nn.MultiheadAttention(512, 4, dropout=0.1)\n\n # question\n self.question_encoder = QstEncoder(93, 512, 512, 1, 512)\n\n self.tanh = nn.Tanh()\n self.dropout = nn.Dropout(0.5)\n self.fc_ans = nn.Linear(512, 42)\n\n 
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc_gl=nn.Linear(1024,512)\n\n\n # combine\n self.fc1 = nn.Linear(1024, 512)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(512, 256)\n self.relu2 = nn.ReLU()\n self.fc3 = nn.Linear(256, 128)\n self.relu3 = nn.ReLU()\n self.fc4 = nn.Linear(128, 2)\n self.relu4 = nn.ReLU()\n\n\n def forward(self, audio,visual_posi,visual_nega, question): \n # print(\"net audio input: \", audio.shape)\n # print(\"net question input: \", question.shape)\n ## question features\n qst_feature = self.question_encoder(question)\n xq = qst_feature.unsqueeze(0)\n\n ## audio features B T,128\n audio_feat = F.relu(self.fc_a1(audio))\n audio_feat = self.fc_a2(audio_feat) \n audio_feat_flag = audio_feat\n\n ## visua: [2*B*T, 512,14,14]\n # print(\"v feat1: \", visual_posi.shape) # [64, 10, 512, 14, 14]\n # v_feat = self.avgpool(visual_posi)\n # print(\"v feat: \", v_feat.shape)\n # posi_visual_feat=v_feat.squeeze(-1).squeeze(-1) # B T 512\n\n\n B,T,C,H,W=visual_posi.size()\n temp_visual=visual_posi.view(B*T,C,H,W)\n v_feat=self.avgpool(temp_visual)\n # print(\"v_feat: \", v_feat.shape) # [640, 512, 1, 1]\n posi_visual_feat=v_feat.squeeze(-1).squeeze(-1) # B T 512\n posi_visual_feat=posi_visual_feat.view(audio_feat.size(0),-1,C)\n # print(\"posi_visual_feat: \", posi_visual_feat.shape) # [64, 10, 512]\n\n # T,C,H,W=visual_posi.size()\n # visual_nega=torch.zeros(T,C,H,W)\n\n\n out_match = None\n match_label=None\n\n\n # print(\"posi_visual_feat: \", posi_visual_feat.shape)\n visual_feat_grd=posi_visual_feat.permute(1,0,2)\n \n ## attention, question as query on visual_feat_grd\n visual_feat_att = self.attn_v(xq, visual_feat_grd, visual_feat_grd, attn_mask=None, key_padding_mask=None)[0].squeeze(0)\n src = self.linear12(self.dropout1(F.relu(self.linear11(visual_feat_att))))\n visual_feat_att = visual_feat_att + self.dropout2(src)\n visual_feat_att = self.norm1(visual_feat_att)\n \n # attention, question as query on audio\n audio_feat = audio_feat.permute(1, 0, 2)\n audio_feat_att = self.attn_a(xq, audio_feat, audio_feat, attn_mask=None,key_padding_mask=None)[0].squeeze(0)\n src = self.linear22(self.dropout3(F.relu(self.linear21(audio_feat_att))))\n audio_feat_att = audio_feat_att + self.dropout4(src)\n audio_feat_att = self.norm2(audio_feat_att)\n \n feat = torch.cat((audio_feat_att, visual_feat_att), dim=-1)\n feat = self.tanh(feat)\n feat = self.fc_fusion(feat)\n\n ## fusion with question\n combined_feature = torch.mul(feat, qst_feature)\n combined_feature = self.tanh(combined_feature)\n out_qa = self.fc_ans(combined_feature) # [batch_size, ans_vocab_size]\n\n return out_qa,out_match,match_label\n" ]
[ [ "torch.nn.Dropout", "torch.nn.MultiheadAttention", "torch.nn.LSTM", "torch.zeros", "torch.cat", "torch.nn.Embedding", "torch.nn.Tanh", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.mul", "torch.nn.AdaptiveAvgPool2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
edgardeng/machine-learning-pytorch
[ "24a060894f5226b5ef20cc311db72f1adc037548", "24a060894f5226b5ef20cc311db72f1adc037548" ]
[ "practice-ocr/train_code/train_ctpn/data/dataset.py", "practice-ocr/detect/ctpn_utils.py" ]
[ "#-*- coding:utf-8 -*-\n#'''\n# Created on 18-12-27 上午10:34\n#\n# @Author: Greg Gao(laygin)\n#'''\n\nimport os\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport cv2\nfrom torch.utils.data import Dataset\nimport torch\nfrom config import IMAGE_MEAN\nfrom ctpn_utils import cal_rpn\n\n\ndef readxml(path):\n gtboxes = []\n imgfile = ''\n xml = ET.parse(path)\n for elem in xml.iter():\n if 'filename' in elem.tag:\n imgfile = elem.text\n if 'object' in elem.tag:\n for attr in list(elem):\n if 'bndbox' in attr.tag:\n xmin = int(round(float(attr.find('xmin').text)))\n ymin = int(round(float(attr.find('ymin').text)))\n xmax = int(round(float(attr.find('xmax').text)))\n ymax = int(round(float(attr.find('ymax').text)))\n\n gtboxes.append((xmin, ymin, xmax, ymax))\n\n return np.array(gtboxes), imgfile\n\n\n# for ctpn text detection\nclass VOCDataset(Dataset):\n def __init__(self,\n datadir,\n labelsdir):\n '''\n\n :param txtfile: image name list text file\n :param datadir: image's directory\n :param labelsdir: annotations' directory\n '''\n if not os.path.isdir(datadir):\n raise Exception('[ERROR] {} is not a directory'.format(datadir))\n if not os.path.isdir(labelsdir):\n raise Exception('[ERROR] {} is not a directory'.format(labelsdir))\n\n self.datadir = datadir\n self.img_names = os.listdir(self.datadir)\n self.labelsdir = labelsdir\n\n def __len__(self):\n return len(self.img_names)\n\n def __getitem__(self, idx):\n img_name = self.img_names[idx]\n img_path = os.path.join(self.datadir, img_name)\n print(img_path)\n xml_path = os.path.join(self.labelsdir, img_name.replace('.jpg', '.xml'))\n gtbox, _ = readxml(xml_path)\n img = cv2.imread(img_path)\n h, w, c = img.shape\n\n # clip image\n if np.random.randint(2) == 1:\n img = img[:, ::-1, :]\n newx1 = w - gtbox[:, 2] - 1\n newx2 = w - gtbox[:, 0] - 1\n gtbox[:, 0] = newx1\n gtbox[:, 2] = newx2\n\n [cls, regr], _ = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)\n\n m_img = img - IMAGE_MEAN\n\n regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])\n\n cls = np.expand_dims(cls, axis=0)\n\n # transform to torch tensor\n m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()\n cls = torch.from_numpy(cls).float()\n regr = torch.from_numpy(regr).float()\n\n return m_img, cls, regr\n\nclass ICDARDataset(Dataset):\n def __init__(self,\n datadir,\n labelsdir):\n '''\n\n :param txtfile: image name list text file\n :param datadir: image's directory\n :param labelsdir: annotations' directory\n '''\n if not os.path.isdir(datadir):\n raise Exception('[ERROR] {} is not a directory'.format(datadir))\n if not os.path.isdir(labelsdir):\n raise Exception('[ERROR] {} is not a directory'.format(labelsdir))\n\n self.datadir = datadir\n self.img_names = os.listdir(self.datadir)\n self.labelsdir = labelsdir\n\n def __len__(self):\n return len(self.img_names)\n\n def box_transfer(self,coor_lists,rescale_fac = 1.0):\n gtboxes = []\n for coor_list in coor_lists:\n coors_x = [int(coor_list[2*i]) for i in range(4)]\n coors_y = [int(coor_list[2*i+1]) for i in range(4)]\n xmin = min(coors_x)\n xmax = max(coors_x)\n ymin = min(coors_y)\n ymax = max(coors_y)\n if rescale_fac>1.0:\n xmin = int(xmin / rescale_fac)\n xmax = int(xmax / rescale_fac)\n ymin = int(ymin / rescale_fac)\n ymax = int(ymax / rescale_fac)\n gtboxes.append((xmin, ymin, xmax, ymax))\n return np.array(gtboxes)\n\n def box_transfer_v2(self,coor_lists,rescale_fac = 1.0):\n gtboxes = []\n for coor_list in coor_lists:\n coors_x = [int(coor_list[2 * i]) for i in range(4)]\n coors_y = 
[int(coor_list[2 * i + 1]) for i in range(4)]\n xmin = min(coors_x)\n xmax = max(coors_x)\n ymin = min(coors_y)\n ymax = max(coors_y)\n if rescale_fac > 1.0:\n xmin = int(xmin / rescale_fac)\n xmax = int(xmax / rescale_fac)\n ymin = int(ymin / rescale_fac)\n ymax = int(ymax / rescale_fac)\n prev = xmin\n for i in range(xmin // 16 + 1, xmax // 16 + 1):\n next = 16*i-0.5\n gtboxes.append((prev, ymin, next, ymax))\n prev = next\n gtboxes.append((prev, ymin, xmax, ymax))\n return np.array(gtboxes)\n\n def parse_gtfile(self,gt_path,rescale_fac = 1.0):\n coor_lists = list()\n with open(gt_path) as f:\n content = f.readlines()\n for line in content:\n coor_list = line.split(',')[:8]\n if len(coor_list)==8:\n coor_lists.append(coor_list)\n return self.box_transfer_v2(coor_lists,rescale_fac)\n\n def draw_boxes(self,img,cls,base_anchors,gt_box):\n for i in range(len(cls)):\n if cls[i]==1:\n pt1 = (int(base_anchors[i][0]),int(base_anchors[i][1]))\n pt2 = (int(base_anchors[i][2]),int(base_anchors[i][3]))\n img = cv2.rectangle(img,pt1,pt2,(200,100,100))\n for i in range(gt_box.shape[0]):\n pt1 = (int(gt_box[i][0]),int(gt_box[i][1]))\n pt2 = (int(gt_box[i][2]),int(gt_box[i][3]))\n img = cv2.rectangle(img, pt1, pt2, (100, 200, 100))\n return img\n\n def __getitem__(self, idx):\n img_name = self.img_names[idx]\n img_path = os.path.join(self.datadir, img_name)\n # print(img_path)\n img = cv2.imread(img_path)\n #####for read error, use default image#####\n if img is None:\n print(img_path)\n with open('error_imgs.txt','a') as f:\n f.write('{}\\n'.format(img_path))\n img_name = 'img_2647.jpg'\n img_path = os.path.join(self.datadir, img_name)\n img = cv2.imread(img_path)\n\n #####for read error, use default image#####\n\n h, w, c = img.shape\n rescale_fac = max(h, w) / 1600\n if rescale_fac>1.0:\n h = int(h/rescale_fac)\n w = int(w/rescale_fac)\n img = cv2.resize(img,(w,h))\n\n gt_path = os.path.join(self.labelsdir, 'gt_'+img_name.split('.')[0]+'.txt')\n gtbox = self.parse_gtfile(gt_path,rescale_fac)\n\n # clip image\n if np.random.randint(2) == 1:\n img = img[:, ::-1, :]\n newx1 = w - gtbox[:, 2] - 1\n newx2 = w - gtbox[:, 0] - 1\n gtbox[:, 0] = newx1\n gtbox[:, 2] = newx2\n\n [cls, regr], base_anchors = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)\n # debug_img = self.draw_boxes(img.copy(),cls,base_anchors,gtbox)\n # cv2.imwrite('debug/{}'.format(img_name),debug_img)\n m_img = img - IMAGE_MEAN\n\n regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])\n\n cls = np.expand_dims(cls, axis=0)\n\n # transform to torch tensor\n m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()\n cls = torch.from_numpy(cls).float()\n regr = torch.from_numpy(regr).float()\n\n return m_img, cls, regr\n\nif __name__ == '__main__':\n xmin = 15\n xmax = 95\n for i in range(xmin//16+1,xmax//16+1):\n print(16*i-0.5)", "#-*- coding:utf-8 -*-\n#'''\n# Created on 18-12-11 上午10:05\n#\n# @Author: Greg Gao(laygin)\n#'''\nimport numpy as np\nimport cv2\nfrom detect.config import *\n\n\ndef resize(image, width=None, height=None, inter=cv2.INTER_AREA):\n # initialize the dimensions of the image to be resized and\n # grab the image size\n dim = None\n (h, w) = image.shape[:2]\n\n # if both the width and height are None, then return the\n # original image\n if width is None and height is None:\n return image\n\n # check to see if the width is None\n if width is None:\n # calculate the ratio of the height and construct the\n # dimensions\n r = height / float(h)\n dim = (int(w * r), height)\n\n # otherwise, the height is None\n 
else:\n # calculate the ratio of the width and construct the\n # dimensions\n r = width / float(w)\n dim = (width, int(h * r))\n\n # resize the image\n resized = cv2.resize(image, dim, interpolation=inter)\n\n # return the resized image\n return resized\n\n\ndef gen_anchor(featuresize, scale):\n \"\"\"\n gen base anchor from feature map [HXW][9][4]\n reshape [HXW][9][4] to [HXWX9][4]\n \"\"\"\n heights = [11, 16, 23, 33, 48, 68, 97, 139, 198, 283]\n widths = [16, 16, 16, 16, 16, 16, 16, 16, 16, 16]\n\n # gen k=9 anchor size (h,w)\n heights = np.array(heights).reshape(len(heights), 1)\n widths = np.array(widths).reshape(len(widths), 1)\n\n base_anchor = np.array([0, 0, 15, 15])\n # center x,y\n xt = (base_anchor[0] + base_anchor[2]) * 0.5\n yt = (base_anchor[1] + base_anchor[3]) * 0.5\n\n # x1 y1 x2 y2\n x1 = xt - widths * 0.5\n y1 = yt - heights * 0.5\n x2 = xt + widths * 0.5\n y2 = yt + heights * 0.5\n base_anchor = np.hstack((x1, y1, x2, y2))\n\n h, w = featuresize\n shift_x = np.arange(0, w) * scale\n shift_y = np.arange(0, h) * scale\n # apply shift\n anchor = []\n for i in shift_y:\n for j in shift_x:\n anchor.append(base_anchor + [j, i, j, i])\n return np.array(anchor).reshape((-1, 4))\n\n\ndef cal_iou(box1, box1_area, boxes2, boxes2_area):\n \"\"\"\n box1 [x1,y1,x2,y2]\n boxes2 [Msample,x1,y1,x2,y2]\n \"\"\"\n x1 = np.maximum(box1[0], boxes2[:, 0])\n x2 = np.minimum(box1[2], boxes2[:, 2])\n y1 = np.maximum(box1[1], boxes2[:, 1])\n y2 = np.minimum(box1[3], boxes2[:, 3])\n\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n iou = intersection / (box1_area + boxes2_area[:] - intersection[:])\n return iou\n\n\ndef cal_overlaps(boxes1, boxes2):\n \"\"\"\n boxes1 [Nsample,x1,y1,x2,y2] anchor\n boxes2 [Msample,x1,y1,x2,y2] grouth-box\n\n \"\"\"\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3])\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3])\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps\n\n\ndef bbox_transfrom(anchors, gtboxes):\n \"\"\"\n compute relative predicted vertical coordinates Vc ,Vh\n with respect to the bounding box location of an anchor\n \"\"\"\n regr = np.zeros((anchors.shape[0], 2))\n Cy = (gtboxes[:, 1] + gtboxes[:, 3]) * 0.5\n Cya = (anchors[:, 1] + anchors[:, 3]) * 0.5\n h = gtboxes[:, 3] - gtboxes[:, 1] + 1.0\n ha = anchors[:, 3] - anchors[:, 1] + 1.0\n\n Vc = (Cy - Cya) / ha\n Vh = np.log(h / ha)\n\n return np.vstack((Vc, Vh)).transpose()\n\n\ndef bbox_transfor_inv(anchor, regr):\n \"\"\"\n return predict bbox\n \"\"\"\n\n Cya = (anchor[:, 1] + anchor[:, 3]) * 0.5\n ha = anchor[:, 3] - anchor[:, 1] + 1\n\n Vcx = regr[0, :, 0]\n Vhx = regr[0, :, 1]\n\n Cyx = Vcx * ha + Cya\n hx = np.exp(Vhx) * ha\n xt = (anchor[:, 0] + anchor[:, 2]) * 0.5\n\n x1 = xt - 16 * 0.5\n y1 = Cyx - hx * 0.5\n x2 = xt + 16 * 0.5\n y2 = Cyx + hx * 0.5\n bbox = np.vstack((x1, y1, x2, y2)).transpose()\n\n return bbox\n\n\ndef clip_box(bbox, im_shape):\n # x1 >= 0\n bbox[:, 0] = np.maximum(np.minimum(bbox[:, 0], im_shape[1] - 1), 0)\n # y1 >= 0\n bbox[:, 1] = np.maximum(np.minimum(bbox[:, 1], im_shape[0] - 1), 0)\n # x2 < im_shape[1]\n bbox[:, 2] = np.maximum(np.minimum(bbox[:, 2], im_shape[1] - 1), 0)\n # y2 < im_shape[0]\n bbox[:, 3] = np.maximum(np.minimum(bbox[:, 3], im_shape[0] - 1), 0)\n\n return bbox\n\n\ndef filter_bbox(bbox, 
minsize):\n ws = bbox[:, 2] - bbox[:, 0] + 1\n hs = bbox[:, 3] - bbox[:, 1] + 1\n keep = np.where((ws >= minsize) & (hs >= minsize))[0]\n return keep\n\n\ndef cal_rpn(imgsize, featuresize, scale, gtboxes):\n imgh, imgw = imgsize\n\n # gen base anchor\n base_anchor = gen_anchor(featuresize, scale)\n\n # calculate iou\n overlaps = cal_overlaps(base_anchor, gtboxes)\n\n # init labels -1 don't care 0 is negative 1 is positive\n labels = np.empty(base_anchor.shape[0])\n labels.fill(-1)\n\n # for each GT box corresponds to an anchor which has highest IOU\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n\n # the anchor with the highest IOU overlap with a GT box\n anchor_argmax_overlaps = overlaps.argmax(axis=1)\n anchor_max_overlaps = overlaps[range(overlaps.shape[0]), anchor_argmax_overlaps]\n\n # IOU > IOU_POSITIVE\n labels[anchor_max_overlaps > IOU_POSITIVE] = 1\n # IOU <IOU_NEGATIVE\n labels[anchor_max_overlaps < IOU_NEGATIVE] = 0\n # ensure that every GT box has at least one positive RPN region\n labels[gt_argmax_overlaps] = 1\n\n # only keep anchors inside the image\n outside_anchor = np.where(\n (base_anchor[:, 0] < 0) |\n (base_anchor[:, 1] < 0) |\n (base_anchor[:, 2] >= imgw) |\n (base_anchor[:, 3] >= imgh)\n )[0]\n labels[outside_anchor] = -1\n\n # subsample positive labels ,if greater than RPN_POSITIVE_NUM(default 128)\n fg_index = np.where(labels == 1)[0]\n if (len(fg_index) > RPN_POSITIVE_NUM):\n labels[np.random.choice(fg_index, len(fg_index) - RPN_POSITIVE_NUM, replace=False)] = -1\n\n # subsample negative labels\n bg_index = np.where(labels == 0)[0]\n num_bg = RPN_TOTAL_NUM - np.sum(labels == 1)\n if (len(bg_index) > num_bg):\n # print('bgindex:',len(bg_index),'num_bg',num_bg)\n labels[np.random.choice(bg_index, len(bg_index) - num_bg, replace=False)] = -1\n\n # calculate bbox targets\n # debug here\n bbox_targets = bbox_transfrom(base_anchor, gtboxes[anchor_argmax_overlaps, :])\n # bbox_targets=[]\n\n return [labels, bbox_targets], base_anchor\n\n\ndef nms(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n return keep\n\n\n# for predict\nclass Graph:\n def __init__(self, graph):\n self.graph = graph\n\n def sub_graphs_connected(self):\n sub_graphs = []\n for index in range(self.graph.shape[0]):\n if not self.graph[:, index].any() and self.graph[index, :].any():\n v = index\n sub_graphs.append([v])\n while self.graph[v, :].any():\n v = np.where(self.graph[v, :])[0][0]\n sub_graphs[-1].append(v)\n return sub_graphs\n\n\nclass TextLineCfg:\n SCALE = 600\n MAX_SCALE = 1200\n TEXT_PROPOSALS_WIDTH = 16\n MIN_NUM_PROPOSALS = 2\n MIN_RATIO = 0.5\n LINE_MIN_SCORE = 0.9\n MAX_HORIZONTAL_GAP = 60\n TEXT_PROPOSALS_MIN_SCORE = 0.7\n TEXT_PROPOSALS_NMS_THRESH = 0.3\n MIN_V_OVERLAPS = 0.6\n MIN_SIZE_SIM = 0.6\n\n\nclass TextProposalGraphBuilder:\n \"\"\"\n Build Text proposals into a graph.\n \"\"\"\n\n def get_successions(self, index):\n box = self.text_proposals[index]\n results = []\n for left in range(int(box[0]) + 1, 
min(int(box[0]) + TextLineCfg.MAX_HORIZONTAL_GAP + 1, self.im_size[1])):\n adj_box_indices = self.boxes_table[left]\n for adj_box_index in adj_box_indices:\n if self.meet_v_iou(adj_box_index, index):\n results.append(adj_box_index)\n if len(results) != 0:\n return results\n return results\n\n def get_precursors(self, index):\n box = self.text_proposals[index]\n results = []\n for left in range(int(box[0]) - 1, max(int(box[0] - TextLineCfg.MAX_HORIZONTAL_GAP), 0) - 1, -1):\n adj_box_indices = self.boxes_table[left]\n for adj_box_index in adj_box_indices:\n if self.meet_v_iou(adj_box_index, index):\n results.append(adj_box_index)\n if len(results) != 0:\n return results\n return results\n\n def is_succession_node(self, index, succession_index):\n precursors = self.get_precursors(succession_index)\n if self.scores[index] >= np.max(self.scores[precursors]):\n return True\n return False\n\n def meet_v_iou(self, index1, index2):\n def overlaps_v(index1, index2):\n h1 = self.heights[index1]\n h2 = self.heights[index2]\n y0 = max(self.text_proposals[index2][1], self.text_proposals[index1][1])\n y1 = min(self.text_proposals[index2][3], self.text_proposals[index1][3])\n return max(0, y1 - y0 + 1) / min(h1, h2)\n\n def size_similarity(index1, index2):\n h1 = self.heights[index1]\n h2 = self.heights[index2]\n return min(h1, h2) / max(h1, h2)\n\n return overlaps_v(index1, index2) >= TextLineCfg.MIN_V_OVERLAPS and \\\n size_similarity(index1, index2) >= TextLineCfg.MIN_SIZE_SIM\n\n def build_graph(self, text_proposals, scores, im_size):\n self.text_proposals = text_proposals\n self.scores = scores\n self.im_size = im_size\n self.heights = text_proposals[:, 3] - text_proposals[:, 1] + 1\n\n boxes_table = [[] for _ in range(self.im_size[1])]\n for index, box in enumerate(text_proposals):\n boxes_table[int(box[0])].append(index)\n self.boxes_table = boxes_table\n\n graph = np.zeros((text_proposals.shape[0], text_proposals.shape[0]), np.bool)\n\n for index, box in enumerate(text_proposals):\n successions = self.get_successions(index)\n if len(successions) == 0:\n continue\n succession_index = successions[np.argmax(scores[successions])]\n if self.is_succession_node(index, succession_index):\n # NOTE: a box can have multiple successions(precursors) if multiple successions(precursors)\n # have equal scores.\n graph[index, succession_index] = True\n return Graph(graph)\n\n\nclass TextProposalConnectorOriented:\n \"\"\"\n Connect text proposals into text lines\n \"\"\"\n\n def __init__(self):\n self.graph_builder = TextProposalGraphBuilder()\n\n def group_text_proposals(self, text_proposals, scores, im_size):\n graph = self.graph_builder.build_graph(text_proposals, scores, im_size)\n return graph.sub_graphs_connected()\n\n def fit_y(self, X, Y, x1, x2):\n # len(X) != 0\n # if X only include one point, the function will get line y=Y[0]\n if np.sum(X == X[0]) == len(X):\n return Y[0], Y[0]\n p = np.poly1d(np.polyfit(X, Y, 1))\n return p(x1), p(x2)\n\n def get_text_lines(self, text_proposals, scores, im_size):\n \"\"\"\n text_proposals:boxes\n\n \"\"\"\n # tp=text proposal\n tp_groups = self.group_text_proposals(text_proposals, scores, im_size) # 首先还是建图,获取到文本行由哪几个小框构成\n\n text_lines = np.zeros((len(tp_groups), 8), np.float32)\n\n for index, tp_indices in enumerate(tp_groups):\n text_line_boxes = text_proposals[list(tp_indices)] # 每个文本行的全部小框\n X = (text_line_boxes[:, 0] + text_line_boxes[:, 2]) / 2 # 求每一个小框的中心x,y坐标\n Y = (text_line_boxes[:, 1] + text_line_boxes[:, 3]) / 2\n\n z1 = np.polyfit(X, Y, 1) # 
多项式拟合,根据之前求的中心店拟合一条直线(最小二乘)\n\n x0 = np.min(text_line_boxes[:, 0]) # 文本行x坐标最小值\n x1 = np.max(text_line_boxes[:, 2]) # 文本行x坐标最大值\n\n offset = (text_line_boxes[0, 2] - text_line_boxes[0, 0]) * 0.5 # 小框宽度的一半\n\n # 以全部小框的左上角这个点去拟合一条直线,然后计算一下文本行x坐标的极左极右对应的y坐标\n lt_y, rt_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0 + offset, x1 - offset)\n # 以全部小框的左下角这个点去拟合一条直线,然后计算一下文本行x坐标的极左极右对应的y坐标\n lb_y, rb_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0 + offset, x1 - offset)\n\n score = scores[list(tp_indices)].sum() / float(len(tp_indices)) # 求全部小框得分的均值作为文本行的均值\n\n text_lines[index, 0] = x0\n text_lines[index, 1] = min(lt_y, rt_y) # 文本行上端 线段 的y坐标的小值\n text_lines[index, 2] = x1\n text_lines[index, 3] = max(lb_y, rb_y) # 文本行下端 线段 的y坐标的大值\n text_lines[index, 4] = score # 文本行得分\n text_lines[index, 5] = z1[0] # 根据中心点拟合的直线的k,b\n text_lines[index, 6] = z1[1]\n height = np.mean((text_line_boxes[:, 3] - text_line_boxes[:, 1])) # 小框平均高度\n text_lines[index, 7] = height + 2.5\n\n text_recs = np.zeros((len(text_lines), 9), np.float)\n index = 0\n for line in text_lines:\n b1 = line[6] - line[7] / 2 # 根据高度和文本行中心线,求取文本行上下两条线的b值\n b2 = line[6] + line[7] / 2\n x1 = line[0]\n y1 = line[5] * line[0] + b1 # 左上\n x2 = line[2]\n y2 = line[5] * line[2] + b1 # 右上\n x3 = line[0]\n y3 = line[5] * line[0] + b2 # 左下\n x4 = line[2]\n y4 = line[5] * line[2] + b2 # 右下\n disX = x2 - x1\n disY = y2 - y1\n width = np.sqrt(disX * disX + disY * disY) # 文本行宽度\n\n fTmp0 = y3 - y1 # 文本行高度\n fTmp1 = fTmp0 * disY / width\n x = np.fabs(fTmp1 * disX / width) # 做补偿\n y = np.fabs(fTmp1 * disY / width)\n if line[5] < 0:\n x1 -= x\n y1 += y\n x4 += x\n y4 -= y\n else:\n x2 += x\n y2 += y\n x3 -= x\n y3 -= y\n text_recs[index, 0] = x1\n text_recs[index, 1] = y1\n text_recs[index, 2] = x2\n text_recs[index, 3] = y2\n text_recs[index, 4] = x3\n text_recs[index, 5] = y3\n text_recs[index, 6] = x4\n text_recs[index, 7] = y4\n text_recs[index, 8] = line[4]\n index = index + 1\n\n return text_recs\n\n" ]
[ [ "numpy.array", "numpy.expand_dims", "torch.from_numpy", "numpy.random.randint" ], [ "numpy.polyfit", "numpy.minimum", "numpy.sqrt", "numpy.fabs", "numpy.max", "numpy.mean", "numpy.exp", "numpy.where", "numpy.hstack", "numpy.arange", "numpy.argmax", "numpy.zeros", "numpy.log", "numpy.min", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.empty", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rom1mouret/kak
[ "e6edb931d94f8a675ed6eb441a6b796dd8b35894" ]
[ "shoputils/shoputils/data_fitter.py" ]
[ "import yaml\nimport numpy as np\nimport re\nimport os\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .util import (\n to_gpu, split_chunk, slice_tensors,\n build_vocab, clean_strings, generate_uid,\n yaml_serializable, hash_strings\n)\n\n\nclass Net(nn.Module):\n def __init__(self, text_input_dim, text_embed_dim, text_output_dim,\n brand_dim, latent_dim):\n super(Net, self).__init__()\n\n self._latent_dim = latent_dim\n price_dim = 3\n price_embed_dim = 8\n other_dim = 4\n slope = 0.2\n\n hidden_dim = 2 * self._latent_dim\n self._dim = self._latent_dim + 2 * hidden_dim + price_embed_dim\n\n # product + model embeddings\n self._embedding = nn.EmbeddingBag(text_input_dim, text_embed_dim, mode='sum')\n\n # price embedding (sorta)\n self._price_proc = nn.Sequential(\n nn.Linear(price_dim, 128),\n nn.LeakyReLU(slope),\n nn.Linear(128, price_embed_dim)\n )\n\n # encoder\n self._encoder = nn.Sequential(\n nn.Linear(text_embed_dim + price_embed_dim + other_dim, 2048),\n nn.LeakyReLU(slope),\n nn.Linear(2048, hidden_dim),\n nn.LeakyReLU(slope),\n nn.Linear(hidden_dim, self._latent_dim),\n )\n\n # decoders\n self._txt_decoder = nn.Sequential(\n nn.Linear(latent_dim, 2048),\n nn.LeakyReLU(slope),\n nn.Dropout(0.25),\n nn.Linear(2048, text_output_dim)\n )\n\n self._brand_decoder = nn.Sequential(\n nn.Linear(latent_dim, hidden_dim),\n nn.LeakyReLU(slope),\n nn.Linear(hidden_dim, brand_dim),\n nn.LogSoftmax()\n )\n\n self._maker_decoder = nn.Sequential(\n nn.Linear(latent_dim, hidden_dim),\n nn.LeakyReLU(slope),\n nn.Linear(hidden_dim, brand_dim),\n nn.LogSoftmax()\n )\n\n for layer in (self._price_proc[0],\n self._encoder[0], self._encoder[2],\n self._txt_decoder[0],\n self._brand_decoder[0], self._maker_decoder[0]):\n nn.init.xavier_normal_(\n layer.weight,\n gain=nn.init.calculate_gain(\"leaky_relu\", param=slope))\n\n def dim(self):\n return self._dim\n\n def extract_features(self, batch):\n latent, price = self.latent_features(batch)\n b = F.linear(latent, self._brand_decoder[0].weight)\n m = F.linear(latent, self._maker_decoder[0].weight)\n\n return torch.cat([latent, price, b, m], dim=1)\n\n def latent_features(self, batch):\n # embeddings\n embedded = self._embedding(batch[\"text\"])\n\n # price pre-processing\n price = self._price_proc(batch[\"price\"])\n\n # encoder input\n inp = torch.cat([embedded, price, batch[\"missing\"]], dim=1)\n\n # encoder\n encoded = self._encoder(inp)\n\n return encoded, price\n\n def forward(self, batch):\n latent = self.latent_features(batch)[0]\n txt = self._txt_decoder(latent)\n brand = self._brand_decoder(latent)\n maker = self._maker_decoder(latent)\n\n return txt, brand, maker\n\n\nclass DataFitter:\n def __init__(self, static_params, gpu=-1, gpu_batch_size=16, out_dir=\"models/\"):\n self._gpu = gpu\n self._gpu_batch_size = gpu_batch_size\n trained = type(static_params) is not dict\n\n if trained:\n self._yaml_path = static_params\n with open(self._yaml_path, \"r\", encoding=\"utf-8\") as f:\n static_params = yaml.load(f)\n\n dirloc = os.path.dirname(self._yaml_path)\n self._torch_mdl_path = os.path.join(dirloc, static_params[\"torch_file\"])\n self._vocab_path = os.path.join(dirloc, static_params[\"vocab_file\"])\n else:\n prefix = \"fitter_\"\n uid = generate_uid()\n print(\"creating new model\", uid)\n self._torch_mdl_path = os.path.join(out_dir, \"%s%s.torch\" % (prefix, uid))\n self._yaml_path = os.path.join(out_dir, \"%s%s.yml\" % (prefix, uid))\n self._vocab_path = os.path.join(out_dir, \"vocab_%s.yml\" 
% (uid, ))\n\n self._static_params = static_params\n\n def net_factory():\n net = Net(\n text_input_dim=static_params[\"text_input_dim\"],\n text_embed_dim=static_params[\"text_embed_dim\"],\n text_output_dim=static_params[\"text_output_dim\"],\n brand_dim=static_params[\"brand_dim\"],\n latent_dim=static_params[\"latent_dim\"]\n )\n\n if self._gpu >= 0:\n net = net.cuda(self._gpu)\n\n return net\n\n self._net_factory = net_factory\n self._net = net_factory()\n self._bce_loss = nn.BCEWithLogitsLoss(reduction='sum')\n self._nll_loss = nn.NLLLoss(reduction='sum')\n\n self._foreign = re.compile(\"[a-z]+\")\n self._hangul = re.compile(\"[가-힣]\")\n self._number = re.compile(\"[0-9]+\")\n\n if trained:\n self.load(self._yaml_path)\n\n def __getitem__(self, key):\n return self._static_params[key]\n\n def train_preprocessing(self, chunk):\n # brand & maker\n vocab_dim = self._static_params[\"brand_dim\"]\n self._brand_vocab = build_vocab(chunk[\"brand\"], vocab_dim, shift=1)\n self._maker_vocab = build_vocab(chunk[\"maker\"], vocab_dim, shift=1)\n\n vocab = {\n \"brand\": self._brand_vocab,\n \"maker\": self._maker_vocab\n }\n\n with open(self._vocab_path, \"w\", encoding=\"utf-8\") as f:\n yaml.dump(vocab, f)\n\n # prices\n prices = chunk[\"price\"].copy().astype(np.float32)\n missing = prices == -1\n\n log_prices = np.log(1.01 + prices)\n log_prices[missing] = np.nan\n prices[missing] = np.nan\n\n self._logprice_median = np.nanmedian(log_prices)\n self._logprice_std = np.nanstd(log_prices)\n self._price_median = np.nanmedian(prices)\n self._price_std = np.nanstd(prices)\n\n print(\"price median\", self._price_median, \"price std\", self._price_std)\n\n def _tokenize(self, text):\n \"\"\" gets the text input ready for embedding \"\"\"\n text = text.lower()\n arr = list(self._hangul.findall(text))\n arr += list(self._foreign.findall(text))\n if self._number.search(text):\n arr.append(\"9\")\n\n return set(arr)\n\n def _text_output(self, sequences):\n vocab_dim = self._static_params[\"text_output_dim\"]\n X = torch.zeros(len(sequences), vocab_dim)\n for i, seq in enumerate(sequences):\n indices = hash_strings(seq, mod=vocab_dim, seed=1)\n if len(indices) > 0:\n X[[i] * len(indices), indices] = 1\n\n return X\n\n def _embedding_input(self, sequences):\n vocab_dim = self._static_params[\"text_input_dim\"]\n max_length = max(1, max(map(len, sequences)))\n inp = torch.zeros(len(sequences), max_length, dtype=torch.int64)\n for i, seq in enumerate(sequences):\n indices = hash_strings(seq, mod=vocab_dim-1, seed=0) + 1\n inp[i, :len(indices)] = torch.LongTensor(indices)\n\n return inp\n\n def _build_batch(self, chunk, with_targets=True, with_idf=True, gpu=-1):\n cols = {} # to store a copy of the chunk\n\n # decoding\n for col_name in (\"product\", \"model\"):\n cols[col_name] = (s.decode(\"utf-8\") for s in chunk[col_name])\n\n # tokenization\n sequences = []\n for product, model in zip(cols[\"product\"], cols[\"model\"]):\n sequences.append(self._tokenize(product) | self._tokenize(model))\n\n # text input\n text = self._embedding_input(sequences)\n\n # missing columns (with no decoding required)\n missing_product = map(bool, chunk[\"product\"])\n missing_model = map(bool, chunk[\"product\"])\n missing_brand = map(bool, chunk[\"brand\"])\n missing_maker = map(bool, chunk[\"maker\"])\n tranposed = zip(missing_product, missing_model, missing_brand, missing_maker)\n missing = torch.Tensor(list(tranposed))\n\n # prices\n prices = chunk[\"price\"]\n price_missing = prices == -1\n log_prices = (np.log(1.01 + 
prices) - self._logprice_median) / self._logprice_std\n log_prices[price_missing] = 0\n reg_prices = (prices - self._price_median) / self._price_std\n reg_prices[price_missing] = 0\n np_price = np.column_stack([price_missing.astype(np.float32), log_prices, reg_prices])\n price_feat = torch.from_numpy(np_price.astype(np.float32))\n\n if with_targets:\n # brand & makers\n for col_name in (\"brand\", \"maker\"):\n cols[col_name] = clean_strings((s.decode(\"utf-8\") for s in chunk[col_name]))\n\n # maker & brand\n makers = torch.LongTensor([self._maker_vocab.get(v, 0) for v in cols[\"maker\"]])\n brands = torch.LongTensor([self._brand_vocab.get(v, 0) for v in cols[\"brand\"]])\n\n # text\n if with_idf:\n y_text = self._text_output(sequences)\n else:\n y_text = None\n\n # move everything to GPU\n y = to_gpu(gpu, y_text, makers, brands)\n else:\n y = None\n\n batch = {\n \"text\": text,\n \"price\": price_feat,\n \"missing\": missing\n }\n\n batch = to_gpu(gpu, batch)[0]\n\n return batch, y\n\n def extract_features(self, chunk):\n batch, y = self._build_batch(chunk, with_targets=False, gpu=self._gpu)\n return self._net.extract_features(batch).data\n\n def prediction_err(self, chunk):\n batch, (y, brand, maker) = self._build_batch(chunk, with_targets=True, gpu=self._gpu)\n y_pred, brand_pred, maker_pred = self._net(batch)\n bce = nn.BCEWithLogitsLoss(reduction='none')\n nll = nn.NLLLoss(reduction='none')\n err1 = bce(y_pred, y).sum(dim=1).data\n err2 = nll(brand_pred, brand).data\n err3 = nll(maker_pred, maker).data\n\n return err1, err2, err3\n\n def find_err_thresholds(self, chunk):\n self._net.eval()\n minichunks = split_chunk(chunk, batch_size=self._gpu_batch_size)\n errs1, errs2, errs3 = [], [], []\n for minichunk in minichunks:\n err1, err2, err3 = self.prediction_err(minichunk)\n errs1 += err1.tolist()\n errs2 += err2.tolist()\n errs3 += err3.tolist()\n\n errs = np.array([errs1, errs2, errs3])\n\n thresholds = {}\n for p in (50, 75, 80, 85, 90, 92, 95, 96, 97, 98, 99):\n thresholds[p] = np.percentile(errs, p, axis=1).tolist()\n\n # save in yaml file\n with open(self._yaml_path, \"r\", encoding=\"utf-8\") as f:\n content = yaml.load(f)\n content[\"thresholds\"] = thresholds\n with open(self._yaml_path, \"w\", encoding=\"utf-8\") as f:\n yaml.dump(content, f, default_flow_style=False)\n\n self._net.train()\n\n return thresholds\n\n def dim(self):\n return self._net.dim()\n\n def prepare_hyperopt_dataset(self, train_set):\n self._train_set = self._build_batch(train_set)\n\n def set_holdout_val_set(self, val_set):\n chunks = split_chunk(val_set, batch_size=self._gpu_batch_size,\n max_rows=5000)\n print(\"holdout batch size:\", len(chunks) * self._gpu_batch_size)\n self._val_batches = list(map(self._build_batch, chunks))\n\n def train_and_eval(self, space):\n # copy the current net + new optimization hyperparameters\n net = self._net_factory()\n if os.path.exists(self._torch_mdl_path):\n net.load_state_dict(torch.load(self._torch_mdl_path))\n\n optim = self._create_optimizer(net, space)\n\n # training\n train_batch, (y, brand, maker) = self._train_set\n batch_size = int(space[\"batch_size\"])\n L = y.size(0)\n ordering = np.random.permutation(L) # gets us a meaningful variance to provide to Bayesian optim\n for k in range(0, L, batch_size):\n # forward\n loss = 0\n for i in range(k, min(L, k+batch_size), self._gpu_batch_size):\n indices = ordering[i:i+self._gpu_batch_size]\n batch, y_true, brand_true, maker_true = \\\n slice_tensors(indices, train_batch, y, brand, maker)\n batch, y_true, 
brand_true, maker_true = \\\n to_gpu(self._gpu, batch, y_true, brand_true, maker_true)\n\n # prediction\n y_pred, brand_pred, maker_pred = net(batch)\n\n # loss\n loss = loss + self._bce_loss(y_pred, y_true) + \\\n self._nll_loss(brand_pred, brand_true) + \\\n self._nll_loss(maker_pred, maker_true)\n\n # backward\n optim.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(net.parameters(), space[\"clipping\"])\n optim.step()\n\n # evaluation\n return self._validate(net.eval())[0]\n\n def reset_optimizer(self, space):\n self._optim = self._create_optimizer(self._net, space)\n self._space = space\n\n def train_on_batch(self, chunk):\n mini_chunks = split_chunk(chunk, batch_size=self._gpu_batch_size)\n\n # forward\n loss = 0\n for mini_chunk in mini_chunks:\n mini_batch, (y_true, maker_true, brand_true) = \\\n self._build_batch(mini_chunk, gpu=self._gpu)\n y_pred, maker_pred, brand_pred = self._net(mini_batch)\n loss = loss + self._bce_loss(y_pred, y_true) + \\\n self._nll_loss(maker_pred, maker_true) + \\\n self._nll_loss(brand_pred, brand_true)\n\n # backward\n self._optim.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self._net.parameters(), self._space[\"clipping\"])\n self._optim.step()\n\n def save(self, checkpoint, loss, report):\n torch.save(self._net.state_dict(), self._torch_mdl_path)\n\n metadata = {\n \"price_median\": self._price_median,\n \"price_std\": self._price_std,\n \"logprice_median\": self._logprice_median,\n \"logprice_std\": self._logprice_std,\n \"feat_extract_dim\": self._net.dim(),\n \"loss\": loss,\n \"checkpoint\": checkpoint,\n \"torch_file\": os.path.basename(self._torch_mdl_path),\n \"vocab_file\": os.path.basename(self._vocab_path),\n \"time\": time.strftime(\"%Y-%m-%d %H:%M\")\n }\n metadata.update(report)\n metadata.update(self._space)\n metadata.update(self._static_params)\n metadata = yaml_serializable(metadata)\n\n with open(self._yaml_path, \"w\") as f:\n yaml.dump(metadata, f, default_flow_style=False)\n\n return self._yaml_path\n\n def load(self, yaml_file):\n \"\"\" build the model structure before calling this method \"\"\"\n print(\"loading\", yaml_file)\n with open(yaml_file, \"r\") as f:\n metadata = yaml.load(f)\n self._space = metadata\n\n if \"thresholds\" in metadata:\n self._thresholds = metadata[\"thresholds\"]\n\n # price stuff\n self._price_std = metadata[\"price_std\"]\n self._price_median = metadata[\"price_median\"]\n self._logprice_std = metadata[\"logprice_std\"]\n self._logprice_median = metadata[\"logprice_median\"]\n\n # vocab\n with open(self._vocab_path, \"r\", encoding=\"utf-8\") as f:\n vocab = yaml.load(f)\n self._brand_vocab = vocab[\"brand\"]\n self._maker_vocab = vocab[\"maker\"]\n\n # torch network\n self._net.load_state_dict(torch.load(self._torch_mdl_path))\n\n if self._gpu >= 0:\n self._net = self._net.cuda(self._gpu)\n\n def thresholds(self):\n return self._thresholds\n\n def eval(self):\n self._net = self._net.eval()\n return self\n\n def validate(self):\n net = self._net.eval()\n val = self._validate(net)\n self._net.train()\n\n return val\n\n def _create_optimizer(self, net, space):\n optim = torch.optim.SGD(\n net.parameters(),\n lr=space[\"lr\"],\n momentum=space[\"momentum\"],\n weight_decay=0)\n\n return optim\n\n def _validate(self, net):\n loss = 0\n n = 0\n for batch, (y_true, maker_true, brand_true) in self._val_batches:\n batch, y_true, maker_true, brand_true = \\\n to_gpu(self._gpu, batch, y_true, maker_true, brand_true)\n y_pred, maker_pred, brand_pred = net(batch)\n loss += 
self._bce_loss(y_pred, y_true) + \\\n self._nll_loss(maker_pred, maker_true) + \\\n self._nll_loss(brand_pred, brand_true)\n n += y_pred.size(0)\n\n loss /= 3 * n\n\n return loss.item(), {}\n" ]
[ [ "torch.nn.init.calculate_gain", "torch.nn.NLLLoss", "numpy.log", "numpy.nanmedian", "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.cat", "torch.LongTensor", "torch.load", "numpy.percentile", "torch.nn.Linear", "torch.nn.BCEWithLogitsLoss", "numpy.random.permutation", "torch.nn.LeakyReLU", "numpy.nanstd", "numpy.array", "torch.nn.functional.linear", "torch.nn.EmbeddingBag" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pvalienteverde/MeetUpIntroMLySistemasRecomendacion
[ "50e24dfc5303b0d4a9edaf4ff634d25388351343" ]
[ "scripts/Introduccion/utiles.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef to_hot_encoding(datos,caracteristicas_categoricas): \n for cat in caracteristicas_categoricas:\n one_encoding=pd.get_dummies(datos[cat],prefix=cat)\n datos=pd.concat([datos,one_encoding],axis=1)\n del datos[cat] \n return datos\n\ndef mostrar_imagenes(datos,target=None,prediccion=None):\n fig = plt.figure(figsize=(15, 3))\n n,_=datos.shape\n for i in range(n):\n titulo=''\n if not target is None: \n titulo=\"T:{},\".format(target[i])\n if not prediccion is None:\n titulo=\"{}P:{}\".format(titulo,prediccion[i])\n\n ax = fig.add_subplot(1, n, 1 + i, xticks=[], yticks=[],title=titulo)\n ax.imshow(datos[i].reshape((8, 8)), cmap=plt.cm.binary)\n \n" ]
[ [ "pandas.concat", "pandas.get_dummies", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Timsbim/Show-COVID-19-Data
[ "170109015eef03d0639fb48840aeb46133beb139" ]
[ "lib/showing.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom lib.basics import *\nfrom lib.prepping import get_base_data\n\n\n# Showing the data\n\n\ndef get_country_data_to_show(date_, plots, *countries, length=1000):\n \"\"\"Returns the data from day date_ for the categories and variants defined\n in the dictionary plots and the countries, all loaded in one dictionary\n \"\"\"\n tbl = pd.read_json(\n get_data_file_path(date_, file_format=\"json.gz\"),\n orient=\"table\",\n compression=\"gzip\",\n ).sort_index()\n\n data = dict()\n for country in countries:\n data[country] = {category: {} for category in plots}\n for category, variant, country in [\n (category, variant, country)\n for category in plots\n for variant in plots[category]\n for country in countries\n ]:\n data[country][category][variant] = tbl.loc[\n (category, variant), country\n ].tail(length)\n\n return data\n\n\ndef get_title_translation():\n \"\"\"Returns dictionary which translates shortcuts in text suitable for plot\n titles\n \"\"\"\n return json.load(get_settings_file_path(\"title_translation\").open(\"r\"))\n\n\ndef setup_ax(ax, days):\n \"\"\"Set up the axes for the plots\"\"\"\n\n # Months\n months = (\n \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\",\n \"Nov\", \"Dec\",\n )\n\n # Setting the viewable range of x-axis\n ax.set_xlim(-2, len(days) + 1)\n\n # Setting the ticks (positions and labels) mostly on x-axis\n\n # Minor ticks: Only 3 for each month, roughly the end of the 1., 2. and 3.\n # week\n minor_ticks = [i for i in range(len(days)) if days[i].day in (8, 16, 24)]\n\n # Minor labels: Day of minor tick, i.e. 8, 16 and 24\n minor_labels = [days[i].day for i in minor_ticks]\n\n # Major: Ticks = months end/beginning, label: short version of months name\n major_ticks = []\n major_labels = []\n month = days[0].month\n for i, day in enumerate(days[1:], start=1):\n # Detecting the beginning of a new month\n if day.month != month:\n major_ticks.append(i)\n month = day.month\n major_labels.append(months[month - 1])\n\n # Actually setting the prepared ticks/labels, including the label size\n ax.xaxis.set_ticks(minor_ticks, minor=True)\n ax.xaxis.set_ticklabels(minor_labels, minor=True)\n ax.xaxis.set_ticks(major_ticks, minor=False)\n ax.xaxis.set_ticklabels(major_labels, minor=False)\n ax.xaxis.set_tick_params(which=\"both\", labelsize=10)\n ax.yaxis.set_tick_params(which=\"both\", labelsize=12)\n\n # Setting the grid\n ax.grid(True, which=\"both\")\n ax.grid(which=\"major\", linestyle=\"dashed\", linewidth=2)\n ax.grid(which=\"minor\", linestyle=\"dashed\")\n\n # Setting the labels of the x-axis, including the font size\n ax.set_xlabel(\"day\", fontsize=14)\n\n\ndef show_countries(date_, *countries, length=1000):\n \"\"\"Creates a standard set of plots for every country provided by the\n argument countries (usually a list). 
The set contains:\n - Confirmed cases, cumulative and diffs (including the 1-week-moving\n average)\n - Deaths, cumulative and diffs (including the 1-week-moving average)\n - Active cases, cumulative and diffs (including the 1-week-moving\n average)\n The plots are available in single-plot files, files per category\n (containing 2 plots), and a file containing all 6 plots\n \"\"\"\n print_log(f\"Plotting countries: {str.join(', ', countries)} ...\")\n\n # Defining the plots that should be included\n plots = {\n \"confirmed\": [\"cum\", \"diff\", \"diff_ma1w\"],\n \"deaths\": [\"cum\", \"diff\", \"diff_ma1w\"],\n \"active\": [\"cum\", \"diff\", \"diff_ma1w\"],\n }\n categories = plots\n\n # Getting the title text bits\n trsl = get_title_translation()\n iso3_to_name = get_base_data(date_, columns=(\"iso3\", \"name\"))\n\n # Read data from files produced by prepare_data\n data = get_country_data_to_show(date_, plots, *countries, length=length)\n\n # Creating the plots for the selected countries\n title_font_size = 30\n for country in countries:\n # Creating the figure which includes all plots\n fig_all, axs_all = plt.subplots(3, 2, figsize=(40, 50))\n fig_all.suptitle(\n iso3_to_name[country], fontsize=title_font_size, fontweight=\"bold\"\n )\n\n for i, category in enumerate(categories):\n # Creating the figure for all plots per category\n fig_cat, axs_cat = plt.subplots(2, 1, figsize=(20, 25))\n fig_cat.suptitle(\n f\"{iso3_to_name[country]} - {trsl[category]}\",\n fontsize=title_font_size,\n fontweight=\"bold\",\n )\n\n for j, variant in enumerate([\"cum\", \"diff\"]):\n series = data[country][category][variant]\n days = list(series.index)\n\n # Creating the figure for single plot (category and variant)\n fig, axs = plt.subplots(figsize=(25, 16))\n fig.suptitle(\n f\"{iso3_to_name[country]} - \"\n f\"{trsl[category]} - {trsl[variant]}\",\n fontsize=title_font_size,\n fontweight=\"bold\",\n )\n for ax in [axs, axs_cat[j], axs_all[i][j]]:\n ax.set_title(\n f\"{trsl[category]} - {trsl[variant]}\", fontsize=20\n )\n setup_ax(ax, days)\n ax.plot(\n list(range(len(series.index))), series.values, \"bo\"\n )\n if variant == \"diff\":\n series_ma = data[country][category][\"diff_ma1w\"]\n ax.plot(\n list(range(len(series_ma.index))),\n series_ma.values,\n \"r-\",\n label=trsl[\"diff_ma1w\"],\n )\n ax.legend(fontsize=\"xx-large\")\n\n # Due to data corrections there are sometimes negative\n # diffs for confirmed cases, which should be always\n # non-negative. 
This can lead to distorted plots and is\n # therefore adjusted by setting the minimum value of the\n # y-axis to -25.\n if category == \"confirmed\" and variant == \"diff\":\n ax.set_ylim(bottom=-25)\n\n # Saving the figure for single plot\n fig.align_labels()\n fig.savefig(\n get_plot_file_path(date_, country, category, variant)\n )\n\n # Saving the figure with all plots per category\n fig_cat.align_labels()\n fig_cat.savefig(get_plot_file_path(date_, country, category))\n\n # Saving the figure with all plots\n fig_all.align_labels()\n fig_all.savefig(get_plot_file_path(date_, country))\n\n plt.close(\"all\")\n\n print_log(f\"Plots for {country} finished\")\n\n print_log(\"Plotting finished\")\n\n\ndef get_group_data_to_show(date_, plots, groups, length=1000):\n \"\"\"Returns the data from day dte for the categories and variants defined\n in the dictionary plots and the groups in list groups, loaded into a\n dictionary\n \"\"\"\n tbl = pd.read_json(\n get_data_file_path(date_, file_format=\"json.gz\"),\n orient=\"table\",\n compression=\"gzip\",\n ).sort_index()\n\n data = dict()\n for group in groups:\n data[group] = {category: {} for category in plots}\n for category, variant, group in [\n (category, variant, group)\n for category in plots\n for variant in plots[category]\n for group in groups\n ]:\n data[group][category][variant] = tbl.loc[\n (category, variant), groups[group]\n ].tail(length)\n\n return data\n\n\ndef show_groups(date_, groups, length=1000):\n \"\"\"Creates a standard set of plots for groups of countries provided by the\n argument groups (a dictionary). The set contains:\n - Confirmed cases per million, cumulative and diffs (including the\n 1-week-moving average)\n - Deaths per 100,000, cumulative and diffs (including the 1-week-moving\n average)\n - Active cases per million, cumulative and diffs (including the\n 1-week-moving average)\n The plots are available in single-plot files, files per category\n (containing 2 plots), and a file containing all 6 plots\n \"\"\"\n # Defining the plots that should be included\n plots = {\n \"confirmed\": [\"cum_rel_popmio\", \"diff_rel_popmio_ma1w\"],\n \"deaths\": [\"cum_rel_pop100k\", \"diff_rel_pop100k_ma1w\"],\n \"active\": [\"cum_rel_popmio\", \"diff_rel_popmio_ma1w\"],\n }\n trsl = get_title_translation()\n categories = plots\n\n # Reading data from files produced by prepare_data\n data = get_group_data_to_show(date_, plots, groups, length)\n\n title_font_size = 30\n for group in groups:\n print_log(\n f\"Plotting group {group} with countries \"\n f\"{str.join(', ', groups[group])} ...\"\n )\n\n # Creating list of countries in group\n countries = groups[group]\n\n # Creating the figure which includes all plots\n fig_all, axs_all = plt.subplots(3, 2, figsize=(40, 50))\n fig_all.suptitle(group, fontsize=title_font_size, fontweight=\"bold\")\n for i, category in enumerate(categories):\n variants = plots[category]\n\n # Creating the figure for all plots per category\n fig_cat, axs_cat = plt.subplots(2, 1, figsize=(20, 25))\n fig_cat.suptitle(\n f\"{group} - {trsl[category]}\",\n fontsize=title_font_size,\n fontweight=\"bold\",\n )\n for j, variant in enumerate(variants):\n # Creating the figure for single plot (category and variant)\n fig, axs = plt.subplots(figsize=(25, 16))\n fig.suptitle(\n f\"{group} - {trsl[category]} - {trsl[variant]}\",\n fontsize=title_font_size,\n fontweight=\"bold\",\n )\n for ax in [axs, axs_cat[j], axs_all[i][j]]:\n ax.set_title(\n f\"{trsl[category]} - {trsl[variant]}\", fontsize=20\n )\n days = 
list(data[group][category][variant].index)\n setup_ax(ax, days)\n ax.plot(\n list(range(len(days))),\n data[group][category][variant][countries],\n \"o\",\n )\n ax.legend(countries)\n\n # Due to data corrections there are sometimes negative\n # diffs for confirmed cases, which should be always\n # non-negative. This can lead to distorted plots and is\n # therefore adjusted by setting the minimum value of the\n # y-axis to -10.\n if (\n category == \"confirmed\"\n and variant == \"diff_rel_popmio_ma1w\"\n ):\n ax.set_ylim(bottom=-10)\n\n # Saving the figure with single plot\n fig.align_labels()\n fig.savefig(\n get_plot_file_path(date_, group, category, variant)\n )\n\n # Saving the figure with all plots per category\n fig_cat.align_labels()\n fig_cat.savefig(get_plot_file_path(date_, group, category))\n\n # Saving the figure with all plots\n fig_all.align_labels()\n fig_all.savefig(get_plot_file_path(date_, group))\n plt.close(\"all\")\n\n print_log(\"Plotting finished\")\n\n\ndef show_countries_beyond_threshold(\n date_, category, variant, threshold, *countries\n):\n \"\"\"Creates a plot for the variable category -> variant for the group of\n countries. Here the plots are \"normalized\": The series starts with the\n day the variable first exceeds the threshold. I.e., the x-axis just\n shows the number of days (past exceeding the threshold), not calendar\n days.\n \"\"\"\n # Fetching the relevant data and loading it into a DataFrame\n plots = {category: [variant]}\n data = get_country_data_to_show(date_, plots, *countries)\n tbl = pd.DataFrame()\n for country in countries:\n tbl.insert(\n loc=len(tbl.columns),\n column=country,\n value=data[country][category][variant],\n )\n\n # Initializing the plot\n fig, ax = plt.subplots(figsize=(20, 7.5))\n\n # Processing the series into the plot\n for country in tbl.columns:\n series = tbl[country]\n series.index = np.arange(len(series.index))\n try:\n # Adjust the series: New series starts with the first day it\n # exceeds the threshold\n start = series[series > threshold].index[0]\n series_new = pd.Series(series[start:].values)\n series_new.name = series.name\n # Adding new series to plot\n ax.plot(series_new, \".\")\n finally:\n # If all values of the series are below the threshold the series\n # isn't included in the plot (obviously)\n pass\n\n # Setting up the plot\n ax.grid(which=\"major\", linestyle=\"dashed\", linewidth=1)\n trsl = get_title_translation()\n ax.set_title(\n f\"{trsl[category]} - {trsl[variant]}: \"\n f\"Days beyond threshold ({threshold})\"\n )\n ax.set_xlabel(f\"days\")\n ax.set_ylabel(f\"{category}\")\n ax.legend(countries)\n plt.show()\n plt.close(\"all\")\n" ]
[ [ "pandas.Series", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.close", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
wclark3/machine-learning
[ "f4f09d6d1efa022d9c34647883e49ae8e2f1fe6c" ]
[ "final-project/old/gpu-tutorial/kfkd.py" ]
[ "# file kfkd.py\nimport os\n\nimport numpy as np\nfrom pandas.io.parsers import read_csv\nfrom sklearn.utils import shuffle\n\n# Implicit import needed in newer theano versions\n# https://groups.google.com/forum/#!msg/lasagne-users/gEtFrC8mkms/0oNCDbSKbTkJ\nimport lasagne.layers.cuda_convnet\n\nFTRAIN = '../data/training.csv'\nFTEST = '../data/test.csv'\n\n\ndef load(test=False, cols=None):\n \"\"\"Loads data from FTEST if *test* is True, otherwise from FTRAIN.\n Pass a list of *cols* if you're only interested in a subset of the\n target columns.\n \"\"\"\n fname = FTEST if test else FTRAIN\n df = read_csv(os.path.expanduser(fname)) # load pandas dataframe\n\n # The Image column has pixel values separated by space; convert\n # the values to numpy arrays:\n df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))\n\n if cols: # get a subset of columns\n df = df[list(cols) + ['Image']]\n\n print(df.count()) # prints the number of values for each column\n df = df.dropna() # drop all rows that have missing values in them\n\n X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]\n X = X.astype(np.float32)\n\n if not test: # only FTRAIN has any target columns\n y = df[df.columns[:-1]].values\n y = (y - 48) / 48 # scale target coordinates to [-1, 1]\n X, y = shuffle(X, y, random_state=42) # shuffle train data\n y = y.astype(np.float32)\n else:\n y = None\n\n return X, y\n\n\nfrom lasagne import layers\nfrom lasagne.updates import nesterov_momentum\nfrom nolearn.lasagne import NeuralNet\n\nnet1 = NeuralNet(\n layers=[ # three layers: one hidden layer\n ('input', layers.InputLayer),\n ('hidden', layers.DenseLayer),\n ('output', layers.DenseLayer),\n ],\n # layer parameters:\n input_shape=(128, 9216), # 128 images per batch times 96x96 input pixels\n hidden_num_units=100, # number of units in hidden layer\n output_nonlinearity=None, # output layer uses identity function\n output_num_units=30, # 30 target values\n\n # optimization method:\n update=nesterov_momentum,\n update_learning_rate=0.01,\n update_momentum=0.9,\n\n regression=True, # flag to indicate we're dealing with regression problem\n max_epochs=400, # we want to train this many epochs\n verbose=1,\n )\n\nX, y = load()\nnet1.fit(X, y)\n\ndef load2d(test=False, cols=None):\n X, y = load(test=test)\n X = X.reshape(-1, 1, 96, 96)\n return X, y\n\n\n# use the cuda-convnet implementations of conv and max-pool layer\nConv2DLayer = layers.cuda_convnet.Conv2DCCLayer\nMaxPool2DLayer = layers.cuda_convnet.MaxPool2DCCLayer\n\nnet2 = NeuralNet(\n layers=[\n ('input', layers.InputLayer),\n ('conv1', layers.Conv2DLayer),\n ('pool1', layers.MaxPool2DLayer),\n ('conv2', layers.Conv2DLayer),\n ('pool2', layers.MaxPool2DLayer),\n ('conv3', layers.Conv2DLayer),\n ('pool3', layers.MaxPool2DLayer),\n ('hidden4', layers.DenseLayer),\n ('hidden5', layers.DenseLayer),\n ('output', layers.DenseLayer),\n ],\n input_shape=(None, 1, 96, 96),\n conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),\n conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),\n conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),\n hidden4_num_units=500, hidden5_num_units=500,\n output_num_units=30, output_nonlinearity=None,\n\n update_learning_rate=0.01,\n update_momentum=0.9,\n\n regression=True,\n max_epochs=1000,\n verbose=1,\n )\n\n#X, y = load2d() # load 2-d data\n#net2.fit(X, y)\n\n# Training for 1000 epochs will take a while. 
We'll pickle the\n# trained model so that we can load it back later:\nimport cPickle as pickle\nwith open('net2.pickle', 'wb') as f:\n pickle.dump(net2, f, -1)\n" ]
[ [ "sklearn.utils.shuffle", "numpy.fromstring", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nanfeng1101/DBQA
[ "56b0a320e4641f46e80db78039d4ca79e9037d7e" ]
[ "torch_models/qa_cnn.py" ]
[ "# -*- coding:utf-8 -*-\n__author__ = 'chenjun'\n\nimport torch\nimport torch.nn as nn\nfrom torch_models.layers import InteractLayer, BatchNormLayer, MLP, MLPDropout\n\n\nclass CNNModule(nn.Module):\n \"\"\"\n qa_cnn module.\n \"\"\"\n def __init__(self, feature_maps, filter_shape, pool_size, channels=1):\n \"\"\"\n qa_cnn module init.\n :param feature_maps: feature maps(filter_num) after convolution.\n :param filter_shape: filter shape for convolution.\n :param pool_size: pool size for max pooling.\n :param channels: in channels, default=1.\n \"\"\"\n super(CNNModule, self).__init__()\n self.cnn_layer = nn.Sequential(nn.Conv2d(channels, feature_maps, filter_shape),\n nn.ReLU(),\n nn.MaxPool2d(pool_size)\n )\n\n def forward(self, q_input, a_input):\n \"\"\"\n convolution + max_pool for q_input and a_input.\n :param q_input: q_input vec.\n :param a_input: a_inut vec.\n :return:\n \"\"\"\n q_out = self.cnn_layer(q_input)\n a_out = self.cnn_layer(a_input)\n return q_out, a_out\n\n\nclass InceptionModule(nn.Module):\n \"\"\"\n simple inception module.\n \"\"\"\n def __init__(self, img_h, img_w, filter_windows, filter_num):\n \"\"\"\n inception module init.\n :param img_h: sentence length\n :param img_w: embedding dim\n :param filter_windows: multi filter height\n :param filter_num: feature maps\n \"\"\"\n super(InceptionModule, self).__init__()\n self.img_h = img_h\n self.img_w = img_w\n self.filter_windows = filter_windows\n self.filter_num = filter_num\n self.num_feature_maps = len(filter_windows) * filter_num\n self.layers_num, self.filter_shapes, self.pool_sizes = self.param()\n for i, filter_shape, pool_size in zip(self.layers_num, self.filter_shapes, self.pool_sizes):\n self.add_module(name=\"cnn_layer_{}\".format(i), module=CNNModule(self.filter_num, filter_shape, pool_size))\n\n def param(self):\n \"\"\"\n get param(filter_shape and pool_size) for cnn module.\n :return:\n \"\"\"\n filter_shapes = []\n pool_sizes = []\n layers_num = []\n for i, filter_h in enumerate(self.filter_windows):\n filter_shapes.append((filter_h, self.img_w))\n pool_sizes.append((self.img_h - filter_h + 1, 1))\n layers_num.append(i)\n return layers_num, filter_shapes, pool_sizes\n\n def forward(self, q_input, a_input):\n \"\"\"\n concat outputs of multi-cnn-layer(conv+max_pool) with q_input vec and a_input vec.\n :param q_input: q_input vec\n :param a_input: a_input vec\n :return:\n \"\"\"\n q_output, a_output = [], []\n for cnn_layer in self.children():\n q_out, a_out = cnn_layer(q_input, a_input)\n q_output.append(q_out)\n a_output.append(a_out)\n q_vec = torch.cat(q_output, dim=1).view(-1, self.num_feature_maps) # batch * num_feature_maps\n a_vec = torch.cat(a_output, dim=1).view(-1, self.num_feature_maps)\n return q_vec, a_vec\n\n\nclass QACNNModel(nn.Module):\n \"\"\"\n cnn model for QA pair.\n \"\"\"\n def __init__(self, embedding, img_h, img_w, filter_windows, filter_num, n_in, n_hidden, n_out):\n \"\"\"\n model init.\n :param embedding: word embedding.\n :param img_h: sentence length.\n :param img_w: embedding dim.\n :param filter_windows: collection of filter height.\n :param filter_num: feature maps.\n :param n_in: input size for mlp\n :param n_hidden: hidden size for mlp\n :param n_out: out size for mlp\n \"\"\"\n super(QACNNModel, self).__init__()\n self.embedding = embedding\n self.img_h = img_h\n self.img_w = img_w\n self.filter_windows = filter_windows\n self.filter_num = filter_num\n self.input_size = n_in\n self.hidden_size = n_hidden\n self.out_size = n_out\n self.num_feature_maps = 
len(self.filter_windows) * self.filter_num\n self.inception_module_layers = InceptionModule(self.img_h, self.img_w, self.filter_windows, self.filter_num)\n self.interact_layer = InteractLayer(self.num_feature_maps, self.num_feature_maps, self.input_size)\n self.bn_layer = BatchNormLayer(self.input_size)\n self.mlp = MLPDropout(self.input_size, self.hidden_size, self.out_size)\n\n def forward(self, q_input, a_input, drop_rate):\n \"\"\"\n input -> embedding_layer -> multi_cnn_layer -> interact_layer -> batchnorm_layer -> mlp_layer\n :param q_input: question sentence vec\n :param a_input: answer sentence vec\n :param: drop_rate: dropout rate\n :return:\n \"\"\"\n q_input_emb = torch.unsqueeze(self.embedding(q_input), dim=1)\n a_input_emb = torch.unsqueeze(self.embedding(a_input), dim=1)\n q_vec, a_vec = self.inception_module_layers(q_input_emb, a_input_emb)\n qa_vec = self.interact_layer(q_vec, a_vec)\n bn_vec = self.bn_layer(qa_vec)\n prop, cate = self.mlp(bn_vec, drop_rate)\n return prop, cate\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
haifeng-jin/SMAC3
[ "999d04408f084ec085844255b592e485dba241dd" ]
[ "examples/rf.py" ]
[ "import logging\nimport os\nimport inspect\n\nimport numpy as np\nfrom sklearn.metrics import make_scorer\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.datasets import load_boston\n\nfrom smac.configspace import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter, \\\n UniformFloatHyperparameter, UniformIntegerHyperparameter\n\nfrom smac.tae.execute_func import ExecuteTAFuncDict\nfrom smac.scenario.scenario import Scenario\nfrom smac.facade.smac_facade import SMAC\n\nboston = load_boston()\n\ndef rf_from_cfg(cfg, seed):\n \"\"\"\n Creates a random forest regressor from sklearn and fits the given data on it.\n This is the function-call we try to optimize. Chosen values are stored in\n the configuration (cfg).\n\n Parameters:\n -----------\n cfg: Configuration\n configuration chosen by smac\n seed: int or RandomState\n used to initialize the rf's random generator\n\n Returns:\n -----------\n np.mean(rmses): float\n mean of root mean square errors of random-forest test predictions\n per cv-fold\n \"\"\"\n rfr = RandomForestRegressor(\n n_estimators=cfg[\"num_trees\"],\n criterion=cfg[\"criterion\"],\n min_samples_split=cfg[\"min_samples_to_split\"],\n min_samples_leaf=cfg[\"min_samples_in_leaf\"],\n min_weight_fraction_leaf=cfg[\"min_weight_frac_leaf\"],\n max_features=cfg[\"max_features\"],\n max_leaf_nodes=cfg[\"max_leaf_nodes\"],\n bootstrap=cfg[\"do_bootstrapping\"],\n random_state=seed)\n\n def rmse(y, y_pred):\n return np.sqrt(np.mean((y_pred - y)**2))\n # Creating root mean square error for sklearns crossvalidation\n rmse_scorer = make_scorer(rmse, greater_is_better=False)\n score = cross_val_score(rfr, boston.data, boston.target, cv=11, scoring=rmse_scorer)\n return -1 * np.mean(score) # Because cross_validation sign-flips the score\n\n\nlogger = logging.getLogger(\"RF-example\")\nlogging.basicConfig(level=logging.INFO)\n#logging.basicConfig(level=logging.DEBUG) # Enable to show debug-output\nlogger.info(\"Running random forest example for SMAC. 
If you experience \"\n \"difficulties, try to decrease the memory-limit.\")\n\n# Build Configuration Space which defines all parameters and their ranges.\n# To illustrate different parameter types,\n# we use continuous, integer and categorical parameters.\ncs = ConfigurationSpace()\n\n# We can add single hyperparameters:\ndo_bootstrapping = CategoricalHyperparameter(\n \"do_bootstrapping\", [\"true\", \"false\"], default_value=\"true\")\ncs.add_hyperparameter(do_bootstrapping)\n\n# Or we can add multiple hyperparameters at once:\nnum_trees = UniformIntegerHyperparameter(\"num_trees\", 10, 50, default_value=10)\nmax_features = UniformIntegerHyperparameter(\"max_features\", 1, boston.data.shape[1], default_value=1)\nmin_weight_frac_leaf = UniformFloatHyperparameter(\"min_weight_frac_leaf\", 0.0, 0.5, default_value=0.0)\ncriterion = CategoricalHyperparameter(\"criterion\", [\"mse\", \"mae\"], default_value=\"mse\")\nmin_samples_to_split = UniformIntegerHyperparameter(\"min_samples_to_split\", 2, 20, default_value=2)\nmin_samples_in_leaf = UniformIntegerHyperparameter(\"min_samples_in_leaf\", 1, 20, default_value=1)\nmax_leaf_nodes = UniformIntegerHyperparameter(\"max_leaf_nodes\", 10, 1000, default_value=100)\n\ncs.add_hyperparameters([num_trees, min_weight_frac_leaf, criterion,\n max_features, min_samples_to_split, min_samples_in_leaf, max_leaf_nodes])\n\n# SMAC scenario oject\nscenario = Scenario({\"run_obj\": \"quality\", # we optimize quality (alternative runtime)\n \"runcount-limit\": 50, # maximum number of function evaluations\n \"cs\": cs, # configuration space\n \"deterministic\": \"true\",\n \"memory_limit\": 3072, # adapt this to reasonable value for your hardware\n })\n\n# To optimize, we pass the function to the SMAC-object\nsmac = SMAC(scenario=scenario, rng=np.random.RandomState(42),\n tae_runner=rf_from_cfg)\n\n# Example call of the function with default values\n# It returns: Status, Cost, Runtime, Additional Infos\ndef_value = smac.get_tae_runner().run(cs.get_default_configuration(), 1)[1]\nprint(\"Value for default configuration: %.2f\" % (def_value))\n\n# Start optimization\ntry:\n incumbent = smac.optimize()\nfinally:\n incumbent = smac.solver.incumbent\n\ninc_value = smac.get_tae_runner().run(incumbent, 1)[1]\nprint(\"Optimized Value: %.2f\" % (inc_value))\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "sklearn.cross_validation.cross_val_score", "numpy.mean", "sklearn.metrics.make_scorer", "sklearn.datasets.load_boston", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
igctty/pydy
[ "ea1b6902a6625e66d0f41d06e12bbd1e7096fdf9", "ea1b6902a6625e66d0f41d06e12bbd1e7096fdf9" ]
[ "src/dl_iris_data_processing.py", "src/dl_iris_trainer.py" ]
[ "# coding: UTF-8\n\nimport numpy as np\nfrom sklearn import datasets\nimport chainer\nfrom chainer import Variable,Chain\nimport chainer.links as L\nimport chainer.functions as F\nimport chainer.optimizers as O\n\n# データ読み込み\niris_data = datasets.load_iris()\n# print(iris_data)\n\n# データの取り出し\nx = iris_data.data.astype(np.float32) # iris 花の特徴を表す4種のデータ\nt = iris_data.target # 品種を表す数値\nn = t.size # 品種を表す数値のサイズ\n# print(x)\n# print(t)\n# print(n)\n\n# 教師データの準備\nt_matrix = np.zeros(3*n).reshape(n, 3).astype(np.float32)\nfor i in range(n):\n t_matrix[i,t[i]] = 1.0\n\n# print(t_matrix)\n\n# 訓練用データとテスト用データ 半分ずつ\nindexes = np.arange(n)\nindexes_training = indexes[indexes%2 != 0]\nindexes_test = indexes[indexes%2 == 0]\n\n# print(indexes)\n# print(indexes_training)\n# print(indexes_test)\n\nx_training = x[indexes_training, : ] # 訓練用 入力\nt_training = t_matrix[indexes_training, : ] # 訓練用 正解\nx_test = x[indexes_test, : ] # テスト用 入力\nt_test = t[indexes_test] # テスト用 正解\n\n# print(x_training)\n# print(x_test)\n# print(t_training)\n# print(t_test)\n\n# Variable に変換\nx_training_v = Variable(x_training)\nt_training_v = Variable(t_training)\nx_test_v = Variable(x_test)\n\nprint(x_training_v)\nprint(t_training_v)\nprint(x_test_v)\n", "# coding: UTF-8\n\nimport numpy as np\nfrom sklearn import datasets\nimport chainer\nfrom chainer import Variable,Chain\nfrom chainer.datasets import tuple_dataset\nfrom chainer import training,iterators\nfrom chainer.training import extensions\nimport chainer.links as L\nimport chainer.functions as F\nimport chainer.optimizers as O\n\n\n# データ読み込み\niris_data = datasets.load_iris()\n# print(iris_data)\n\n# データの取り出し\nx = iris_data.data.astype(np.float32) # iris 花の特徴を表す4種のデータ\nt = iris_data.target # 品種を表す数値\nn = t.size # 品種を表す数値のサイズ\n# print(x)\n# print(t)\n# print(n)\n\n# 教師データの準備\nt_matrix = np.zeros(3*n).reshape(n, 3).astype(np.float32)\nfor i in range(n):\n t_matrix[i,t[i]] = 1.0\n\n# print(t_matrix)\n\n# 訓練用データとテスト用データ 半分ずつ\nindexes = np.arange(n)\nindexes_training = indexes[indexes%2 != 0]\nindexes_test = indexes[indexes%2 == 0]\n\n# print(indexes)\n# print(indexes_training)\n# print(indexes_test)\n\nx_training = x[indexes_training, : ] # 訓練用 入力\nt_training = t_matrix[indexes_training, : ] # 訓練用 正解\nx_test = x[indexes_test, : ] # テスト用 入力\nt_test = t[indexes_test] # テスト用 正解\n\n# print(x_training)\n# print(x_test)\n# print(t_training)\n# print(t_test)\n\n# # Variable に変換\n# x_training_v = Variable(x_training)\n# t_training_v = Variable(t_training)\nx_test_v = Variable(x_test)\n# print(x_training_v)\n# print(t_training_v)\n# print(x_test_v)\n\n# trainer\ntrain = tuple_dataset.TupleDataset(x_training, t_training)\n\n# Chain\nclass IrisChain(Chain):\n def __init__(self):\n super(IrisChain, self).__init__(\n l1=L.Linear(4, 6),\n l2=L.Linear(6, 6),\n l3=L.Linear(6, 3),\n )\n\n def __call__(self, x, t):\n return F.mean_squared_error(self.predict(x), t)\n\n def predict(self,x):\n h1 = F.sigmoid(self.l1(x))\n h2 = F.sigmoid(self.l2(h1))\n h3 = self.l3(h2)\n return h3\n\n# model, optimizer\nmodel = IrisChain()\noptimizer = O.Adam()\noptimizer.setup(model)\n\n# learn\n# for i in range(10000):\n# model.cleargrads()\n# y_training_v = model.predict(x_training_v)\n#\n# # 損失関数:平均二乗誤差\n# loss = F.mean_squared_error(y_training_v, t_training_v)\n# loss.backward()\n#\n# # 重みの更新\n# optimizer.update()\ntrain_iter = iterators.SerialIterator(train, 30) #ミニバッチデータの数:30個\nupdater = training.StandardUpdater(train_iter, optimizer)\ntrainer = training.Trainer(updater, (5000, 
'epoch'))\ntrainer.extend(extensions.ProgressBar())\ntrainer.run()\n\n\n# テスト\nmodel.cleargrads()\ny_test_v = model.predict(x_test_v)\ny_test = y_test_v.data\n\n# 正解数カウント\ncorrect = 0\nrowCount = y_test.shape[0] # y_test の要素数\nfor i in range(rowCount):\n maxIndex = np.argmax(y_test[i, :]) # np.argmax関数は最大の要素のインデックスを返す\n print(y_test[i, :], maxIndex)\n if maxIndex == t_test[i]:\n correct = correct+1\n\n# 正解率\nprint(\"Correct:\", correct, \"Total:\", rowCount, \"Accuracy:\", (correct/rowCount)*100, \"%\")\n\n" ]
[ [ "numpy.arange", "sklearn.datasets.load_iris", "numpy.zeros" ], [ "numpy.arange", "numpy.argmax", "sklearn.datasets.load_iris", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IceCreamWW/ESPnet-informed-se
[ "38471b7749b7df0fadeae500cf8a050ac66872d2", "38471b7749b7df0fadeae500cf8a050ac66872d2", "38471b7749b7df0fadeae500cf8a050ac66872d2", "38471b7749b7df0fadeae500cf8a050ac66872d2" ]
[ "espnet2/enh/espnet_enh_informed_model.py", "espnet2/iterators/multiple_iter_factory.py", "espnet/nets/pytorch_backend/transformer/attention.py", "espnet2/iterators/sequence_iter_factory.py" ]
[ "from distutils.version import LooseVersion\nfrom functools import reduce\nfrom itertools import permutations\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Tuple\n\nimport torch\nfrom torch_complex.tensor import ComplexTensor\nfrom typeguard import check_argument_types\n\nfrom espnet2.enh.informed_encoder.abs_informed_encoder import AbsInformedEncoder\nfrom espnet2.enh.encoder.abs_encoder import AbsEncoder\nfrom espnet2.enh.encoder.conv_encoder import ConvEncoder\nfrom espnet2.enh.fusion.abs_fusion import AbsFusion\nfrom espnet2.enh.separator.abs_separator import AbsSeparator\nfrom espnet2.enh.decoder.abs_decoder import AbsDecoder\nfrom espnet2.torch_utils.device_funcs import force_gatherable\nfrom espnet2.train.abs_espnet_model import AbsESPnetModel\nfrom espnet.nets.pytorch_backend.transformer.label_smoothing_loss import (\n LabelSmoothingLoss, # noqa: H301\n)\nfrom espnet.nets.pytorch_backend.nets_utils import make_pad_mask, make_non_pad_mask\nfrom einops import rearrange\nimport copy\n\nimport logging\nimport pdb\n\n\nis_torch_1_3_plus = LooseVersion(torch.__version__) >= LooseVersion(\"1.3.0\")\nALL_LOSS_TYPES = (\n # mse_loss(predicted_mask, target_label)\n \"mask_mse\",\n # mse_loss(enhanced_magnitude_spectrum, target_magnitude_spectrum)\n \"magnitude\",\n # mse_loss(enhanced_complex_spectrum, target_complex_spectrum)\n \"spectrum\",\n # log_mse_loss(enhanced_complex_spectrum, target_complex_spectrum)\n \"spectrum_log\",\n # si_snr(enhanced_waveform, target_waveform)\n \"si_snr\",\n)\nEPS = torch.finfo(torch.get_default_dtype()).eps\n\n\nclass ESPnetEnhancementInformedModel(AbsESPnetModel):\n \"\"\"Speech enhancement or separation Frontend model\"\"\"\n\n def __init__(\n self,\n informed_encoder: AbsInformedEncoder,\n fusion: AbsFusion,\n encoder: AbsEncoder,\n separator: AbsSeparator,\n decoder: AbsDecoder,\n stft_consistency: bool = False,\n loss_type: str = \"mask_mse\",\n mask_type: Optional[str] = None,\n component_loss: bool = False,\n informed_ali_mtl: float = 0.0,\n informed_ali_mtl_lastn: int = 2,\n informed_ali_mtl_sil_scale: float = 1,\n disable_enh_loss: bool = False,\n expand_informed: bool = False,\n mask_before_fusion: bool = True,\n detached_fusion: bool = False,\n multi_grained: bool = False,\n ):\n assert check_argument_types()\n\n super().__init__()\n\n self.informed_encoder = informed_encoder\n self.encoder = encoder\n self.fusion = fusion\n self.separator = separator\n self.decoder = decoder\n self.num_spk = separator.num_spk\n self.num_noise_type = getattr(self.separator, \"num_noise_type\", 1)\n self.component_loss = component_loss\n self.informed_ali_mtl = informed_ali_mtl\n self.informed_ali_mtl_lastn = informed_ali_mtl_lastn\n self.informed_ali_mtl_sil_scale = informed_ali_mtl_sil_scale\n self.disable_enh_loss = disable_enh_loss\n self.expand_informed = expand_informed\n self.mask_before_fusion = mask_before_fusion\n self.detached_fusion = detached_fusion\n self.multi_grained = multi_grained\n\n if loss_type != \"si_snr\" and isinstance(encoder, ConvEncoder):\n raise TypeError(f\"{loss_type} is not supported with {type(ConvEncoder)}\")\n\n # get mask type for TF-domain models (only used when loss_type=\"mask_*\")\n self.mask_type = mask_type.upper() if mask_type else None\n # get loss type for model training\n self.loss_type = loss_type\n # whether to compute the TF-domain loss while enforcing STFT consistency\n self.stft_consistency = stft_consistency\n\n if stft_consistency and loss_type in [\"mask_mse\", \"si_snr\"]:\n raise 
ValueError(\n f\"stft_consistency will not work when '{loss_type}' loss is used\"\n )\n\n assert self.loss_type in ALL_LOSS_TYPES, self.loss_type\n # for multi-channel signal\n self.ref_channel = getattr(self.separator, \"ref_channel\", -1)\n\n\n if self.informed_ali_mtl != 0:\n self.criterion_align = LabelSmoothingLoss(\n size=self.informed_encoder.input_size,\n padding_idx=-1,\n smoothing=0,\n normalize_length=True,\n scales={1:self.informed_ali_mtl_sil_scale},\n )\n\n \n def make_post_from_att(self, informed, informed_lengths, attn):\n bsz = informed.shape[0]\n vocab_size = self.informed_encoder.input_size\n M = torch.zeros(bsz, max(informed_lengths), vocab_size)\n for b in range(bsz):\n M[b,torch.arange(informed_lengths[b]),informed[b]] = 1\n M = M.to(attn.device)\n post = torch.bmm(attn, M)\n return post\n\n def make_ali_from_kaldi_ali(self, kaldi_ali, frame_len, frame_shift, real_lens):\n assert (160 / frame_shift) == (160 // frame_shift), f\"frame_shift {frame_shift} cannot be divided by 160\"\n repeats = 160 // frame_shift\n ali_real = make_non_pad_mask(real_lens).type(real_lens.dtype) # sil = 1, padding = -1\n\n # start from half of 15ms (offset the first frame to center), 16 samples per ms \n start = round((7.5 * 16 - (frame_len - frame_shift) / 2) / frame_shift)\n ali_real[:,start:kaldi_ali.shape[1] * repeats + start] = rearrange(kaldi_ali.unsqueeze(-1).repeat(1,1,repeats), 'b d r -> b (d r)')\n ali_real = ali_real.to(kaldi_ali.device)\n return ali_real\n\n\n @staticmethod\n def _create_mask_label(mix_spec, ref_spec, mask_type=\"IAM\"):\n \"\"\"Create mask label.\n\n Args:\n mix_spec: ComplexTensor(B, T, F)\n ref_spec: List[ComplexTensor(B, T, F), ...]\n mask_type: str\n Returns:\n labels: List[Tensor(B, T, F), ...] or List[ComplexTensor(B, T, F), ...]\n \"\"\"\n\n # Must be upper case\n assert mask_type in [\n \"IBM\",\n \"IRM\",\n \"IAM\",\n \"PSM\",\n \"NPSM\",\n \"PSM^2\",\n ], f\"mask type {mask_type} not supported\"\n mask_label = []\n for r in ref_spec:\n mask = None\n if mask_type == \"IBM\":\n flags = [abs(r) >= abs(n) for n in ref_spec]\n mask = reduce(lambda x, y: x * y, flags)\n mask = mask.int()\n elif mask_type == \"IRM\":\n # TODO(Wangyou): need to fix this,\n # as noise referecens are provided separately\n mask = abs(r) / (sum(([abs(n) for n in ref_spec])) + EPS)\n elif mask_type == \"IAM\":\n mask = abs(r) / (abs(mix_spec) + EPS)\n mask = mask.clamp(min=0, max=1)\n elif mask_type == \"PSM\" or mask_type == \"NPSM\":\n phase_r = r / (abs(r) + EPS)\n phase_mix = mix_spec / (abs(mix_spec) + EPS)\n # cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)\n cos_theta = (\n phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag\n )\n mask = (abs(r) / (abs(mix_spec) + EPS)) * cos_theta\n mask = (\n mask.clamp(min=0, max=1)\n if mask_type == \"NPSM\"\n else mask.clamp(min=-1, max=1)\n )\n elif mask_type == \"PSM^2\":\n # This is for training beamforming masks\n phase_r = r / (abs(r) + EPS)\n phase_mix = mix_spec / (abs(mix_spec) + EPS)\n # cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)\n cos_theta = (\n phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag\n )\n mask = (abs(r).pow(2) / (abs(mix_spec).pow(2) + EPS)) * cos_theta\n mask = mask.clamp(min=-1, max=1)\n assert mask is not None, f\"mask type {mask_type} not supported\"\n mask_label.append(mask)\n return mask_label\n\n def forward(\n self,\n speech_mix: torch.Tensor,\n informed: torch.Tensor,\n speech_mix_lengths: torch.Tensor = None,\n informed_lengths: torch.Tensor = None,\n informed_ali_ref: torch.Tensor = 
None,\n **kwargs,\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:\n \"\"\"Frontend + Encoder + Decoder + Calc loss\n\n Args:\n speech_mix: (Batch, samples) or (Batch, samples, channels)\n speech_ref: (Batch, num_speaker, samples)\n or (Batch, num_speaker, samples, channels)\n speech_mix_lengths: (Batch,), default None for chunk interator,\n because the chunk-iterator does not have the\n speech_lengths returned. see in\n espnet2/iterators/chunk_iter_factory.py\n \"\"\"\n # clean speech signal of each speaker\n speech_ref = [\n kwargs[\"speech_ref{}\".format(spk + 1)] for spk in range(self.num_spk)\n ]\n # (Batch, num_speaker, samples) or (Batch, num_speaker, samples, channels)\n speech_ref = torch.stack(speech_ref, dim=1)\n\n if \"noise_ref1\" in kwargs:\n # noise signal (optional, required when using\n # frontend models with beamformering)\n noise_ref = [\n kwargs[\"noise_ref{}\".format(n + 1)] for n in range(self.num_noise_type)\n ]\n # (Batch, num_noise_type, samples) or\n # (Batch, num_noise_type, samples, channels)\n noise_ref = torch.stack(noise_ref, dim=1)\n else:\n noise_ref = None\n\n # dereverberated (noisy) signal\n # (optional, only used for frontend models with WPE)\n if \"dereverb_ref1\" in kwargs:\n # noise signal (optional, required when using\n # frontend models with beamformering)\n dereverb_speech_ref = [\n kwargs[\"dereverb_ref{}\".format(n + 1)]\n for n in range(self.num_spk)\n if \"dereverb_ref{}\".format(n + 1) in kwargs\n ]\n assert len(dereverb_speech_ref) in (1, self.num_spk), len(\n dereverb_speech_ref\n )\n # (Batch, N, samples) or (Batch, N, samples, channels)\n dereverb_speech_ref = torch.stack(dereverb_speech_ref, dim=1)\n else:\n dereverb_speech_ref = None\n\n batch_size = speech_mix.shape[0]\n speech_lengths = (\n speech_mix_lengths\n if speech_mix_lengths is not None\n else torch.ones(batch_size).int().fill_(speech_mix.shape[1])\n )\n assert speech_lengths.dim() == 1, speech_lengths.shape\n # Check that batch_size is unified\n assert speech_mix.shape[0] == speech_ref.shape[0] == speech_lengths.shape[0], (\n speech_mix.shape,\n speech_ref.shape,\n speech_lengths.shape,\n )\n\n # for data-parallel\n speech_ref = speech_ref[:, :, : speech_lengths.max()]\n speech_mix = speech_mix[:, : speech_lengths.max()]\n\n\n loss, speech_pre, others, out_lengths, perm = self._compute_loss(\n speech_mix,\n informed,\n speech_lengths,\n informed_lengths,\n speech_ref,\n dereverb_speech_ref=dereverb_speech_ref,\n noise_ref=noise_ref,\n informed_ali_ref=informed_ali_ref\n )\n\n # raise RuntimeError(\"out of memory\")\n\n # add stats for logging\n if self.loss_type != \"si_snr\":\n if self.training:\n si_snr = None\n else:\n speech_pre = [self.decoder(ps, speech_lengths)[0] for ps in speech_pre]\n speech_ref = torch.unbind(speech_ref, dim=1)\n if speech_ref[0].dim() == 3:\n # For si_snr loss, only select one channel as the reference\n speech_ref = [sr[..., self.ref_channel] for sr in speech_ref]\n # compute si-snr loss\n si_snr_loss, perm = self._permutation_loss(\n speech_ref, speech_pre, self.si_snr_loss, perm=perm\n )\n si_snr = -si_snr_loss.detach()\n\n stats = dict(\n si_snr=si_snr,\n loss=loss.detach(),\n )\n else:\n stats = dict(si_snr=-loss.detach(), loss=loss.detach())\n\n\n # informed align CE loss\n if self.informed_ali_mtl > 0:\n assert informed_ali_ref is not None, \"informed align ref is not available\"\n assert isinstance(self.encoder, ConvEncoder), \"informed align mtl support only ConvEncoder\"\n assert (160 / self.encoder.stride) == (160 // 
self.encoder.stride), f\"encoder stride {self.encoder.stride} cannot be divided by 160\"\n\n repeats = 160 // self.encoder.stride\n speech_flens = (speech_mix_lengths - self.encoder.kernel_size) // self.encoder.stride + 1\n informed_ali_ref_re = make_non_pad_mask(speech_flens).type(speech_flens.dtype) * 2 - 1 # sil = 1, padding = -1\n\n # start from half of 15ms (offset the first frame to center), 16 samples per ms \n start = round((7.5 * 16 - (self.encoder.kernel_size - self.encoder.stride) / 2) / self.encoder.stride)\n informed_ali_ref_re[:,start:informed_ali_ref.shape[1] * repeats + start] = rearrange(informed_ali_ref.unsqueeze(-1).repeat(1,1,repeats), 'b d r -> b (d r)')\n informed_ali_ref_re = informed_ali_ref_re.to(informed_ali_ref.device)\n\n loss_align = 0\n for idx in range(-1, -1 - self.informed_ali_mtl_lastn, -1):\n post = self.make_post_from_att(informed, informed_lengths, self.fusion.encoders[idx].src_attn.attn[:,0,:,:])\n loss_align += self.criterion_align(post, informed_ali_ref_re)\n loss_align /= self.informed_ali_mtl_lastn\n\n stats[\"loss_align\"] = loss_align.detach()\n\n if self.disable_enh_loss:\n loss = loss_align\n stats[\"loss\"] = loss.detach()\n del stats[\"si_snr\"]\n else:\n loss += loss_align * self.informed_ali_mtl\n stats[\"loss\"] = loss.detach()\n\n\n # force_gatherable: to-device and to-tensor if scalar for DataParallel\n loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)\n return loss, stats, weight\n\n def _compute_loss(\n self,\n speech_mix,\n informed,\n speech_lengths,\n informed_lengths,\n speech_ref,\n dereverb_speech_ref=None,\n noise_ref=None,\n cal_loss=True,\n informed_ali_ref=None\n ):\n \"\"\"Compute loss according to self.loss_type.\n\n Args:\n speech_mix: (Batch, samples) or (Batch, samples, channels)\n speech_lengths: (Batch,), default None for chunk interator,\n because the chunk-iterator does not have the\n speech_lengths returned. 
see in\n espnet2/iterators/chunk_iter_factory.py\n speech_ref: (Batch, num_speaker, samples)\n or (Batch, num_speaker, samples, channels)\n dereverb_speech_ref: (Batch, N, samples)\n or (Batch, num_speaker, samples, channels)\n noise_ref: (Batch, num_noise_type, samples)\n or (Batch, num_speaker, samples, channels)\n cal_loss: whether to calculate enh loss, defualt is True\n\n Returns:\n loss: (torch.Tensor) speech enhancement loss\n speech_pre: (List[torch.Tensor] or List[ComplexTensor])\n enhanced speech or spectrum(s)\n others: (OrderedDict) estimated masks or None\n output_lengths: (Batch,)\n perm: () best permutation\n \"\"\"\n\n # pdb.set_trace()\n speech_feature_mix, speech_flens = self.encoder(speech_mix, speech_lengths)\n if self.expand_informed:\n informed = self.make_ali_from_kaldi_ali(informed_ali_ref, self.encoder.kernel_size, self.encoder.stride, speech_flens)\n informed_lengths = speech_flens\n\n informed_feature, informed_flens = self.informed_encoder(informed, informed_lengths)\n\n if self.detached_fusion:\n assert self.mask_before_fusion, \"detached fusion must work together with mask_before_fusion\"\n detached_speech_feature_mix = speech_feature_mix.detach()\n feature_mix, flens, _ = self.fusion(detached_speech_feature_mix, informed_feature, speech_flens, informed_flens)\n else:\n feature_mix, flens, _ = self.fusion(speech_feature_mix, informed_feature, speech_flens, informed_flens)\n\n if self.mask_before_fusion:\n feature_pre, flens, others = self.separator(feature_mix, flens, speech_feature_mix)\n else:\n feature_pre, flens, others = self.separator(feature_mix, flens)\n\n\n if self.loss_type != \"si_snr\":\n spectrum_mix = feature_mix\n spectrum_pre = feature_pre\n # predict separated speech and masks\n if self.stft_consistency:\n # pseudo STFT -> time-domain -> STFT (compute loss)\n tmp_t_domain = [\n self.decoder(sp, speech_lengths)[0] for sp in spectrum_pre\n ]\n spectrum_pre = [\n self.encoder(sp, speech_lengths)[0] for sp in tmp_t_domain\n ]\n pass\n\n if spectrum_pre is not None and not isinstance(\n spectrum_pre[0], ComplexTensor\n ):\n spectrum_pre = [\n ComplexTensor(*torch.unbind(sp, dim=-1)) for sp in spectrum_pre\n ]\n\n if not cal_loss:\n loss, perm = None, None\n return loss, spectrum_pre, others, flens, perm\n\n # prepare reference speech and reference spectrum\n speech_ref = torch.unbind(speech_ref, dim=1)\n # List[ComplexTensor(Batch, T, F)] or List[ComplexTensor(Batch, T, C, F)]\n spectrum_ref = [self.encoder(sr, speech_lengths)[0] for sr in speech_ref]\n\n\n # compute TF masking loss\n if self.loss_type == \"magnitude\":\n # compute loss on magnitude spectrum\n assert spectrum_pre is not None\n magnitude_pre = [abs(ps + 1e-15) for ps in spectrum_pre]\n if spectrum_ref[0].dim() > magnitude_pre[0].dim():\n # only select one channel as the reference\n magnitude_ref = [\n abs(sr[..., self.ref_channel, :]) for sr in spectrum_ref\n ]\n else:\n magnitude_ref = [abs(sr) for sr in spectrum_ref]\n\n tf_loss, perm = self._permutation_loss(\n magnitude_ref, magnitude_pre, self.tf_mse_loss\n )\n elif self.loss_type.startswith(\"spectrum\"):\n # compute loss on complex spectrum\n if self.loss_type == \"spectrum\":\n loss_func = self.tf_mse_loss\n elif self.loss_type == \"spectrum_log\":\n loss_func = self.tf_log_mse_loss\n else:\n raise ValueError(\"Unsupported loss type: %s\" % self.loss_type)\n\n assert spectrum_pre is not None\n if spectrum_ref[0].dim() > spectrum_pre[0].dim():\n # only select one channel as the reference\n spectrum_ref = [sr[..., 
self.ref_channel, :] for sr in spectrum_ref]\n\n tf_loss, perm = self._permutation_loss(\n spectrum_ref, spectrum_pre, loss_func\n )\n elif self.loss_type.startswith(\"mask\"):\n if self.loss_type == \"mask_mse\":\n loss_func = self.tf_mse_loss\n else:\n raise ValueError(\"Unsupported loss type: %s\" % self.loss_type)\n\n assert others is not None\n mask_pre_ = [\n others[\"mask_spk{}\".format(spk + 1)] for spk in range(self.num_spk)\n ]\n\n # prepare ideal masks\n mask_ref = self._create_mask_label(\n spectrum_mix, spectrum_ref, mask_type=self.mask_type\n )\n\n # compute TF masking loss\n tf_loss, perm = self._permutation_loss(mask_ref, mask_pre_, loss_func)\n\n if \"mask_dereverb1\" in others:\n if dereverb_speech_ref is None:\n raise ValueError(\n \"No dereverberated reference for training!\\n\"\n 'Please specify \"--use_dereverb_ref true\" in run.sh'\n )\n\n mask_wpe_pre = [\n others[\"mask_dereverb{}\".format(spk + 1)]\n for spk in range(self.num_spk)\n if \"mask_dereverb{}\".format(spk + 1) in others\n ]\n assert len(mask_wpe_pre) == dereverb_speech_ref.size(1), (\n len(mask_wpe_pre),\n dereverb_speech_ref.size(1),\n )\n dereverb_speech_ref = torch.unbind(dereverb_speech_ref, dim=1)\n dereverb_spectrum_ref = [\n self.encoder(dr, speech_lengths)[0]\n for dr in dereverb_speech_ref\n ]\n dereverb_mask_ref = self._create_mask_label(\n spectrum_mix, dereverb_spectrum_ref, mask_type=self.mask_type\n )\n\n tf_dereverb_loss, perm_d = self._permutation_loss(\n dereverb_mask_ref, mask_wpe_pre, loss_func\n )\n tf_loss = tf_loss + tf_dereverb_loss\n\n if \"mask_noise1\" in others:\n if noise_ref is None:\n raise ValueError(\n \"No noise reference for training!\\n\"\n 'Please specify \"--use_noise_ref true\" in run.sh'\n )\n\n noise_ref = torch.unbind(noise_ref, dim=1)\n noise_spectrum_ref = [\n self.encoder(nr, speech_lengths)[0] for nr in noise_ref\n ]\n noise_mask_ref = self._create_mask_label(\n spectrum_mix, noise_spectrum_ref, mask_type=self.mask_type\n )\n\n mask_noise_pre = [\n others[\"mask_noise{}\".format(n + 1)]\n for n in range(self.num_noise_type)\n ]\n tf_noise_loss, perm_n = self._permutation_loss(\n noise_mask_ref, mask_noise_pre, loss_func\n )\n tf_loss = tf_loss + tf_noise_loss\n else:\n raise ValueError(\"Unsupported loss type: %s\" % self.loss_type)\n\n loss = tf_loss\n return loss, spectrum_pre, others, flens, perm\n\n else:\n speech_pre = [self.decoder(ps, speech_lengths)[0] for ps in feature_pre]\n if not cal_loss:\n loss, perm = None, None\n return loss, speech_pre, None, speech_lengths, perm\n\n # speech_pre: list[(batch, sample)]\n assert speech_pre[0].dim() == 2, speech_pre[0].dim()\n\n if speech_ref.dim() == 4:\n # For si_snr loss of multi-channel input,\n # only select one channel as the reference\n speech_ref = speech_ref[..., self.ref_channel]\n speech_ref = torch.unbind(speech_ref, dim=1)\n\n # compute si-snr loss\n si_snr_loss, perm = self._permutation_loss(\n speech_ref, speech_pre, self.si_snr_loss_zeromean_multi_grained if (self.multi_grained and self.training) else self.si_snr_loss_zeromean\n )\n loss = si_snr_loss\n\n return loss, speech_pre, None, speech_lengths, perm\n\n @staticmethod\n def tf_mse_loss(ref, inf):\n \"\"\"time-frequency MSE loss.\n\n Args:\n ref: (Batch, T, F) or (Batch, T, C, F)\n inf: (Batch, T, F) or (Batch, T, C, F)\n Returns:\n loss: (Batch,)\n \"\"\"\n assert ref.shape == inf.shape, (ref.shape, inf.shape)\n if not is_torch_1_3_plus:\n # in case of binary masks\n ref = ref.type(inf.dtype)\n diff = ref - inf\n if isinstance(diff, 
ComplexTensor):\n mseloss = diff.real ** 2 + diff.imag ** 2\n else:\n mseloss = diff ** 2\n if ref.dim() == 3:\n mseloss = mseloss.mean(dim=[1, 2])\n elif ref.dim() == 4:\n mseloss = mseloss.mean(dim=[1, 2, 3])\n else:\n raise ValueError(\n \"Invalid input shape: ref={}, inf={}\".format(ref.shape, inf.shape)\n )\n\n return mseloss\n\n @staticmethod\n def tf_log_mse_loss(ref, inf):\n \"\"\"time-frequency log-MSE loss.\n\n Args:\n ref: (Batch, T, F) or (Batch, T, C, F)\n inf: (Batch, T, F) or (Batch, T, C, F)\n Returns:\n loss: (Batch,)\n \"\"\"\n assert ref.shape == inf.shape, (ref.shape, inf.shape)\n if not is_torch_1_3_plus:\n # in case of binary masks\n ref = ref.type(inf.dtype)\n diff = ref - inf\n if isinstance(diff, ComplexTensor):\n log_mse_loss = diff.real ** 2 + diff.imag ** 2\n else:\n log_mse_loss = diff ** 2\n if ref.dim() == 3:\n log_mse_loss = torch.log10(log_mse_loss.sum(dim=[1, 2])) * 10\n elif ref.dim() == 4:\n log_mse_loss = torch.log10(log_mse_loss.sum(dim=[1, 2, 3])) * 10\n else:\n raise ValueError(\n \"Invalid input shape: ref={}, inf={}\".format(ref.shape, inf.shape)\n )\n\n return log_mse_loss\n\n @staticmethod\n def tf_l1_loss(ref, inf):\n \"\"\"time-frequency L1 loss.\n\n Args:\n ref: (Batch, T, F) or (Batch, T, C, F)\n inf: (Batch, T, F) or (Batch, T, C, F)\n Returns:\n loss: (Batch,)\n \"\"\"\n assert ref.shape == inf.shape, (ref.shape, inf.shape)\n if not is_torch_1_3_plus:\n # in case of binary masks\n ref = ref.type(inf.dtype)\n if isinstance(inf, ComplexTensor):\n l1loss = abs(ref - inf + EPS)\n else:\n l1loss = abs(ref - inf)\n if ref.dim() == 3:\n l1loss = l1loss.mean(dim=[1, 2])\n elif ref.dim() == 4:\n l1loss = l1loss.mean(dim=[1, 2, 3])\n else:\n raise ValueError(\n \"Invalid input shape: ref={}, inf={}\".format(ref.shape, inf.shape)\n )\n return l1loss\n\n @staticmethod\n def si_snr_loss(ref, inf):\n \"\"\"SI-SNR loss\n\n Args:\n ref: (Batch, samples)\n inf: (Batch, samples)\n Returns:\n loss: (Batch,)\n \"\"\"\n ref = ref / torch.norm(ref, p=2, dim=1, keepdim=True)\n inf = inf / torch.norm(inf, p=2, dim=1, keepdim=True)\n\n s_target = (ref * inf).sum(dim=1, keepdims=True) * ref\n e_noise = inf - s_target\n\n si_snr = 20 * (\n torch.log10(torch.norm(s_target, p=2, dim=1).clamp(min=EPS))\n - torch.log10(torch.norm(e_noise, p=2, dim=1).clamp(min=EPS))\n )\n return -si_snr\n\n @staticmethod\n def si_snr_loss_zeromean_multi_grained(ref, inf):\n \"\"\"SI-SNR loss with zero-mean in pre-processing.\n\n Args:\n ref: (Batch, samples)\n inf: (Batch, samples)\n Returns:\n loss: (Batch,)\n \"\"\"\n # logging.info(\"applying multi grained si snr\")\n\n assert ref.size() == inf.size()\n B, T = ref.size()\n\n base = 1000\n pair_wise_si_snr = 0\n cnt = 1\n\n while True:\n if base > T:\n break\n for start in range(0, T, base):\n end = start + base\n pair_wise_si_snr += ESPnetEnhancementInformedModel.si_snr_loss_zeromean(ref[:, start:end], inf[:, start:end])\n cnt += 1\n base *= 4\n pair_wise_si_snr += ESPnetEnhancementInformedModel.si_snr_loss_zeromean(ref, inf)\n return pair_wise_si_snr / cnt\n\n @staticmethod\n def si_snr_loss_zeromean(ref, inf):\n \"\"\"SI-SNR loss with zero-mean in pre-processing.\n\n Args:\n ref: (Batch, samples)\n inf: (Batch, samples)\n Returns:\n loss: (Batch,)\n \"\"\"\n assert ref.size() == inf.size()\n B, T = ref.size()\n # mask padding position along T\n\n # Step 1. 
Zero-mean norm\n mean_target = torch.sum(ref, dim=1, keepdim=True) / T\n mean_estimate = torch.sum(inf, dim=1, keepdim=True) / T\n zero_mean_target = ref - mean_target\n zero_mean_estimate = inf - mean_estimate\n\n # Step 2. SI-SNR with order\n # reshape to use broadcast\n s_target = zero_mean_target # [B, T]\n s_estimate = zero_mean_estimate # [B, T]\n # s_target = <s', s>s / ||s||^2\n pair_wise_dot = torch.sum(s_estimate * s_target, dim=1, keepdim=True) # [B, 1]\n s_target_energy = torch.sum(s_target ** 2, dim=1, keepdim=True) + EPS # [B, 1]\n pair_wise_proj = pair_wise_dot * s_target / s_target_energy # [B, T]\n # e_noise = s' - s_target\n e_noise = s_estimate - pair_wise_proj # [B, T]\n\n # SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)\n pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=1) / (\n torch.sum(e_noise ** 2, dim=1) + EPS\n )\n # print('pair_si_snr',pair_wise_si_snr[0,:])\n pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + EPS) # [B]\n # print(pair_wise_si_snr)\n\n return -1 * pair_wise_si_snr\n\n @staticmethod\n def _permutation_loss(ref, inf, criterion, perm=None):\n \"\"\"The basic permutation loss function.\n\n Args:\n ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk\n inf (List[torch.Tensor]): [(batch, ...), ...]\n criterion (function): Loss function\n perm (torch.Tensor): specified permutation (batch, num_spk)\n Returns:\n loss (torch.Tensor): minimum loss with the best permutation (batch)\n perm (torch.Tensor): permutation for inf (batch, num_spk)\n e.g. tensor([[1, 0, 2], [0, 1, 2]])\n \"\"\"\n assert len(ref) == len(inf), (len(ref), len(inf))\n num_spk = len(ref)\n\n def pair_loss(permutation):\n return sum(\n [criterion(ref[s], inf[t]) for s, t in enumerate(permutation)]\n ) / len(permutation)\n\n if perm is None:\n device = ref[0].device\n all_permutations = list(permutations(range(num_spk)))\n losses = torch.stack([pair_loss(p) for p in all_permutations], dim=1)\n loss, perm = torch.min(losses, dim=1)\n perm = torch.index_select(\n torch.tensor(all_permutations, device=device, dtype=torch.long),\n 0,\n perm,\n )\n else:\n loss = torch.tensor(\n [\n torch.tensor(\n [\n criterion(\n ref[s][batch].unsqueeze(0), inf[t][batch].unsqueeze(0)\n )\n for s, t in enumerate(p)\n ]\n ).mean()\n for batch, p in enumerate(perm)\n ]\n )\n\n return loss.mean(), perm\n\n def collect_feats(\n self, speech_mix: torch.Tensor, speech_mix_lengths: torch.Tensor, **kwargs\n ) -> Dict[str, torch.Tensor]:\n # for data-parallel\n speech_mix = speech_mix[:, : speech_mix_lengths.max()]\n\n feats, feats_lengths = speech_mix, speech_mix_lengths\n return {\"feats\": feats, \"feats_lengths\": feats_lengths}\n", "import logging\nfrom typing import Callable\nfrom typing import Collection\nfrom typing import Iterator\n\nimport numpy as np\nfrom typeguard import check_argument_types\n\nfrom espnet2.iterators.abs_iter_factory import AbsIterFactory\n\n\nclass MultipleIterFactory(AbsIterFactory):\n def __init__(\n self,\n build_funcs: Collection[Callable[[], AbsIterFactory]],\n seed: int = 0,\n shuffle: bool = False,\n ):\n assert check_argument_types()\n self.build_funcs = list(build_funcs)\n self.seed = seed\n self.shuffle = shuffle\n\n def build_iter(self, epoch: int, shuffle: bool = None) -> Iterator:\n if shuffle is None:\n shuffle = self.shuffle\n self.dataset.iepoch = epoch\n\n build_funcs = list(self.build_funcs)\n\n if shuffle:\n np.random.RandomState(epoch + self.seed).shuffle(build_funcs)\n\n for i, build_func in enumerate(build_funcs):\n logging.info(f\"Building {i}th 
iter-factory...\")\n iter_factory = build_func()\n assert isinstance(iter_factory, AbsIterFactory), type(iter_factory)\n yield from iter_factory.build_iter(epoch, shuffle)\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Shigeki Karita\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Multi-Head Attention layer definition.\"\"\"\n\nimport math\n\nimport numpy\nimport torch\nfrom torch import nn\nimport pdb\n\n\nclass MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(MultiHeadedAttention, self).__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(self, query, key, value):\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\n\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n\n return q, k, v\n\n def forward_attention(self, value, scores, mask):\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n if mask is not None:\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n min_value = float(\n numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min\n )\n scores = scores.masked_fill(mask, min_value)\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n else:\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(self.attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(self, query, key, value, mask):\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor 
(#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n return self.forward_attention(v, scores, mask)\n\n\nclass LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n Paper: https://arxiv.org/abs/1901.02860\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n self.zero_triu = zero_triu\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor.\n\n \"\"\"\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)\n\n if self.zero_triu:\n ones = torch.ones((x.size(2), x.size(3)))\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(self, query, key, value, pos_emb, mask):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, time1)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask)\n\n\nclass RelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n Paper: https://arxiv.org/abs/1901.02860\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n self.zero_triu = zero_triu\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\n time1 means the length of query vector.\n\n Returns:\n torch.Tensor: Output tensor.\n\n \"\"\"\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)[\n :, :, :, : x.size(-1) // 2 + 1\n ] # only keep the positions from 0 to time2\n\n if self.zero_triu:\n ones = torch.ones((x.size(2), x.size(3)), device=x.device)\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(self, query, key, value, pos_emb, mask):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n pos_emb (torch.Tensor): Positional embedding tensor\n (#batch, 2*time1-1, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, 2*time1-1)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask)\n", "from typing import Any\nfrom typing import Sequence\nfrom typing import Union\n\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom typeguard import check_argument_types\n\nfrom espnet2.iterators.abs_iter_factory import AbsIterFactory\nfrom espnet2.samplers.abs_sampler import AbsSampler\n\n\nclass RawSampler(AbsSampler):\n def __init__(self, batches):\n self.batches = batches\n\n def __len__(self):\n return len(self.batches)\n\n def __iter__(self):\n return iter(self.batches)\n\n def generate(self, seed):\n return list(self.batches)\n\n\nclass SequenceIterFactory(AbsIterFactory):\n \"\"\"Build iterator for each epoch.\n\n This class simply creates pytorch DataLoader except for the following points:\n - The random seed is decided according to the number of epochs. This feature\n guarantees reproducibility when resuming from middle of training process.\n - Enable to restrict the number of samples for one epoch. 
This features\n controls the interval number between training and evaluation.\n\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batches: Union[AbsSampler, Sequence[Sequence[Any]]],\n num_iters_per_epoch: int = None,\n seed: int = 0,\n shuffle: bool = False,\n num_workers: int = 0,\n collate_fn=None,\n pin_memory: bool = False,\n ):\n assert check_argument_types()\n\n if not isinstance(batches, AbsSampler):\n self.sampler = RawSampler(batches)\n else:\n self.sampler = batches\n\n self.dataset = dataset\n self.num_iters_per_epoch = num_iters_per_epoch\n self.shuffle = shuffle\n self.seed = seed\n self.num_workers = num_workers\n self.collate_fn = collate_fn\n # https://discuss.pytorch.org/t/what-is-the-disadvantage-of-using-pin-memory/1702\n self.pin_memory = pin_memory\n\n def build_iter(self, epoch: int, shuffle: bool = None) -> DataLoader:\n if shuffle is None:\n shuffle = self.shuffle\n\n self.dataset.iepoch = epoch\n\n if self.num_iters_per_epoch is not None:\n N = len(self.sampler)\n # If corpus size is larger than the num_per_epoch\n if self.num_iters_per_epoch < N:\n N = len(self.sampler)\n real_epoch, offset = divmod(self.num_iters_per_epoch * epoch, N)\n\n if offset >= self.num_iters_per_epoch:\n current_batches = self.sampler.generate(real_epoch + self.seed)\n if shuffle:\n np.random.RandomState(real_epoch + self.seed).shuffle(\n current_batches\n )\n batches = current_batches[\n offset - self.num_iters_per_epoch : offset\n ]\n else:\n prev_batches = self.sampler.generate(real_epoch - 1 + self.seed)\n current_batches = self.sampler.generate(real_epoch + self.seed)\n if shuffle:\n np.random.RandomState(real_epoch - 1 + self.seed).shuffle(\n prev_batches\n )\n np.random.RandomState(real_epoch + self.seed).shuffle(\n current_batches\n )\n batches = (\n prev_batches[offset - self.num_iters_per_epoch :]\n + current_batches[:offset]\n )\n\n # If corpus size is less than the num_per_epoch\n else:\n _epoch, _cursor = divmod(self.num_iters_per_epoch * (epoch - 1), N)\n _remain = self.num_iters_per_epoch\n batches = []\n current_batches = self.sampler.generate(_epoch + self.seed)\n if shuffle:\n np.random.RandomState(_epoch + self.seed).shuffle(current_batches)\n while _remain > 0:\n\n _batches = current_batches[_cursor : _cursor + _remain]\n batches += _batches\n if _cursor + _remain >= N:\n _epoch += 1\n _cursor = 0\n current_batches = self.sampler.generate(_epoch + self.seed)\n if shuffle:\n np.random.RandomState(_epoch + self.seed).shuffle(\n current_batches\n )\n else:\n _cursor = _cursor + _remain\n _remain -= len(_batches)\n\n assert len(batches) == self.num_iters_per_epoch\n\n else:\n batches = self.sampler.generate(epoch + self.seed)\n if shuffle:\n np.random.RandomState(epoch + self.seed).shuffle(batches)\n\n # For backward compatibility for pytorch DataLoader\n if self.collate_fn is not None:\n kwargs = dict(collate_fn=self.collate_fn)\n else:\n kwargs = {}\n\n return DataLoader(\n dataset=self.dataset,\n batch_sampler=batches,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n **kwargs,\n )\n" ]
[ [ "torch.norm", "torch.ones", "torch.min", "torch.sum", "torch.arange", "torch.tensor", "torch.bmm", "torch.unbind", "torch.stack", "torch.get_default_dtype", "torch.log10" ], [ "numpy.random.RandomState" ], [ "torch.nn.Dropout", "torch.softmax", "torch.Tensor", "torch.cat", "torch.tensor", "torch.nn.Linear", "torch.matmul", "torch.nn.init.xavier_uniform_" ], [ "numpy.random.RandomState", "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adiyen/codeday_sf_obj_detection_flask
[ "7b134e454303a79377b7b7ec54ada9c8dccf3f7a" ]
[ "object_detection.py" ]
[ "import matplotlib; matplotlib.use(\"Agg\")\n\nfrom imageai.Detection import ObjectDetection\nimport os, glob\n\ndef runner():\n execution_path = os.getcwd()\n\n detector = ObjectDetection()\n detector.setModelTypeAsRetinaNet()\n detector.setModelPath(os.path.join(execution_path , \"resnet50_coco_best_v2.0.1.h5\"))\n detector.loadModel()\n\n\n path = \"static/pics/\"\n files = os.listdir(path)\n\n oldest = files[-1]\n print(oldest)\n detections = detector.detectObjectsFromImage(input_image = os.path.join(execution_path , f\"static/pics/{oldest}\"), output_image_path = os.path.join(f\"static/pics/{oldest}\"))\n\n for obj in detections:\n# # if eachObject[\"name\"] == wanted_item:\n# # print(\"Found what you were looking for!\")\n# # print(eachObject[\"name\"] , \" : \" , eachObject[\"percentage_probability\"])\n# # else:\n print(obj[\"name\"] , \" : \" , obj[\"percentage_probability\"])\n\nrunner()\n\n# from imageai.Detection import VideoObjectDetection\n# import os\n# import cv2\n\n# execution_path = os.getcwd()\n\n# camera = cv2.VideoCapture(0)\n\n\n# detector = VideoObjectDetection()\n# detector.setModelTypeAsYOLOv3()\n# detector.setModelPath(os.path.join(execution_path , \"yolo.h5\"))\n# detector.loadModel()\n\n# video_path = detector.detectObjectsFromVideo(camera_input=camera,\n# output_file_path=os.path.join(execution_path, \"camera_detected_video\")\n# , frames_per_second=2, log_progress=True, minimum_percentage_probability=30)\n\n# cv2.imshow(video_path)\n\n# print(video_path)\n# def runner():\n\n# counter = 0\n# while True:\n# ret, img = camera.read()\n# counter+=1\n# cv2.waitKey(100)\n# detections = detector.detectObjectsFromVideo(camera_input = camera)\n# for obj in detections:\n# # cv2.imwrite(\"pics/\" + str(name) + \".\" + str(counter) + \".jpg\", img)\n# cv2.waitKey(100)\n# # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n\n# cv2.imshow(\"Face\", img)\n# # k = cv2.waitKey(1)\n# if counter > 50:\n# break\n\n# cam.release()\n# cv2.destroyAllWindows()\n\n# runner()" ]
[ [ "matplotlib.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tayuny/Chicago_Business_Prodictor
[ "c9076b93ddaacb619d4eefef830d3b7276174528", "c9076b93ddaacb619d4eefef830d3b7276174528" ]
[ "pipeline/evaluator.py", "pipeline/minmax_scaler.py" ]
[ "import numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.metrics import auc\r\nfrom sklearn.metrics import precision_recall_curve\r\nimport matplotlib.pyplot as plt\r\n\r\ndef compute_acc(y_true, y_scores, k):\r\n '''\r\n Compute accuracy score based on threshold\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an accuracy score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return accuracy_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef compute_f1(y_true, y_scores, k):\r\n '''\r\n Compute f1 score based on threshold\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an f1 score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return f1_score(y_true_sorted, preds_at_k)\r\n\r\ndef compute_auc_roc(y_true, y_scores, k):\r\n '''\r\n Compute area under Receiver Operator Characteristic Curve\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an auc_roc score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return roc_auc_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef compute_auc(pred_scores, true_labels):\r\n '''\r\n Compute auc score\r\n :param pred_scores: an array of predicted scores\r\n :param true_labels: an array of true labels\r\n\r\n :return: area under curve score\r\n '''\r\n fpr, tpr, thresholds = roc_curve(true_labels, pred_scores, pos_label=2)\r\n return auc(fpr, tpr)\r\n\r\n\r\n# The following functions are referenced from:\r\n# https://github.com/rayidghani/magicloops/blob/master/mlfunctions.py\r\n\r\ndef joint_sort_descending(l1, l2):\r\n '''\r\n Sort two arrays together\r\n :param l1: numpy array\r\n :param l2: numpy array\r\n\r\n :return: two sorted arrays\r\n '''\r\n idx = np.argsort(l1)[::-1]\r\n return l1[idx], l2[idx]\r\n\r\n\r\ndef generate_binary_at_k(y_scores, k):\r\n '''\r\n predict labels based on thresholds\r\n :param y_scores: the predicted scores\r\n :param k: (int or float) threshold\r\n\r\n :return: predicted labels\r\n '''\r\n cutoff_index = int(len(y_scores) * (k / 100.0))\r\n predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]\r\n return predictions_binary\r\n\r\n\r\ndef precision_at_k(y_true, y_scores, k):\r\n '''\r\n Compute precision based on threshold (percentage)\r\n :param y_true: the true labels\r\n :param y_scores: the predicted labels\r\n :param k: (int or float) the threshold\r\n\r\n :return: (float) precision score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n return 
precision_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef recall_at_k(y_true, y_scores, k):\r\n '''\r\n Compute recall based on threshold (percentage)\r\n :param y_true: the true labels\r\n :param y_scores: the predicted labels\r\n :param k: (int or float) the threshold\r\n\r\n :return: (float) recall score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n return recall_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef plot_precision_recall_n(y_true, y_prob, name, save_name, output_type):\r\n #pdb.set_trace() \r\n y_score = y_prob\r\n precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true, y_score)\r\n precision_curve = precision_curve[:-1]\r\n recall_curve = recall_curve[:-1]\r\n pct_above_per_thresh = []\r\n number_scored = len(y_score)\r\n for value in pr_thresholds:\r\n num_above_thresh = len(y_score[y_score >= value])\r\n pct_above_thresh = num_above_thresh / float(number_scored)\r\n pct_above_per_thresh.append(pct_above_thresh)\r\n pct_above_per_thresh = np.array(pct_above_per_thresh)\r\n\r\n plt.clf()\r\n fig, ax1 = plt.subplots()\r\n ax1.plot(pct_above_per_thresh, precision_curve, 'b')\r\n ax1.set_xlabel('percent of population')\r\n ax1.set_ylabel('precision', color='b')\r\n ax2 = ax1.twinx()\r\n ax2.plot(pct_above_per_thresh, recall_curve, 'r')\r\n ax2.set_ylabel('recall', color='r')\r\n ax1.set_ylim([0, 1])\r\n ax1.set_ylim([0, 1])\r\n ax2.set_xlim([0, 1])\r\n\r\n plt.title(name)\r\n if (output_type == 'save'):\r\n plt.savefig(save_name)\r\n plt.close()\r\n elif (output_type == 'show'):\r\n plt.show()\r\n else:\r\n plt.show()\r\n\r\n\r\ndef plot_roc(name, save_name, probs, y_true, output_type):\r\n \r\n fpr, tpr, thresholds = roc_curve(y_true, probs)\r\n roc_auc = auc(fpr, tpr)\r\n plt.clf()\r\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\r\n plt.plot([0, 1], [0, 1], 'k--')\r\n plt.xlim([0.0, 1.05])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title(name)\r\n plt.legend(loc=\"lower right\")\r\n if (output_type == 'save'):\r\n plt.savefig(save_name, close=True)\r\n plt.close()\r\n elif (output_type == 'show'):\r\n plt.show()\r\n else:\r\n plt.show()\r\n\r\n", "import pandas as pd\r\nimport numpy as np\r\nimport gc\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\ndef min_max_transformation(train_df, test_df, continuous_columns):\r\n '''\r\n This function is used to perform min-max scaling for all variables in the\r\n training data, and transform both training and testing dataframe\r\n Inputs:\r\n train_df: training dataframe\r\n test_df: testing dataframe\r\n Returns: updated dataframes\r\n '''\r\n scaler = MinMaxScaler()\r\n train_df_cont = scaler.fit_transform(train_df[continuous_columns])\r\n test_df_cont = scaler.transform(test_df[continuous_columns])\r\n\r\n train_df = train_df.drop(continuous_columns, axis=1).reset_index(drop=True)\r\n test_df = test_df.drop(continuous_columns, axis=1).reset_index(drop=True)\r\n train_df = train_df.join(pd.DataFrame(data=train_df_cont, columns=continuous_columns))\r\n test_df = test_df.join(pd.DataFrame(data=test_df_cont,columns=continuous_columns))\r\n gc.collect()\r\n return train_df, test_df\r\n" ]
[ [ "sklearn.metrics.roc_auc_score", "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "sklearn.metrics.f1_score", "sklearn.metrics.precision_recall_curve", "matplotlib.pyplot.close", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "sklearn.metrics.precision_score", "sklearn.metrics.roc_curve", "matplotlib.pyplot.savefig", "numpy.argsort", "sklearn.metrics.auc", "numpy.array", "sklearn.metrics.recall_score", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "sklearn.metrics.accuracy_score" ], [ "sklearn.preprocessing.MinMaxScaler", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ysx001/IIC
[ "e72eb0833785e867ded0a9bac47ce1d1f9f47b4b" ]
[ "iic/code/utils/segmentation/segmentation_eval.py" ]
[ "from __future__ import print_function\n\nimport sys\nfrom datetime import datetime\n\nimport torch\n\nfrom code.utils.cluster.cluster_eval import cluster_subheads_eval\nfrom code.utils.cluster.transforms import sobel_process\n\n\ndef segmentation_eval(config, net,\n mapping_assignment_dataloader,\n mapping_test_dataloader,\n sobel, using_IR=False, verbose=0, return_only=False):\n torch.cuda.empty_cache()\n net.eval()\n\n stats_dict = cluster_subheads_eval(config, net,\n mapping_assignment_dataloader=mapping_assignment_dataloader,\n mapping_test_dataloader=mapping_test_dataloader,\n sobel=sobel,\n using_IR=using_IR,\n get_data_fn=_segmentation_get_data,\n verbose=verbose)\n\n net.train()\n\n acc = stats_dict[\"best\"]\n is_best = (len(config.epoch_acc) > 0) and (acc > max(config.epoch_acc))\n\n torch.cuda.empty_cache()\n\n if not return_only:\n config.epoch_stats.append(stats_dict)\n config.epoch_acc.append(acc)\n config.epoch_avg_subhead_acc.append(stats_dict[\"avg\"])\n\n return is_best\n else:\n return stats_dict\n\n\ndef _segmentation_get_data(config, net, dataloader, sobel=False,\n using_IR=False, verbose=0):\n # returns (vectorised) cuda tensors for flat preds and targets\n # sister of _clustering_get_data\n\n assert (config.output_k <= 255)\n\n num_batches = len(dataloader)\n num_samples = 0\n\n # upper bound, will be less for last batch\n samples_per_batch = config.batch_sz * config.input_sz * config.input_sz\n\n if verbose > 0:\n print(\"started _segmentation_get_data %s\" % datetime.now())\n sys.stdout.flush()\n\n # vectorised\n flat_predss_all = [torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cpu() for _ in xrange(\n config.num_sub_heads)]\n flat_targets_all = torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cpu()\n mask_all = torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cpu()\n\n if verbose > 0:\n batch_start = datetime.now()\n all_start = batch_start\n print(\"starting batches %s\" % batch_start)\n\n for b_i, batch in enumerate(dataloader):\n\n imgs, flat_targets, mask = batch\n imgs = imgs.cpu()\n\n if sobel:\n imgs = sobel_process(imgs, config.include_rgb, using_IR=using_IR, cpu=True)\n\n with torch.no_grad():\n x_outs = net(imgs)\n\n assert (x_outs[0].shape[1] == config.output_k)\n assert (x_outs[0].shape[2] == config.input_sz and x_outs[0].shape[\n 3] == config.input_sz)\n\n # actual batch size\n actual_samples_curr = (\n flat_targets.shape[0] * config.input_sz * config.input_sz)\n num_samples += actual_samples_curr\n\n # vectorise: collapse from 2D to 1D\n start_i = b_i * samples_per_batch\n for i in xrange(config.num_sub_heads):\n x_outs_curr = x_outs[i]\n assert (not x_outs_curr.requires_grad)\n flat_preds_curr = torch.argmax(x_outs_curr, dim=1)\n flat_predss_all[i][\n start_i:(start_i + actual_samples_curr)] = flat_preds_curr.view(-1)\n\n flat_targets_all[\n start_i:(start_i + actual_samples_curr)] = flat_targets.view(-1)\n mask_all[start_i:(start_i + actual_samples_curr)] = mask.view(-1)\n\n if verbose > 0 and b_i < 3:\n batch_finish = datetime.now()\n print(\"finished batch %d, %s, took %s, of %d\" %\n (b_i, batch_finish, batch_finish - batch_start, num_batches))\n batch_start = batch_finish\n sys.stdout.flush()\n\n if verbose > 0:\n all_finish = datetime.now()\n print(\n \"finished all batches %s, took %s\" % (all_finish, all_finish - all_start))\n sys.stdout.flush()\n\n flat_predss_all = [flat_predss_all[i][:num_samples] for i in\n xrange(config.num_sub_heads)]\n flat_targets_all = 
flat_targets_all[:num_samples]\n mask_all = mask_all[:num_samples]\n\n flat_predss_all = [flat_predss_all[i].masked_select(mask=mask_all) for i in\n xrange(config.num_sub_heads)]\n flat_targets_all = flat_targets_all.masked_select(mask=mask_all)\n\n if verbose > 0:\n print(\"ended _segmentation_get_data %s\" % datetime.now())\n sys.stdout.flush()\n\n selected_samples = mask_all.sum()\n assert (len(flat_predss_all[0].shape) == 1 and\n len(flat_targets_all.shape) == 1)\n assert (flat_predss_all[0].shape[0] == selected_samples)\n assert (flat_targets_all.shape[0] == selected_samples)\n\n return flat_predss_all, flat_targets_all\n" ]
[ [ "torch.argmax", "torch.no_grad", "torch.cuda.empty_cache", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sa-mustafa/incubator-mxnet
[ "03654eeea3f3ab30dc43fabb7229945970a358b2" ]
[ "tests/python/unittest/test_gluon_data.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport tarfile\nimport unittest\nimport mxnet as mx\nimport numpy as np\nimport random\nfrom mxnet import gluon\nimport platform\nfrom common import setup_module, with_seed, teardown\nfrom mxnet.gluon.data import DataLoader\nimport mxnet.ndarray as nd\nfrom mxnet import context\nfrom mxnet.gluon.data.dataset import Dataset\nfrom mxnet.gluon.data.dataset import ArrayDataset\n\n@with_seed()\ndef test_array_dataset():\n X = np.random.uniform(size=(10, 20))\n Y = np.random.uniform(size=(10,))\n dataset = gluon.data.ArrayDataset(X, Y)\n loader = gluon.data.DataLoader(dataset, 2)\n for i, (x, y) in enumerate(loader):\n assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])\n assert mx.test_utils.almost_equal(y.asnumpy(), Y[i*2:(i+1)*2])\n\n dataset = gluon.data.ArrayDataset(X)\n loader = gluon.data.DataLoader(dataset, 2)\n\n for i, x in enumerate(loader):\n assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])\n\n\ndef prepare_record():\n if not os.path.isdir(\"data/test_images\"):\n os.makedirs('data/test_images')\n if not os.path.isdir(\"data/test_images/test_images\"):\n gluon.utils.download(\"http://data.mxnet.io/data/test_images.tar.gz\", \"data/test_images.tar.gz\")\n tarfile.open('data/test_images.tar.gz').extractall('data/test_images/')\n if not os.path.exists('data/test.rec'):\n imgs = os.listdir('data/test_images/test_images')\n record = mx.recordio.MXIndexedRecordIO('data/test.idx', 'data/test.rec', 'w')\n for i, img in enumerate(imgs):\n str_img = open('data/test_images/test_images/'+img, 'rb').read()\n s = mx.recordio.pack((0, i, i, 0), str_img)\n record.write_idx(i, s)\n return 'data/test.rec'\n\n\n@with_seed()\ndef test_recordimage_dataset():\n recfile = prepare_record()\n fn = lambda x, y : (x, y)\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(fn)\n loader = gluon.data.DataLoader(dataset, 1)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\ndef _dataset_transform_fn(x, y):\n \"\"\"Named transform function since lambda function cannot be pickled.\"\"\"\n return x, y\n\ndef _dataset_transform_first_fn(x):\n \"\"\"Named transform function since lambda function cannot be pickled.\"\"\"\n return x\n\n@with_seed()\ndef test_recordimage_dataset_with_data_loader_multiworker():\n recfile = prepare_record()\n dataset = gluon.data.vision.ImageRecordDataset(recfile)\n loader = gluon.data.DataLoader(dataset, 1, num_workers=5)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n # with transform\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(_dataset_transform_fn)\n loader = gluon.data.DataLoader(dataset, 1, 
num_workers=5)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n # with transform_first\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform_first(_dataset_transform_first_fn)\n loader = gluon.data.DataLoader(dataset, 1, num_workers=5)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n@with_seed()\ndef test_sampler():\n seq_sampler = gluon.data.SequentialSampler(10)\n assert list(seq_sampler) == list(range(10))\n rand_sampler = gluon.data.RandomSampler(10)\n assert sorted(list(rand_sampler)) == list(range(10))\n seq_batch_keep = gluon.data.BatchSampler(seq_sampler, 3, 'keep')\n assert sum(list(seq_batch_keep), []) == list(range(10))\n seq_batch_discard = gluon.data.BatchSampler(seq_sampler, 3, 'discard')\n assert sum(list(seq_batch_discard), []) == list(range(9))\n rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep')\n assert sorted(sum(list(rand_batch_keep), [])) == list(range(10))\n\n@with_seed()\ndef test_datasets():\n assert len(gluon.data.vision.MNIST(root='data/mnist')) == 60000\n assert len(gluon.data.vision.MNIST(root='data/mnist', train=False)) == 10000\n assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist')) == 60000\n assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist', train=False)) == 10000\n assert len(gluon.data.vision.CIFAR10(root='data/cifar10')) == 50000\n assert len(gluon.data.vision.CIFAR10(root='data/cifar10', train=False)) == 10000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100')) == 50000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True)) == 50000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False)) == 10000\n\n@with_seed()\ndef test_image_folder_dataset():\n prepare_record()\n dataset = gluon.data.vision.ImageFolderDataset('data/test_images')\n assert dataset.synsets == ['test_images']\n assert len(dataset.items) == 16\n\n@with_seed()\ndef test_list_dataset():\n for num_worker in range(0, 3):\n data = mx.gluon.data.DataLoader([([1,2], 0), ([3, 4], 1)], batch_size=1, num_workers=num_worker)\n for d, l in data:\n pass\n\n\nclass Dataset(gluon.data.Dataset):\n def __len__(self):\n return 100\n def __getitem__(self, key):\n return mx.nd.full((10,), key)\n\n@with_seed()\ndef test_multi_worker():\n data = Dataset()\n for thread_pool in [True, False]:\n loader = gluon.data.DataLoader(data, batch_size=1, num_workers=5, thread_pool=thread_pool)\n for i, batch in enumerate(loader):\n assert (batch.asnumpy() == i).all()\n\n\n@with_seed()\ndef test_multi_worker_shape():\n for thread_pool in [True, False]:\n batch_size = 1024\n shape = (batch_size+1, 11, 12)\n\n data = ArrayDataset(np.ones(shape))\n loader = gluon.data.DataLoader(\n data, batch_size=batch_size, num_workers=5, last_batch='keep', thread_pool=thread_pool)\n for batch in loader:\n if shape[0] > batch_size:\n assert batch.shape == (batch_size, shape[1], shape[2])\n shape = (shape[0] - batch_size, shape[1], shape[2])\n else:\n assert batch.shape == shape\n\nclass _Dummy(Dataset):\n \"\"\"Dummy dataset for randomized shape arrays.\"\"\"\n def __init__(self, random_shape):\n self.random_shape = random_shape\n\n def __getitem__(self, idx):\n key = idx\n if self.random_shape:\n out = np.random.uniform(size=(random.randint(1000, 1100), 40))\n labels = np.random.uniform(size=(random.randint(10, 15)))\n else:\n out = np.random.uniform(size=(1000, 40))\n labels = 
np.random.uniform(size=(10))\n return key, out, labels\n\n def __len__(self):\n return 50\n\ndef _batchify_list(data):\n \"\"\"\n return list of ndarray without stack/concat/pad\n \"\"\"\n if isinstance(data, (tuple, list)):\n return list(data)\n if isinstance(data, mx.nd.NDArray):\n return [data]\n return data\n\ndef _batchify(data):\n \"\"\"\n Collate data into batch. Use shared memory for stacking.\n :param data: a list of array, with layout of 'NTC'.\n :return either x and x's unpadded lengths, or x, x's unpadded lengths, y and y's unpadded lengths\n if labels are not supplied.\n \"\"\"\n\n # input layout is NTC\n keys, inputs, labels = [item[0] for item in data], [item[1] for item in data], \\\n [item[2] for item in data]\n\n if len(data) > 1:\n max_data_len = max([seq.shape[0] for seq in inputs])\n max_labels_len = 0 if not labels else max([seq.shape[0] for seq in labels])\n else:\n max_data_len = inputs[0].shape[0]\n max_labels_len = 0 if not labels else labels[0].shape[0]\n\n x_lens = [item.shape[0] for item in inputs]\n y_lens = [item.shape[0] for item in labels]\n\n for i, seq in enumerate(inputs):\n pad_len = max_data_len - seq.shape[0]\n inputs[i] = np.pad(seq, ((0, pad_len), (0, 0)), 'constant', constant_values=0)\n labels[i] = np.pad(labels[i], (0, max_labels_len - labels[i].shape[0]),\n 'constant', constant_values=-1)\n\n inputs = np.asarray(inputs, dtype=np.float32)\n if labels is not None:\n labels = np.asarray(labels, dtype=np.float32)\n inputs = inputs.transpose((1, 0, 2))\n labels = labels.transpose((1, 0))\n\n return (nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(x_lens, ctx=context.Context('cpu_shared', 0))) \\\n if labels is None else (\n nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(x_lens, ctx=context.Context('cpu_shared', 0)),\n nd.array(labels, dtype=labels.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(y_lens, ctx=context.Context('cpu_shared', 0)))\n\n@with_seed()\ndef test_multi_worker_forked_data_loader():\n data = _Dummy(False)\n loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2)\n for epoch in range(1):\n for i, data in enumerate(loader):\n pass\n\n data = _Dummy(True)\n loader = DataLoader(data, batch_size=40, batchify_fn=_batchify_list, num_workers=2)\n for epoch in range(1):\n for i, data in enumerate(loader):\n pass\n\n@with_seed()\ndef test_multi_worker_dataloader_release_pool():\n # will trigger too many open file if pool is not released properly\n if os.name == 'nt':\n print('Skip for windows since spawn on windows is too expensive.')\n return\n\n from sys import platform\n if platform == 'darwin':\n print('Skip for MacOS due to https://github.com/apache/incubator-mxnet/issues/17782')\n return\n\n for _ in range(10):\n A = np.random.rand(999, 2000)\n D = mx.gluon.data.DataLoader(A, batch_size=8, num_workers=8)\n the_iter = iter(D)\n next(the_iter)\n del the_iter\n del D\n\n\ndef test_dataloader_context():\n X = np.random.uniform(size=(10, 20))\n dataset = gluon.data.ArrayDataset(X)\n default_dev_id = 0\n custom_dev_id = 1\n\n # use non-pinned memory\n loader1 = gluon.data.DataLoader(dataset, 8)\n for _, x in enumerate(loader1):\n assert x.context == context.cpu(default_dev_id)\n\n # use pinned memory with default device id\n loader2 = gluon.data.DataLoader(dataset, 8, pin_memory=True)\n for _, x in enumerate(loader2):\n assert x.context == context.cpu_pinned(default_dev_id)\n\n # use pinned memory with custom device id\n loader3 = 
gluon.data.DataLoader(dataset, 8, pin_memory=True,\n pin_device_id=custom_dev_id)\n for _, x in enumerate(loader3):\n assert x.context == context.cpu_pinned(custom_dev_id)\n\ndef batchify(a):\n return a\n\ndef test_dataset_filter():\n length = 100\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n a_filtered = a.filter(lambda x: x % 10 == 0)\n assert(len(a_filtered) == 10)\n for idx, sample in enumerate(a_filtered):\n assert sample % 10 == 0\n a_xform_filtered = a.transform(lambda x: x + 1).filter(lambda x: x % 10 == 0)\n assert(len(a_xform_filtered) == 10)\n # the filtered data is already transformed\n for idx, sample in enumerate(a_xform_filtered):\n assert sample % 10 == 0\n\ndef test_dataset_shard():\n length = 9\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n shard_0 = a.shard(4, 0)\n shard_1 = a.shard(4, 1)\n shard_2 = a.shard(4, 2)\n shard_3 = a.shard(4, 3)\n assert len(shard_0) + len(shard_1) + len(shard_2) + len(shard_3) == length\n assert len(shard_0) == 3\n assert len(shard_1) == 2\n assert len(shard_2) == 2\n assert len(shard_3) == 2\n total = 0\n for shard in [shard_0, shard_1, shard_2, shard_3]:\n for idx, sample in enumerate(shard):\n total += sample\n assert total == sum(a)\n\ndef test_dataset_take():\n length = 100\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n a_take_full = a.take(1000)\n assert len(a_take_full) == length\n a_take_full = a.take(None)\n assert len(a_take_full) == length\n count = 10\n a_take_10 = a.take(count)\n assert len(a_take_10) == count\n expected_total = sum([i for i in range(count)])\n total = 0\n for idx, sample in enumerate(a_take_10):\n assert sample < count\n total += sample\n assert total == expected_total\n\n a_xform_take_10 = a.transform(lambda x: x * 10).take(count)\n assert len(a_xform_take_10) == count\n expected_total = sum([i * 10 for i in range(count)])\n total = 0\n for idx, sample in enumerate(a_xform_take_10):\n assert sample < count * 10\n total += sample\n assert total == expected_total\n\ndef test_dataloader_scope():\n \"\"\"\n Bug: Gluon DataLoader terminates the process pool early while\n _MultiWorkerIter is operating on the pool.\n\n Tests that DataLoader is not garbage collected while the iterator is\n in use.\n \"\"\"\n args = {'num_workers': 1, 'batch_size': 2}\n dataset = nd.ones(5)\n iterator = iter(DataLoader(\n dataset,\n batchify_fn=batchify,\n **args\n )\n )\n\n item = next(iterator)\n\n assert item is not None\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n" ]
[ [ "numpy.pad", "numpy.asarray", "numpy.ones", "numpy.random.rand", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
greyzor/dash-worldmap-metrics
[ "6895544edba0ccc0a00df9b88da0a3936d11c695" ]
[ "helpers.py" ]
[ "\"\"\"\nPrecomputing, App Layout (Layers/Markers), Callbacks helpers.\n\"\"\"\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n# import dash_colorscales\n# import cufflinks as cf\nimport pandas as pd\nimport numpy as np\nimport re\nimport json\nimport dash_dangerously_set_inner_html\n\nfrom conf import *\nfrom colors import (\n _color_from_bin,\n _opacity_from_bin,\n _border_color_from_bin\n)\n\n## helpers:\ndef _extract_lng(arr):\n \"\"\" Extract average longitude from array of geo-coordinates in format: (lng,lat)\"\"\"\n return np.mean([item[0] for item in arr[0]])\n\ndef _extract_lat(arr):\n \"\"\" Extract average latitude from array of geo-coordinates in format: (lng,lat)\"\"\"\n return np.mean([item[1] for item in arr[0]])\n\ndef create_country_geoloc_dataframe(source):\n \"\"\"Create geolocation Dataframe from source raw data.\n\n :param source: raw source data.\n :type source: dict.\n\n :returns: a geolocation DataFrame.\n :rtype: pd.DataFrame\n \"\"\"\n df_geo = pd.DataFrame(source['features'])\n df_geo['Country'] = df_geo['properties'].apply(lambda d: d['name'])\n\n ## For each polygon in MultiPolygon, explode into a new row.\n df_geo['geo_type'] = df_geo['geometry'].apply(lambda d: d['type'])\n df_geo['coord'] = df_geo['geometry'].apply(lambda d: d['coordinates'])\n df_geo = df_geo[['type','geo_type','Country','coord']]\n x = df_geo[df_geo.geo_type=='MultiPolygon']['coord'].apply(pd.Series)\n x = x.merge(df_geo, left_index=True, right_index=True).drop('coord',axis=1).melt(id_vars=['type', 'geo_type','Country'], value_name = \"coord\")\n x = x.dropna().sort_values(['Country','variable'])\n\n ## Merge new exploded MultiPolygon with non-exploded Polygon\n df_geo[df_geo.geo_type=='Polygon']['variable'] = 0\n df_geo = pd.concat([x,df_geo[df_geo.geo_type=='Polygon']]).sort_values(['Country','variable'])\n\n ## Compute geo-coordinates\n df_geo['lng'] = df_geo['coord'].apply(_extract_lng)\n df_geo['lat'] = df_geo['coord'].apply(_extract_lat)\n\n df_geo = df_geo[['Country','lng','lat']]\n return df_geo\n\ndef generate_random_country_partitions(source, scale=SCALE):\n \"\"\"Create random country partitions from source raw data.\n\n :param source: raw source data.\n :type source: dict.\n\n :returns: a partitions of countries.\n :rtype: dict of partitions, each key is the bin idx,\n each value is the list of countries for the bin.\n \"\"\"\n global N_BINS\n\n data = [(d['properties']['name'], d['id']) for d in source['features']]\n df = pd.DataFrame(data)\n df['count'] = (np.random.rand(df.shape[0])*scale).astype(int)\n # return df\n\n df['bin'] = (df['count']/N_BINS).astype(int)\n partitions = df.groupby('bin')[0].apply(list).to_json()\n partitions = json.loads(partitions)\n return partitions\n\ndef compute_country_airquality_scores(source, fpath='./data/air_quality_country.csv'):\n \"\"\"Compute Air Quality scores from source raw data.\n\n :param source: raw source data.\n :type source: dict.\n :param fpath: data file path.\n :type fpath: str\n\n :returns: a partitions of countries, and dataframe of scores\n :rtype: tuple (dict of partitions, pd.DataFrame of bin scores per country)\n \"\"\"\n ## Countries list\n all_countries = [d['properties']['name'] for d in source['features']]\n\n ## Air quality data\n df = pd.read_csv(fpath, sep=',')\n df_latest = df[df.Year==2015]\n\n ## Cleanify data\n mapping = {\n 'Guinea-Bissau': 'Guinea Bissau',\n \"Cote d'Ivoire\": 'Ivory Coast',\n 'Serbia': 'Republic of Serbia',\n 
'Congo': 'Republic of the Congo',\n 'Russian Federation': 'Russia',\n 'Tanzania': 'United Republic of Tanzania',\n 'United States': 'United States of America',\n }\n def replace_country_name(x):\n \"\"\" Replace country name by its mapping using dict. \"\"\"\n if x in mapping.keys():\n return mapping[x]\n return x\n\n df_latest['Country'] = df_latest['Type'].apply(replace_country_name)\n\n ## Merge list of countries with Normalized Air quality estimate\n df_final = pd.DataFrame(all_countries, columns=['Country']).merge(df_latest[['Country', 'Exposure_Mean']])\n scaler = np.max(df_latest['Exposure_Mean'])*1.05\n df_final['Exposure_Norm'] = (100*df_final['Exposure_Mean']/scaler).astype(int)\n\n df_final['bin'] = (df_final['Exposure_Norm']/N_BINS).astype(int)\n partitions = df_final.groupby('bin')['Country'].apply(list).to_json()\n partitions = json.loads(partitions)\n return (partitions, df_final)\n\ndef build_mapbox_layers_for_countries(source, partitions, colors, layer_border_colors='white'):\n \"\"\"Build Mapbox layers struct.\n\n :param source: raw source data.\n :type source: dict.\n :param partitions: dict of partitions, key is bin, value is list of countries for bin.\n :type partitions: dict[list]\n :param colors: list of colors, one per layer.\n :type colors: list[str]\n :param layer_border_colors: borders color per layer.\n :type layer_border_colors: list[str] or str\n\n :returns: Mapbox layers inner struct.\n :rtype: list[dict], each dict being an inner map layer.\n \"\"\"\n first_symbol_id = None\n\n layers = []\n for _bin in partitions.keys():\n countries = partitions[_bin]\n\n _source = {}\n _source.setdefault('type', source['type'])\n _source['features'] = filter(\n lambda d: d['properties']['name'] in countries,\n source['features']\n )\n\n layer = dict(\n sourcetype='geojson',\n source=_source,\n type='fill',\n color=colors[int(_bin)],\n opacity=DEFAULT_OPACITY,\n # below=\"water\"\n # below=\"state-label-sm\",\n # below=\"mapbox\"\n )\n layers.append(layer)\n\n layer = dict(\n sourcetype='geojson',\n source=_source,\n type='line',\n color=layer_border_colors[int(_bin)],\n opacity=1.0,\n )\n layers.append(layer)\n\n return layers\n\ndef build_app_layout(app, data, layers, mapbox_access_token, default_style_value='custom'):\n \"\"\"Build Application Layout.\n\n :param app: dash app.\n :type app: dash.dash.Dash\n :param data: mapbox data inner struct.\n :type data: list[dict]\n :param layers: mapbox layers inner struct.\n :type layers: list[dict], each dict being an inner map layer.\n :param mapbox_access_token: mapbox access token.\n :type mapbox_access_token: str\n :param default_style_value: default style.\n :type default_style_value: str.\n\n :returns: app object with layout field updated.\n :rtype: dash.dash.Dash\n \"\"\"\n ## Main layout\n app.layout = html.Div(children=[\n\n html.Div([\n ## Header\n html.Div(\n [\n html.H4(\n 'World Map Metrics',\n style={'text-align':'center', 'display':'inline-block', 'margin':'20px 0px 20px 40px'}\n ),\n html.Div(\n # [\n # dash_dangerously_set_inner_html.DangerouslySetInnerHTML('''\n # <a class=\"github-button\" href=\"https://github.com/greyzor/dash-worldmap-metrics\" data-size=\"large\" data-show-count=\"true\" aria-label=\"Star greyzor/dash-worldmap-metrics on GitHub\">Star</a>\n # ''')\n # ],\n html.A(\n html.Button('Show on Github!'),\n href='https://github.com/greyzor/dash-worldmap-metrics',\n target='_blank'\n ),\n style={'float':'right', 'background-color':'white', 'margin':'20px 40px 20px 0px'},\n )\n ],\n 
style={'background-color':'#e51b79', 'color':'white'}\n ),\n\n ## Inputs and selection dropdowns\n html.Div(\n [\n html.Div(\n [\n dcc.Dropdown(\n id='metric-1-dropdown',\n options=[\n {'label': 'PM25 pollution exposure', 'value': 'PM25'},\n {'label': 'Other metric', 'value': 'OTHER'},\n ],\n value='PM25',\n ),\n ],\n className='three columns'\n ),\n html.Div(\n 'Exposure to PM25 air pollution for 2015, with data from: www.stateofglobalair.org',\n className='six columns',\n style={'font-weight':'bold', 'font-size':'16px'}\n ),\n html.Div(\n dcc.Dropdown(\n id='map-style-selector',\n options=[\n {'label': 'Style: Default', 'value': 'default'},\n {'label': 'Style: Custom', 'value': 'custom'},\n ],\n value=default_style_value,\n ),\n className='three columns'\n ),\n ],\n style={'background-color':'white', 'text-align':'center', 'padding':'1.5rem'},\n className='row'\n ),\n\n ## The Map\n dcc.Graph(\n id='world-map',\n figure=build_map_figure(\n data,\n None,\n mapbox_access_token,\n DEFAULT_COLORSCALE,\n map_style=VALUE_TO_MAPBOX_STYLE[default_style_value]\n ),\n style={'height':'80vh'}\n ),\n ], style={'height':'100%'}),\n ], className='twelve columns', style={'margin':0, 'height':'98vh', 'background-color':'white'})\n return app\n\ndef build_mapbox_geo_data(df_geo, text_col='description', markers=None):\n \"\"\"Build Mapbox geolocation inner data struct.\n\n :param df_geo: a geolocation DataFrame.\n :type df_geo: pd.DataFrame\n :param text_col: column name for text.\n :type text_col: str\n :param markers: markers to be displayed on map.\n :type markers: dict\n\n :returns: mapbox data inner struct.\n :rtype: list[dict]\n \"\"\"\n data = [\n dict(\n lat=df_geo['lat'],\n lon=df_geo['lng'],\n text=df_geo[text_col],\n type='scattermapbox',\n hoverinfo='text',\n selected = dict(marker = dict(opacity=1)),\n unselected = dict(marker = dict(opacity = 0)),\n # mode='markers+text',\n mode='markers+text',\n marker=markers,\n )\n ]\n return data\n\n\ndef build_map_figure(data, layers, mapbox_access_token, annot_colors, map_style='light'):\n \"\"\"Build Mapbox figure.\n\n :param data: mapbox data inner struct.\n :type data: list[dict]\n :param layers: mapbox layers inner struct.\n :type layers: list[dict], each dict being an inner map layer.\n :param mapbox_access_token: mapbox access token.\n :type mapbox_access_token: str\n :param annot_colors: annotation colors used to show a legend.\n :type annot_colors: list\n :param map_style: default map style.\n :type map_style: str.\n\n :returns: dash dcc.Graph figure field.\n :rtype: dict\n \"\"\"\n annotations = None\n if layers is not None and len(layers) > 0:\n annotations = [dict(\n showarrow=False,\n align='right',\n text='<b>PM25 level ranges:</b>',\n x=0.975,\n y=0.95,\n bgcolor='white'\n )]\n\n for k, color in enumerate(annot_colors):\n annotations.append(\n dict(\n arrowcolor = color,\n text='range: %s-%s'%(10*k, 10*(k+1)),\n x = 0.975,\n y = 0.90-0.3*k/N_BINS,\n ax = -90,\n ay = 0,\n arrowwidth=12,\n arrowhead=0,\n bgcolor = '#EFEFEE'\n )\n )\n\n return dict(\n data=data,\n layout=dict(\n mapbox=dict(\n layers=layers,\n accesstoken=mapbox_access_token,\n style=map_style,\n center=dict(\n lat=30, #38.72490,\n lon=-1.67571, #-95.61446,\n ),\n pitch=0,\n zoom=1.5,\n ),\n annotations=annotations,\n margin=dict(r=0, l=0, t=0, b=0),\n showlegend=False,\n # **{'height':'900px','min-height':'300px','max-height':'70vh'}\n )\n )\n\ndef build_app(app):\n \"\"\"From default dash.dash.Dash application, return build and customized app.\"\"\"\n ## load: source 
data\n with open('data/countries.geo.json') as f:\n source = json.load(f)\n\n df_geo = create_country_geoloc_dataframe(source)\n # partitions = generate_random_country_partitions(source, scale=SCALE)\n (partitions, df_scores) = compute_country_airquality_scores(source, fpath='./data/air_quality_country.csv')\n\n df_geo = df_geo.merge(df_scores[['Country','Exposure_Mean', 'bin']])\n df_geo['description'] = df_geo['Country']+': '+df_geo['Exposure_Mean'].astype(str)\n\n ## colors for markers (one per country), and borders (one color per layer)\n marker_colors = df_geo['bin'].apply(lambda idx: _color_from_bin(idx, N_BINS))\n layer_border_colors = [_border_color_from_bin(int(_bin), N_BINS) for _bin in partitions.keys()]\n\n markers = dict(\n size=25,\n color=marker_colors,\n # opacity=df_geo['bin'].apply(lambda idx: _opacity_from_bin(idx, N_BINS))\n opacity=1.\n )\n\n ## build: map data and layers\n layers = build_mapbox_layers_for_countries(\n source, partitions, DEFAULT_COLORSCALE,\n layer_border_colors=layer_border_colors\n )\n data = build_mapbox_geo_data(df_geo, text_col='description', markers=markers)\n\n ## build: layout\n app = build_app_layout(app, data, layers, MAPBOX_ACCESS_TOKEN, default_style_value='custom')\n\n ## styling: external\n app.css.append_css({'external_url': 'https://codepen.io/plotly/pen/EQZeaW.css'})\n\n ## callbacks\n def _change_map_style_callback(value):\n \"\"\" Callback to change map style, according to value.\"\"\"\n map_style = VALUE_TO_MAPBOX_STYLE[value]\n\n return build_map_figure(\n data,\n layers,\n MAPBOX_ACCESS_TOKEN,\n DEFAULT_COLORSCALE,\n map_style=map_style\n )\n\n app.callback(\n Output('world-map', 'figure'),\n [Input('map-style-selector', 'value')]\n )(_change_map_style_callback)\n\n return app" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.DataFrame", "numpy.max", "numpy.mean", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
MalikIdreesHasanKhan/NeMo
[ "984fd34921e81659c4594a22ab142311808b3bb7" ]
[ "nemo/collections/asr/models/clustering_diarizer.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport pickle as pkl\nimport shutil\nimport tarfile\nimport tempfile\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom typing import List, Optional\n\nimport torch\nfrom omegaconf import DictConfig, OmegaConf\nfrom pytorch_lightning.utilities import rank_zero_only\nfrom tqdm import tqdm\n\nfrom nemo.collections.asr.models.classification_models import EncDecClassificationModel\nfrom nemo.collections.asr.models.label_models import EncDecSpeakerLabelModel\nfrom nemo.collections.asr.parts.mixins.mixins import DiarizationMixin\nfrom nemo.collections.asr.parts.utils.speaker_utils import (\n audio_rttm_map,\n get_uniqname_from_filepath,\n perform_clustering,\n score_labels,\n segments_manifest_to_subsegments_manifest,\n write_rttm2manifest,\n)\nfrom nemo.collections.asr.parts.utils.vad_utils import (\n generate_overlap_vad_seq,\n generate_vad_segment_table,\n get_vad_stream_status,\n prepare_manifest,\n)\nfrom nemo.core.classes import Model\nfrom nemo.utils import logging, model_utils\n\ntry:\n from torch.cuda.amp import autocast\nexcept ImportError:\n from contextlib import contextmanager\n\n @contextmanager\n def autocast(enabled=None):\n yield\n\n\n__all__ = ['ClusteringDiarizer']\n\n_MODEL_CONFIG_YAML = \"model_config.yaml\"\n_VAD_MODEL = \"vad_model.nemo\"\n_SPEAKER_MODEL = \"speaker_model.nemo\"\n\n\ndef get_available_model_names(class_name):\n \"lists available pretrained model names from NGC\"\n available_models = class_name.list_available_models()\n return list(map(lambda x: x.pretrained_model_name, available_models))\n\n\nclass ClusteringDiarizer(Model, DiarizationMixin):\n \"\"\"\n Inference model Class for offline speaker diarization. \n This class handles required functionality for diarization : Speech Activity Detection, Segmentation, \n Extract Embeddings, Clustering, Resegmentation and Scoring. 
\n All the parameters are passed through config file \n \"\"\"\n\n def __init__(self, cfg: DictConfig):\n cfg = model_utils.convert_model_config_to_dict_config(cfg)\n # Convert config to support Hydra 1.0+ instantiation\n cfg = model_utils.maybe_update_config_version(cfg)\n self._cfg = cfg\n\n # Diarizer set up\n self._diarizer_params = self._cfg.diarizer\n\n # init vad model\n self.has_vad_model = False\n if not self._diarizer_params.oracle_vad:\n if self._cfg.diarizer.vad.model_path is not None:\n self._vad_params = self._cfg.diarizer.vad.parameters\n self._init_vad_model()\n\n # init speaker model\n self._init_speaker_model()\n self._speaker_params = self._cfg.diarizer.speaker_embeddings.parameters\n self._speaker_dir = os.path.join(self._diarizer_params.out_dir, 'speaker_outputs')\n shutil.rmtree(self._speaker_dir, ignore_errors=True)\n os.makedirs(self._speaker_dir)\n\n # Clustering params\n self._cluster_params = self._diarizer_params.clustering.parameters\n\n self._device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n @classmethod\n def list_available_models(cls):\n pass\n\n def _init_vad_model(self):\n \"\"\"\n Initialize vad model with model name or path passed through config\n \"\"\"\n model_path = self._cfg.diarizer.vad.model_path\n if model_path.endswith('.nemo'):\n self._vad_model = EncDecClassificationModel.restore_from(model_path)\n logging.info(\"VAD model loaded locally from {}\".format(model_path))\n else:\n if model_path not in get_available_model_names(EncDecClassificationModel):\n logging.warning(\n \"requested {} model name not available in pretrained models, instead\".format(model_path)\n )\n model_path = \"vad_telephony_marblenet\"\n logging.info(\"Loading pretrained {} model from NGC\".format(model_path))\n self._vad_model = EncDecClassificationModel.from_pretrained(model_name=model_path)\n\n self._vad_window_length_in_sec = self._vad_params.window_length_in_sec\n self._vad_shift_length_in_sec = self._vad_params.shift_length_in_sec\n self.has_vad_model = True\n\n def _init_speaker_model(self):\n \"\"\"\n Initialize speaker embedding model with model name or path passed through config\n \"\"\"\n model_path = self._cfg.diarizer.speaker_embeddings.model_path\n if model_path is not None and model_path.endswith('.nemo'):\n self._speaker_model = EncDecSpeakerLabelModel.restore_from(model_path)\n logging.info(\"Speaker Model restored locally from {}\".format(model_path))\n elif model_path.endswith('.ckpt'):\n self._speaker_model = EncDecSpeakerLabelModel.load_from_checkpoint(model_path)\n logging.info(\"Speaker Model restored locally from {}\".format(model_path))\n else:\n if model_path not in get_available_model_names(EncDecSpeakerLabelModel):\n logging.warning(\n \"requested {} model name not available in pretrained models, instead\".format(model_path)\n )\n model_path = \"ecapa_tdnn\"\n logging.info(\"Loading pretrained {} model from NGC\".format(model_path))\n self._speaker_model = EncDecSpeakerLabelModel.from_pretrained(model_name=model_path)\n\n def _setup_vad_test_data(self, manifest_vad_input):\n vad_dl_config = {\n 'manifest_filepath': manifest_vad_input,\n 'sample_rate': self._cfg.sample_rate,\n 'batch_size': self._cfg.get('batch_size'),\n 'vad_stream': True,\n 'labels': ['infer',],\n 'time_length': self._vad_window_length_in_sec,\n 'shift_length': self._vad_shift_length_in_sec,\n 'trim_silence': False,\n 'num_workers': self._cfg.num_workers,\n }\n self._vad_model.setup_test_data(test_data_config=vad_dl_config)\n\n def 
_setup_spkr_test_data(self, manifest_file):\n spk_dl_config = {\n 'manifest_filepath': manifest_file,\n 'sample_rate': self._cfg.sample_rate,\n 'batch_size': self._cfg.get('batch_size'),\n 'time_length': self._speaker_params.window_length_in_sec,\n 'shift_length': self._speaker_params.shift_length_in_sec,\n 'trim_silence': False,\n 'labels': None,\n 'task': \"diarization\",\n 'num_workers': self._cfg.num_workers,\n }\n self._speaker_model.setup_test_data(spk_dl_config)\n\n def _run_vad(self, manifest_file):\n \"\"\"\n Run voice activity detection. \n Get log probability of voice activity detection and smoothes using the post processing parameters. \n Using generated frame level predictions generated manifest file for later speaker embedding extraction.\n input:\n manifest_file (str) : Manifest file containing path to audio file and label as infer\n\n \"\"\"\n\n shutil.rmtree(self._vad_dir, ignore_errors=True)\n os.makedirs(self._vad_dir)\n\n self._vad_model = self._vad_model.to(self._device)\n self._vad_model.eval()\n\n time_unit = int(self._vad_window_length_in_sec / self._vad_shift_length_in_sec)\n trunc = int(time_unit / 2)\n trunc_l = time_unit - trunc\n all_len = 0\n data = []\n for line in open(manifest_file, 'r'):\n file = json.loads(line)['audio_filepath']\n data.append(get_uniqname_from_filepath(file))\n\n status = get_vad_stream_status(data)\n for i, test_batch in enumerate(tqdm(self._vad_model.test_dataloader())):\n test_batch = [x.to(self._device) for x in test_batch]\n with autocast():\n log_probs = self._vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])\n probs = torch.softmax(log_probs, dim=-1)\n pred = probs[:, 1]\n if status[i] == 'start':\n to_save = pred[:-trunc]\n elif status[i] == 'next':\n to_save = pred[trunc:-trunc_l]\n elif status[i] == 'end':\n to_save = pred[trunc_l:]\n else:\n to_save = pred\n all_len += len(to_save)\n outpath = os.path.join(self._vad_dir, data[i] + \".frame\")\n with open(outpath, \"a\") as fout:\n for f in range(len(to_save)):\n fout.write('{0:0.4f}\\n'.format(to_save[f]))\n del test_batch\n if status[i] == 'end' or status[i] == 'single':\n all_len = 0\n\n if not self._vad_params.smoothing:\n # Shift the window by 10ms to generate the frame and use the prediction of the window to represent the label for the frame;\n self.vad_pred_dir = self._vad_dir\n else:\n # Generate predictions with overlapping input segments. 
Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.\n # smoothing_method would be either in majority vote (median) or average (mean)\n logging.info(\"Generating predictions with overlapping input segments\")\n smoothing_pred_dir = generate_overlap_vad_seq(\n frame_pred_dir=self._vad_dir,\n smoothing_method=self._vad_params.smoothing,\n overlap=self._vad_params.overlap,\n seg_len=self._vad_window_length_in_sec,\n shift_len=self._vad_shift_length_in_sec,\n num_workers=self._cfg.num_workers,\n )\n self.vad_pred_dir = smoothing_pred_dir\n\n logging.info(\"Converting frame level prediction to speech/no-speech segment in start and end times format.\")\n\n table_out_dir = generate_vad_segment_table(\n vad_pred_dir=self.vad_pred_dir,\n postprocessing_params=self._vad_params,\n shift_len=self._vad_shift_length_in_sec,\n num_workers=self._cfg.num_workers,\n )\n AUDIO_VAD_RTTM_MAP = deepcopy(self.AUDIO_RTTM_MAP.copy())\n for key in AUDIO_VAD_RTTM_MAP:\n AUDIO_VAD_RTTM_MAP[key]['rttm_filepath'] = os.path.join(table_out_dir, key + \".txt\")\n\n write_rttm2manifest(AUDIO_VAD_RTTM_MAP, self._vad_out_file)\n self._speaker_manifest_path = self._vad_out_file\n\n def _run_segmentation(self):\n\n self.subsegments_manifest_path = os.path.join(self._speaker_dir, 'subsegments.json')\n self.subsegments_manifest_path = segments_manifest_to_subsegments_manifest(\n segments_manifest_file=self._speaker_manifest_path,\n subsegments_manifest_file=self.subsegments_manifest_path,\n window=self._speaker_params.window_length_in_sec,\n shift=self._speaker_params.shift_length_in_sec,\n )\n\n return None\n\n def _perform_speech_activity_detection(self):\n \"\"\"\n Checks for type of speech activity detection from config. Choices are NeMo VAD,\n external vad manifest and oracle VAD (generates speech activity labels from provided RTTM files)\n \"\"\"\n if self.has_vad_model:\n self._dont_auto_split = False\n self._split_duration = 50\n manifest_vad_input = self._diarizer_params.manifest_filepath\n\n if not self._dont_auto_split:\n logging.info(\"Split long audio file to avoid CUDA memory issue\")\n logging.debug(\"Try smaller split_duration if you still have CUDA memory issue\")\n config = {\n 'manifest_filepath': manifest_vad_input,\n 'time_length': self._vad_window_length_in_sec,\n 'split_duration': self._split_duration,\n 'num_workers': self._cfg.num_workers,\n }\n manifest_vad_input = prepare_manifest(config)\n else:\n logging.warning(\n \"If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it.\"\n )\n\n self._setup_vad_test_data(manifest_vad_input)\n self._run_vad(manifest_vad_input)\n\n elif self._diarizer_params.vad.external_vad_manifest is not None:\n self._speaker_manifest_path = self._diarizer_params.vad.external_vad_manifest\n elif self._diarizer_params.oracle_vad:\n self._speaker_manifest_path = os.path.join(self._speaker_dir, 'oracle_vad_manifest.json')\n self._speaker_manifest_path = write_rttm2manifest(self.AUDIO_RTTM_MAP, self._speaker_manifest_path)\n else:\n raise ValueError(\n \"Only one of diarizer.oracle_vad, vad.model_path or vad.external_vad_manifest must be passed\"\n )\n\n def _extract_embeddings(self, manifest_file):\n \"\"\"\n This method extracts speaker embeddings from segments passed through manifest_file\n Optionally you may save the intermediate speaker embeddings for debugging or any use. 
\n \"\"\"\n logging.info(\"Extracting embeddings for Diarization\")\n self._setup_spkr_test_data(manifest_file)\n self.embeddings = defaultdict(list)\n self._speaker_model = self._speaker_model.to(self._device)\n self._speaker_model.eval()\n self.time_stamps = {}\n\n all_embs = []\n for test_batch in tqdm(self._speaker_model.test_dataloader()):\n test_batch = [x.to(self._device) for x in test_batch]\n audio_signal, audio_signal_len, labels, slices = test_batch\n with autocast():\n _, embs = self._speaker_model.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n emb_shape = embs.shape[-1]\n embs = embs.view(-1, emb_shape)\n all_embs.extend(embs.cpu().detach().numpy())\n del test_batch\n\n with open(manifest_file, 'r') as manifest:\n for i, line in enumerate(manifest.readlines()):\n line = line.strip()\n dic = json.loads(line)\n uniq_name = get_uniqname_from_filepath(dic['audio_filepath'])\n self.embeddings[uniq_name].extend([all_embs[i]])\n if uniq_name not in self.time_stamps:\n self.time_stamps[uniq_name] = []\n start = dic['offset']\n end = start + dic['duration']\n stamp = '{:.3f} {:.3f} '.format(start, end)\n self.time_stamps[uniq_name].append(stamp)\n\n if self._speaker_params.save_embeddings:\n embedding_dir = os.path.join(self._speaker_dir, 'embeddings')\n if not os.path.exists(embedding_dir):\n os.makedirs(embedding_dir, exist_ok=True)\n\n prefix = get_uniqname_from_filepath(manifest_file)\n\n name = os.path.join(embedding_dir, prefix)\n self._embeddings_file = name + '_embeddings.pkl'\n pkl.dump(self.embeddings, open(self._embeddings_file, 'wb'))\n logging.info(\"Saved embedding files to {}\".format(embedding_dir))\n\n def path2audio_files_to_manifest(self, paths2audio_files, manifest_filepath):\n with open(manifest_filepath, 'w') as fp:\n for audio_file in paths2audio_files:\n audio_file = audio_file.strip()\n entry = {'audio_filepath': audio_file, 'offset': 0.0, 'duration': None, 'text': '-', 'label': 'infer'}\n fp.write(json.dumps(entry) + '\\n')\n\n def diarize(self, paths2audio_files: List[str] = None, batch_size: int = 0):\n \"\"\"\n Diarize files provided thorugh paths2audio_files or manifest file\n input:\n paths2audio_files (List[str]): list of paths to file containing audio file\n batch_size (int): batch_size considered for extraction of speaker embeddings and VAD computation\n \"\"\"\n\n self._out_dir = self._diarizer_params.out_dir\n if not os.path.exists(self._out_dir):\n os.mkdir(self._out_dir)\n\n self._vad_dir = os.path.join(self._out_dir, 'vad_outputs')\n self._vad_out_file = os.path.join(self._vad_dir, \"vad_out.json\")\n\n if batch_size:\n self._cfg.batch_size = batch_size\n\n if paths2audio_files:\n if type(paths2audio_files) is list:\n self._diarizer_params.manifest_filepath = os.path.json(self._out_dir, 'paths2audio_filepath.json')\n self.path2audio_files_to_manifest(paths2audio_files, self._diarizer_params.manifest_filepath)\n else:\n raise ValueError(\"paths2audio_files must be of type list of paths to file containing audio file\")\n\n self.AUDIO_RTTM_MAP = audio_rttm_map(self._diarizer_params.manifest_filepath)\n\n # Speech Activity Detection\n self._perform_speech_activity_detection()\n\n # Segmentation\n self._run_segmentation()\n\n # Embedding Extraction\n self._extract_embeddings(self.subsegments_manifest_path)\n\n out_rttm_dir = os.path.join(self._out_dir, 'pred_rttms')\n os.makedirs(out_rttm_dir, exist_ok=True)\n\n # Clustering\n all_reference, all_hypothesis = perform_clustering(\n embeddings=self.embeddings,\n 
time_stamps=self.time_stamps,\n AUDIO_RTTM_MAP=self.AUDIO_RTTM_MAP,\n out_rttm_dir=out_rttm_dir,\n clustering_params=self._cluster_params,\n )\n\n # TODO Resegmentation -> Coming Soon\n\n # Scoring\n score = score_labels(\n self.AUDIO_RTTM_MAP,\n all_reference,\n all_hypothesis,\n collar=self._diarizer_params.collar,\n ignore_overlap=self._diarizer_params.ignore_overlap,\n )\n\n logging.info(\"Outputs are saved in {} directory\".format(os.path.abspath(self._diarizer_params.out_dir)))\n return score\n\n @staticmethod\n def __make_nemo_file_from_folder(filename, source_dir):\n with tarfile.open(filename, \"w:gz\") as tar:\n tar.add(source_dir, arcname=\"./\")\n\n @rank_zero_only\n def save_to(self, save_path: str):\n \"\"\"\n Saves model instance (weights and configuration) into EFF archive or .\n You can use \"restore_from\" method to fully restore instance from .nemo file.\n\n .nemo file is an archive (tar.gz) with the following:\n model_config.yaml - model configuration in .yaml format. You can deserialize this into cfg argument for model's constructor\n model_wights.chpt - model checkpoint\n\n Args:\n save_path: Path to .nemo file where model instance should be saved\n \"\"\"\n\n # TODO: Why does this override the main save_to?\n\n with tempfile.TemporaryDirectory() as tmpdir:\n config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)\n spkr_model = os.path.join(tmpdir, _SPEAKER_MODEL)\n\n self.to_config_file(path2yaml_file=config_yaml)\n if self.has_vad_model:\n vad_model = os.path.join(tmpdir, _VAD_MODEL)\n self._vad_model.save_to(vad_model)\n self._speaker_model.save_to(spkr_model)\n self.__make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir)\n\n @staticmethod\n def __unpack_nemo_file(path2file: str, out_folder: str) -> str:\n if not os.path.exists(path2file):\n raise FileNotFoundError(f\"{path2file} does not exist\")\n tar = tarfile.open(path2file, \"r:gz\")\n tar.extractall(path=out_folder)\n tar.close()\n return out_folder\n\n @classmethod\n def restore_from(\n cls,\n restore_path: str,\n override_config_path: Optional[str] = None,\n map_location: Optional[torch.device] = None,\n strict: bool = False,\n ):\n # Get path where the command is executed - the artifacts will be \"retrieved\" there\n # (original .nemo behavior)\n cwd = os.getcwd()\n\n with tempfile.TemporaryDirectory() as tmpdir:\n try:\n cls.__unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)\n os.chdir(tmpdir)\n if override_config_path is None:\n config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)\n else:\n config_yaml = override_config_path\n conf = OmegaConf.load(config_yaml)\n if os.path.exists(os.path.join(tmpdir, _VAD_MODEL)):\n conf.diarizer.vad.model_path = os.path.join(tmpdir, _VAD_MODEL)\n else:\n logging.info(\n f'Model {cls.__name__} does not contain a VAD model. A VAD model or manifest file with'\n f'speech segments need for diarization with this model'\n )\n\n conf.diarizer.speaker_embeddings.model_path = os.path.join(tmpdir, _SPEAKER_MODEL)\n conf.restore_map_location = map_location\n OmegaConf.set_struct(conf, True)\n instance = cls(cfg=conf)\n\n logging.info(f'Model {cls.__name__} was successfully restored from {restore_path}.')\n finally:\n os.chdir(cwd)\n\n return instance\n" ]
[ [ "torch.softmax", "torch.cuda.is_available", "torch.cuda.amp.autocast" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZephyrZhuQi/vqa-maskrcnn-benchmark
[ "16f6c7c9a2e75e8877901e17d6536c108b66e694" ]
[ "maskrcnn_benchmark/modeling/detector/generalized_rcnn.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# Modified by Qi Zhu, November 2019\n\"\"\"\nImplements the Generalized R-CNN framework\n\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\n\nfrom ..backbone import build_backbone\nfrom ..rpn.rpn import build_rpn\nfrom ..roi_heads.roi_heads import build_roi_heads\n\n\nimport numpy as np\n\n\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Main class for Generalized R-CNN. Currently supports boxes and masks.\n It consists of three main parts:\n - backbone\n = rpn\n - heads: takes the features + the proposals from the RPN and computes\n detections / masks from it.\n - disable the functionality of rpn, use ground truth bounding box instead\n \"\"\"\n\n def __init__(self, cfg):\n super(GeneralizedRCNN, self).__init__()\n\n self.backbone = build_backbone(cfg)\n self.rpn = build_rpn(cfg)\n self.roi_heads = build_roi_heads(cfg)\n self.return_feats = cfg.MODEL.ROI_BOX_HEAD.RETURN_FC_FEATS\n\n def forward(self, images, targets=None):\n \"\"\"\n Arguments:\n images (list[Tensor] or ImageList): images to be processed\n targets (list[BoxList]): ground-truth boxes present in the image (optional)\n\n Returns:\n result (list[BoxList] or dict[Tensor]): the output from the model.\n During training, it returns a dict[Tensor] which contains the losses.\n During testing, it returns list[BoxList] contains additional fields\n like `scores`, `labels` and `mask` (for Mask R-CNN models).\n\n \"\"\"\n if self.training and targets is None:\n raise ValueError(\"In training mode, targets should be passed\")\n # images = to_image_list(images)\n features = self.backbone(images.tensors)\n # proposals, proposal_losses = self.rpn(images, features, targets)\n # use gt as proposals instead of rpn\n proposals = []\n for image_index in range(len(images.image_sizes)):\n image_size = images.image_sizes[image_index]\n image_width = image_size[1]\n image_height = image_size[0]\n image_bboxes = images.image_bboxes[image_index]\n # multiply height & width\n image_bboxes = np.asarray(image_bboxes, dtype='float32')\n image_bboxes[:,0] *= image_width\n image_bboxes[:,1] *= image_width\n image_bboxes[:,2] *= image_height\n image_bboxes[:,3] *= image_height\n # xxyy to xyxy\n image_bboxes = image_bboxes[:,[0,2,1,3]]\n b_row = image_bboxes.shape[0]\n b_col = image_bboxes.shape[1]\n pad_col = b_col\n pad_row = b_row if b_row<100 else 100\n bbox_temp = np.zeros((100,4))\n bbox_temp[:pad_row,:pad_col]= image_bboxes[:pad_row,:pad_col] \n bbox_temp = torch.from_numpy(bbox_temp) \n bbox_temp = bbox_temp.cuda()\n #print('bbox', bbox_temp)\n proposal = BoxList(bbox_temp, (image_width,image_height), mode=\"xyxy\")\n proposals.append(proposal)\n\t\t\n \n if self.roi_heads:\n x, result, detector_losses = self.roi_heads(features, proposals, targets)\n else:\n # RPN-only models don't have roi_heads\n x = features\n result = proposals\n detector_losses = {}\n\n if self.training:\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses\n\n if self.return_feats and not self.training:\n #print('result', result[0].bbox)\n return (x, result)\n\n return result\n" ]
[ [ "numpy.asarray", "numpy.zeros", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kin-Zhang/LAV
[ "0a5068c0fad3ecc2f2616801c6d3b00bc0ff03f3", "0a5068c0fad3ecc2f2616801c6d3b00bc0ff03f3" ]
[ "team_code/planner.py", "team_code/waypointer.py" ]
[ "import os\nfrom collections import deque\n\nimport numpy as np\nimport math\n\nclass RoutePlanner(object):\n \n EARTH_RADIUS = 6371e3 # 6371km\n\n def __init__(self, global_plan, curr_threshold=20, next_threshold=75, debug=False):\n self.route = deque()\n self.curr_threshold = curr_threshold\n self.next_threshold = next_threshold\n\n # Convert lat,lon to x,y\n cos_0 = 0.\n for gnss, _ in global_plan:\n cos_0 += gnss['lat'] * (math.pi / 180)\n cos_0 = cos_0 / (len(global_plan))\n self.cos_0 = cos_0\n \n for node in global_plan:\n gnss, cmd = node\n\n x, y = self.latlon_to_xy(gnss['lat'], gnss['lon'])\n self.route.append((x, y))\n\n self.debug = debug\n\n self.current_idx = 0\n self.checkpoint = self.route[0]\n\n def run_step(self, gnss):\n\n x, y = self.latlon_to_xy(gnss[0], gnss[1])\n \n wx, wy = np.array(self.checkpoint)\n curr_distance = np.linalg.norm([wx-x, wy-y])\n\n for i, (wx, wy) in enumerate(self.route):\n \n distance = np.linalg.norm([wx-x, wy-y])\n \n if distance < self.next_threshold and i - self.current_idx==1 and curr_distance < self.curr_threshold:\n self.checkpoint = [wx, wy]\n self.current_idx += 1\n break\n \n return np.array(self.checkpoint) - [x,y]\n\n\n def latlon_to_xy(self, lat, lon):\n\n x = self.EARTH_RADIUS * lat * (math.pi / 180)\n y = self.EARTH_RADIUS * lon * (math.pi / 180) * math.cos(self.cos_0)\n\n return x, y\n", "import math\nimport numpy as np\n\nfrom agents.navigation.local_planner import RoadOption\n\n\nclass Waypointer:\n \n EARTH_RADIUS = 6371e3 # 6371km\n \n def __init__(self, \n global_plan, \n current_gnss, \n threshold_lane=10., \n threshold_before=4.5, \n threshold_after=3.0,\n threshold_max=50.,\n ):\n self._threshold_before = threshold_before\n self._threshold_after = threshold_after\n self._threshold_lane = threshold_lane\n self._threshold_max = threshold_max\n\n self._lane_change_counter = 0\n \n # Convert lat,lon to x,y\n cos_0 = 0.\n for gnss, _ in global_plan:\n cos_0 += gnss['lat'] * (math.pi / 180)\n cos_0 = cos_0 / (len(global_plan))\n self.cos_0 = cos_0\n \n self.global_plan = []\n for node in global_plan:\n gnss, cmd = node\n\n x, y = self.latlon_to_xy(gnss['lat'], gnss['lon'])\n self.global_plan.append((x, y, cmd))\n\n lat, lon, _ = current_gnss\n cx, cy = self.latlon_to_xy(lat, lon)\n self.checkpoint = (cx, cy, RoadOption.LANEFOLLOW)\n \n self.current_idx = -1\n\n def tick(self, gnss):\n \n lat, lon, _ = gnss\n cur_x, cur_y = self.latlon_to_xy(lat, lon)\n \n c_wx, c_wy = np.array(self.checkpoint[:2])\n curr_distance = np.linalg.norm([c_wx-cur_x, c_wy-cur_y])\n\n for i, (wx, wy, cmd) in enumerate(self.global_plan):\n\n # CMD remap... HACK...\n distance = np.linalg.norm([cur_x-wx, cur_y-wy])\n\n if self.checkpoint[2] == RoadOption.LANEFOLLOW and cmd != RoadOption.LANEFOLLOW:\n threshold = self._threshold_before\n else:\n threshold = self._threshold_after\n\n if distance < threshold and i-self.current_idx == 1:\n self.checkpoint = (wx, wy, cmd)\n self.current_idx += 1\n break\n if curr_distance > self._threshold_max and distance < threshold \\\n and i>self.current_idx and cmd in [RoadOption.LEFT, RoadOption.RIGHT]:\n self.checkpoint = (wx, wy, cmd)\n self.current_idx = i\n break\n\n wx, wy, cmd = self.checkpoint\n return wx-cur_x, wy-cur_y, cmd\n\n def latlon_to_xy(self, lat, lon):\n \n x = self.EARTH_RADIUS * lat * (math.pi / 180)\n y = self.EARTH_RADIUS * lon * (math.pi / 180) * math.cos(self.cos_0)\n\n return x, y\n" ]
[ [ "numpy.array", "numpy.linalg.norm" ], [ "numpy.array", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
chengzee/disease_predict
[ "d7a3c57b710ab2e93d56c8d73aeaa21120d3e98c", "d7a3c57b710ab2e93d56c8d73aeaa21120d3e98c", "d7a3c57b710ab2e93d56c8d73aeaa21120d3e98c", "d7a3c57b710ab2e93d56c8d73aeaa21120d3e98c", "d7a3c57b710ab2e93d56c8d73aeaa21120d3e98c" ]
[ "orchid_DiseasePredict/datasets/Attention_in_several_test/LSTM64_LSTM64_LSTM64_LSTM64_A_LSTM64_LSTM64.py", "orchid_DiseasePredict/datasets/Attention_in_several_test/LSTM128_LSTM128_LSTM128_LSTM128_A_LSTM64_LSTM64_LSTM64.py", "orchid_DiseasePredict/datasets/Attention_in_several_test/LSTM32_LSTM32_LSTM32_LSTM32_A_LSTM128_LSTM128_LSTM128_LSTM128.py", "orchid_DiseasePredict/datasets/Attention_in_several_test/LSTM128_LSTM128_LSTM128_LSTM128_A.py", "orchid_DiseasePredict/datasets/Attention_in_several_test/LSTM128_LSTM128_LSTM128_LSTM128_LSTM128_A_LSTM64_LSTM64.py" ]
[ "import pandas as pd\nimport numpy as np\nimport csv\nfrom keras.layers import Dense, Lambda, dot, Activation, concatenate\nfrom keras.layers import Layer\nimport keras.backend as K\n\n# Parameters\n# -------------------------------------------------------------------------------------------------------------------\nbed = [631, 742, 701, 759, 765, 698]\nlookback_days = 3\ndatasInADay = 288\ninput_dim = 3\nsecondsInADay = 60*60*24 \n\n# 定義 attention 機制 (return_sequence=True)\nclass attention(Layer):\n def __init__(self,**kwargs):\n super(attention,self).__init__(**kwargs)\n def build(self,input_shape):\n self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n super(attention, self).build(input_shape)\n def call(self,x):\n et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n at=K.softmax(et)\n at=K.expand_dims(at,axis=-1)\n output=x*at\n return K.sum(output,axis=1, keepdims=True)\n def compute_output_shape(self,input_shape):\n return (input_shape)\n def get_config(self):\n return super(attention,self).get_config()\n\n\n# # 定義 attention 機制 (return_sequence=False)\n# class attention(Layer):\n# def __init__(self,**kwargs):\n# super(attention,self).__init__(**kwargs)\n# def build(self,input_shape):\n# self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n# self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n# super(attention, self).build(input_shape)\n# def call(self,x):\n# et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n# at=K.softmax(et)\n# at=K.expand_dims(at,axis=-1)\n# output=x*at\n# return K.sum(output,axis=1)\n# def compute_output_shape(self,input_shape):\n# return (input_shape[0],input_shape[-1])\n# def get_config(self):\n# return super(attention,self).get_config()\n\n# np.random.seed(1)\n# 讀取 「(統計近期三日)近期死亡csv」\ntargetRecent = pd.read_csv(\"targetRecent.csv\")\n# 轉為 numpy array\ntargetRecent_arr = np.array(targetRecent)\n# print(targetRecent_arr)\n# -------------------------------------------------------------------------------------------------------------------\n# 生成資料集\ndef generator_with_augmentation(inputdata, starttime, lookback, dead_recently, samp_list_1, samp_list_0, targ_list_1, targ_list_0): # 輸入資料 samp_list = []; 輸出結果 targ_list = []\n for i in range(datasInADay):\n rows = np.arange(i+starttime, i+starttime+lookback)\n if np.count_nonzero(inputdata[rows, 4] == 0) <= 316:\n if dead_recently == 1:\n samp_list_1.append(inputdata[rows, 1:4])\n targ_list_1.append(dead_recently)\n if dead_recently == 0:\n samp_list_0.append(inputdata[rows, 1:4])\n targ_list_0.append(dead_recently)\n return samp_list_1, samp_list_0, targ_list_1, targ_list_0\n\nsamples_1 = []\nsamples_0 = []\ntargets_1 = []\ntargets_0 = []\n\n# 測試結果csv建立\nwith open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n writer.writerow([\"第n次,LSTM64_64_64_64_A_LSTM64_64\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n\nfor n in range(len(targetRecent_arr)): # 近期死亡統計數量\n for m in range(len(bed)): # 試驗植床總共六床\n if targetRecent_arr[n, 2] == bed[m]:\n paddeddata_arr = np.array(pd.read_csv(\"addfeature9{}.csv\".format(m+1)))\n # print(\"BedPlant:{}\".format(m+1))\n # 
----------------------------------------------------------------------------------------------------------------------------------------\n # 平均值正規化 [-1, 1]\n data_min = np.min(paddeddata_arr[:, 1:4], axis=0)\n data_max = np.max(paddeddata_arr[:, 1:4], axis=0)\n data_mean = np.mean(paddeddata_arr[:, 1:4], axis=0)\n # print(data_min)\n # print(data_max)\n # print(data_mean)\n paddeddata_arr[:, 1:4] = (paddeddata_arr[:, 1:4]-data_mean)/(data_max-data_min)\n # ----------------------------------------------------------------------------------------------------------------------------------------\n where = np.searchsorted(paddeddata_arr[:, 0], targetRecent_arr[n, 0]-secondsInADay*lookback_days) # 604800 是七天的秒數; 432000 是五天的秒數; 259200 是三天的秒數\n # print(\"where:{}\".format(where))\n samples_1, samples_0, targets_1, targets_0 = generator_with_augmentation(paddeddata_arr, starttime=where, lookback=datasInADay*lookback_days, dead_recently=targetRecent_arr[n, 1], samp_list_1=samples_1, samp_list_0=samples_0, targ_list_1=targets_1, targ_list_0=targets_0)\n# 轉為 numpy array\nsamples_1_arr = np.array(samples_1)\nsamples_0_arr = np.array(samples_0)\ntargets_1_arr = np.array(targets_1)\ntargets_0_arr = np.array(targets_0)\nprint(\"samples_1_arr.shape:{}\".format(samples_1_arr.shape))\nprint(\"samples_0_arr.shape:{}\".format(samples_0_arr.shape))\nprint(\"targets_1_arr.shape:{}\".format(targets_1_arr.shape))\nprint(\"targets_0_arr.shape:{}\".format(targets_0_arr.shape))\n\nprint(np.count_nonzero(targets_1_arr==1))\nprint(np.count_nonzero(targets_0_arr==1))\n\n# # -------------------------------------------------------------------------------------------------------------------\n# # # train test split\nx_train_arr = np.concatenate((samples_1_arr[:int(len(samples_1_arr)*0.7)], samples_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\ny_train_arr = np.concatenate((targets_1_arr[:int(len(samples_1_arr)*0.7)], targets_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\nx_test_arr = np.concatenate((samples_1_arr[int(len(samples_1_arr)*0.7):], samples_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\ny_test_arr = np.concatenate((targets_1_arr[int(len(samples_1_arr)*0.7):], targets_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\nprint(\"x_train_arr.shape:{}\".format(x_train_arr.shape))\nprint(\"y_train_arr.shape:{}\".format(y_train_arr.shape))\nprint(\"x_test_arr.shape:{}\".format(x_test_arr.shape))\nprint(\"y_test_arr.shape:{}\".format(y_test_arr.shape))\n\n# -------------------------------------------------------------------------------------------------------------------\n# tf.keras model\nfor t in range(10): # 做幾遍\n # LSTM 模型的訓練與驗證\n from keras.models import Sequential\n from keras import layers\n from keras.optimizers import RMSprop, Adam\n from keras.callbacks import ModelCheckpoint\n model = Sequential()\n model.add(layers.LSTM(64,\n input_shape=(datasInADay*lookback_days, input_dim), # (288*3, 3)\n return_sequences=True,\n # dropout=0.2\n ))\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(attention())\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=False,\n ))\n model.add(layers.Dense(1, activation='sigmoid'))\n model.summary()\n model.compile(optimizer=Adam(),\n loss = 'binary_crossentropy',\n metrics=['accuracy'])\n# 
-------------------------------------------------------------------------------------------------------------------\n # checkpoint\n filepath=\"weights.best.hdf5\"\n checkpoint = ModelCheckpoint(filepath, \n monitor='val_accuracy', \n verbose=1, \n save_best_only=True,\n mode='max')\n callbacks_list = [checkpoint]\n # fit the model\n history = model.fit(x_train_arr, y_train_arr,\n epochs=200,\n batch_size=256,\n # validation_data=(x_val_arr, y_val_arr),\n validation_split=0.4, \n callbacks=callbacks_list,\n verbose=1)\n model.load_weights(\"weights.best.hdf5\")\n print(\"第{}次結果,選用最好的val_acc來對testSet做預測:\".format(t+1))\n test_score = model.evaluate(x_test_arr, y_test_arr)\n print(\"test_score:{}\".format(test_score))\n # 預測結果\n pred = model.predict(x_test_arr)\n TrueP = 0\n TrueN = 0\n FalseP = 0\n FalseN = 0 \n for pp in range(len(pred)):\n if(pred[pp]>0.5 and y_test_arr[pp]==1):\n TrueP += 1\n if(pred[pp]>0.5 and y_test_arr[pp]==0):\n FalseP += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==1):\n FalseN += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==0):\n TrueN += 1\n print(\"test數量:{}\".format(len(x_test_arr)))\n print(\"True_Positive:{}\".format(TrueP))\n print(\"True_Nagitive:{}\".format(TrueN))\n print(\"False_Positive:{}\".format(FalseP))\n print(\"False_Nagitive:{}\".format(FalseN))\n precision = TrueP/(TrueP+FalseP)\n recall = TrueP/(TrueP+FalseN)\n print(\"Precision:{}\".format(precision))\n print(\"Recall:{}\".format(recall))\n with open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n # writer.writerow([\"第n次\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n writer.writerow([t+1, test_score[1], TrueP, TrueN, FalseP, FalseN, precision, recall])", "import pandas as pd\nimport numpy as np\nimport csv\nfrom keras.layers import Dense, Lambda, dot, Activation, concatenate\nfrom keras.layers import Layer\nimport keras.backend as K\n\n# Parameters\n# -------------------------------------------------------------------------------------------------------------------\nbed = [631, 742, 701, 759, 765, 698]\nlookback_days = 3\ndatasInADay = 288\ninput_dim = 3\nsecondsInADay = 60*60*24 \n\n# 定義 attention 機制 (return_sequence=True)\nclass attention(Layer):\n def __init__(self,**kwargs):\n super(attention,self).__init__(**kwargs)\n def build(self,input_shape):\n self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n super(attention, self).build(input_shape)\n def call(self,x):\n et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n at=K.softmax(et)\n at=K.expand_dims(at,axis=-1)\n output=x*at\n return K.sum(output,axis=1, keepdims=True)\n def compute_output_shape(self,input_shape):\n return (input_shape)\n def get_config(self):\n return super(attention,self).get_config()\n\n\n# # 定義 attention 機制 (return_sequence=False)\n# class attention(Layer):\n# def __init__(self,**kwargs):\n# super(attention,self).__init__(**kwargs)\n# def build(self,input_shape):\n# self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n# self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n# super(attention, self).build(input_shape)\n# def call(self,x):\n# et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n# at=K.softmax(et)\n# at=K.expand_dims(at,axis=-1)\n# output=x*at\n# return K.sum(output,axis=1)\n# def 
compute_output_shape(self,input_shape):\n# return (input_shape[0],input_shape[-1])\n# def get_config(self):\n# return super(attention,self).get_config()\n\n# np.random.seed(1)\n# 讀取 「(統計近期三日)近期死亡csv」\ntargetRecent = pd.read_csv(\"targetRecent.csv\")\n# 轉為 numpy array\ntargetRecent_arr = np.array(targetRecent)\n# print(targetRecent_arr)\n# -------------------------------------------------------------------------------------------------------------------\n# 生成資料集\ndef generator_with_augmentation(inputdata, starttime, lookback, dead_recently, samp_list_1, samp_list_0, targ_list_1, targ_list_0): # 輸入資料 samp_list = []; 輸出結果 targ_list = []\n for i in range(datasInADay):\n rows = np.arange(i+starttime, i+starttime+lookback)\n if np.count_nonzero(inputdata[rows, 4] == 0) <= 316:\n if dead_recently == 1:\n samp_list_1.append(inputdata[rows, 1:4])\n targ_list_1.append(dead_recently)\n if dead_recently == 0:\n samp_list_0.append(inputdata[rows, 1:4])\n targ_list_0.append(dead_recently)\n return samp_list_1, samp_list_0, targ_list_1, targ_list_0\n\nsamples_1 = []\nsamples_0 = []\ntargets_1 = []\ntargets_0 = []\n\n# 測試結果csv建立\nwith open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n writer.writerow([\"第n次,LSTM128_128_128_128_A_LSTM64_64_64\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n\nfor n in range(len(targetRecent_arr)): # 近期死亡統計數量\n for m in range(len(bed)): # 試驗植床總共六床\n if targetRecent_arr[n, 2] == bed[m]:\n paddeddata_arr = np.array(pd.read_csv(\"addfeature9{}.csv\".format(m+1)))\n # print(\"BedPlant:{}\".format(m+1))\n # ----------------------------------------------------------------------------------------------------------------------------------------\n # 平均值正規化 [-1, 1]\n data_min = np.min(paddeddata_arr[:, 1:4], axis=0)\n data_max = np.max(paddeddata_arr[:, 1:4], axis=0)\n data_mean = np.mean(paddeddata_arr[:, 1:4], axis=0)\n # print(data_min)\n # print(data_max)\n # print(data_mean)\n paddeddata_arr[:, 1:4] = (paddeddata_arr[:, 1:4]-data_mean)/(data_max-data_min)\n # ----------------------------------------------------------------------------------------------------------------------------------------\n where = np.searchsorted(paddeddata_arr[:, 0], targetRecent_arr[n, 0]-secondsInADay*lookback_days) # 604800 是七天的秒數; 432000 是五天的秒數; 259200 是三天的秒數\n # print(\"where:{}\".format(where))\n samples_1, samples_0, targets_1, targets_0 = generator_with_augmentation(paddeddata_arr, starttime=where, lookback=datasInADay*lookback_days, dead_recently=targetRecent_arr[n, 1], samp_list_1=samples_1, samp_list_0=samples_0, targ_list_1=targets_1, targ_list_0=targets_0)\n# 轉為 numpy array\nsamples_1_arr = np.array(samples_1)\nsamples_0_arr = np.array(samples_0)\ntargets_1_arr = np.array(targets_1)\ntargets_0_arr = np.array(targets_0)\nprint(\"samples_1_arr.shape:{}\".format(samples_1_arr.shape))\nprint(\"samples_0_arr.shape:{}\".format(samples_0_arr.shape))\nprint(\"targets_1_arr.shape:{}\".format(targets_1_arr.shape))\nprint(\"targets_0_arr.shape:{}\".format(targets_0_arr.shape))\n\nprint(np.count_nonzero(targets_1_arr==1))\nprint(np.count_nonzero(targets_0_arr==1))\n\n# # -------------------------------------------------------------------------------------------------------------------\n# # # train test split\nx_train_arr = np.concatenate((samples_1_arr[:int(len(samples_1_arr)*0.7)], samples_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\ny_train_arr = 
np.concatenate((targets_1_arr[:int(len(samples_1_arr)*0.7)], targets_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\nx_test_arr = np.concatenate((samples_1_arr[int(len(samples_1_arr)*0.7):], samples_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\ny_test_arr = np.concatenate((targets_1_arr[int(len(samples_1_arr)*0.7):], targets_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\nprint(\"x_train_arr.shape:{}\".format(x_train_arr.shape))\nprint(\"y_train_arr.shape:{}\".format(y_train_arr.shape))\nprint(\"x_test_arr.shape:{}\".format(x_test_arr.shape))\nprint(\"y_test_arr.shape:{}\".format(y_test_arr.shape))\n\n# -------------------------------------------------------------------------------------------------------------------\n# tf.keras model\nfor t in range(10): # 做幾遍\n # LSTM 模型的訓練與驗證\n from keras.models import Sequential\n from keras import layers\n from keras.optimizers import RMSprop, Adam\n from keras.callbacks import ModelCheckpoint\n model = Sequential()\n model.add(layers.LSTM(128,\n input_shape=(datasInADay*lookback_days, input_dim), # (288*3, 3)\n return_sequences=True,\n # dropout=0.2\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(attention())\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=False,\n ))\n model.add(layers.Dense(1, activation='sigmoid'))\n model.summary()\n model.compile(optimizer=Adam(),\n loss = 'binary_crossentropy',\n metrics=['accuracy'])\n# -------------------------------------------------------------------------------------------------------------------\n # checkpoint\n filepath=\"weights.best.hdf5\"\n checkpoint = ModelCheckpoint(filepath, \n monitor='val_accuracy', \n verbose=1, \n save_best_only=True,\n mode='max')\n callbacks_list = [checkpoint]\n # fit the model\n history = model.fit(x_train_arr, y_train_arr,\n epochs=200,\n batch_size=256,\n # validation_data=(x_val_arr, y_val_arr),\n validation_split=0.4, \n callbacks=callbacks_list,\n verbose=1)\n model.load_weights(\"weights.best.hdf5\")\n print(\"第{}次結果,選用最好的val_acc來對testSet做預測:\".format(t+1))\n test_score = model.evaluate(x_test_arr, y_test_arr)\n print(\"test_score:{}\".format(test_score))\n # 預測結果\n pred = model.predict(x_test_arr)\n TrueP = 0\n TrueN = 0\n FalseP = 0\n FalseN = 0 \n for pp in range(len(pred)):\n if(pred[pp]>0.5 and y_test_arr[pp]==1):\n TrueP += 1\n if(pred[pp]>0.5 and y_test_arr[pp]==0):\n FalseP += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==1):\n FalseN += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==0):\n TrueN += 1\n print(\"test數量:{}\".format(len(x_test_arr)))\n print(\"True_Positive:{}\".format(TrueP))\n print(\"True_Nagitive:{}\".format(TrueN))\n print(\"False_Positive:{}\".format(FalseP))\n print(\"False_Nagitive:{}\".format(FalseN))\n precision = TrueP/(TrueP+FalseP)\n recall = TrueP/(TrueP+FalseN)\n print(\"Precision:{}\".format(precision))\n print(\"Recall:{}\".format(recall))\n with open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n # writer.writerow([\"第n次\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n writer.writerow([t+1, test_score[1], TrueP, TrueN, FalseP, FalseN, precision, recall])", "import pandas as pd\nimport numpy as np\nimport csv\nfrom keras.layers import Dense, Lambda, dot, Activation, 
concatenate\nfrom keras.layers import Layer\nimport keras.backend as K\n\n# Parameters\n# -------------------------------------------------------------------------------------------------------------------\nbed = [631, 742, 701, 759, 765, 698]\nlookback_days = 3\ndatasInADay = 288\ninput_dim = 3\nsecondsInADay = 60*60*24 \n\n# 定義 attention 機制 (return_sequence=True)\nclass attention(Layer):\n def __init__(self,**kwargs):\n super(attention,self).__init__(**kwargs)\n def build(self,input_shape):\n self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n super(attention, self).build(input_shape)\n def call(self,x):\n et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n at=K.softmax(et)\n at=K.expand_dims(at,axis=-1)\n output=x*at\n return K.sum(output,axis=1, keepdims=True)\n def compute_output_shape(self,input_shape):\n return (input_shape)\n def get_config(self):\n return super(attention,self).get_config()\n\n\n# # 定義 attention 機制 (return_sequence=False)\n# class attention(Layer):\n# def __init__(self,**kwargs):\n# super(attention,self).__init__(**kwargs)\n# def build(self,input_shape):\n# self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n# self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n# super(attention, self).build(input_shape)\n# def call(self,x):\n# et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n# at=K.softmax(et)\n# at=K.expand_dims(at,axis=-1)\n# output=x*at\n# return K.sum(output,axis=1)\n# def compute_output_shape(self,input_shape):\n# return (input_shape[0],input_shape[-1])\n# def get_config(self):\n# return super(attention,self).get_config()\n\n# np.random.seed(1)\n# 讀取 「(統計近期三日)近期死亡csv」\ntargetRecent = pd.read_csv(\"targetRecent.csv\")\n# 轉為 numpy array\ntargetRecent_arr = np.array(targetRecent)\n# print(targetRecent_arr)\n# -------------------------------------------------------------------------------------------------------------------\n# 生成資料集\ndef generator_with_augmentation(inputdata, starttime, lookback, dead_recently, samp_list_1, samp_list_0, targ_list_1, targ_list_0): # 輸入資料 samp_list = []; 輸出結果 targ_list = []\n for i in range(datasInADay):\n rows = np.arange(i+starttime, i+starttime+lookback)\n if np.count_nonzero(inputdata[rows, 4] == 0) <= 316:\n if dead_recently == 1:\n samp_list_1.append(inputdata[rows, 1:4])\n targ_list_1.append(dead_recently)\n if dead_recently == 0:\n samp_list_0.append(inputdata[rows, 1:4])\n targ_list_0.append(dead_recently)\n return samp_list_1, samp_list_0, targ_list_1, targ_list_0\n\nsamples_1 = []\nsamples_0 = []\ntargets_1 = []\ntargets_0 = []\n\n# 測試結果csv建立\nwith open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n writer.writerow([\"第n次,LSTM32_32_32_32_A_LSTM128_128_128_128\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n\nfor n in range(len(targetRecent_arr)): # 近期死亡統計數量\n for m in range(len(bed)): # 試驗植床總共六床\n if targetRecent_arr[n, 2] == bed[m]:\n paddeddata_arr = np.array(pd.read_csv(\"addfeature9{}.csv\".format(m+1)))\n # print(\"BedPlant:{}\".format(m+1))\n # ----------------------------------------------------------------------------------------------------------------------------------------\n # 平均值正規化 [-1, 1]\n data_min = np.min(paddeddata_arr[:, 1:4], axis=0)\n data_max = 
np.max(paddeddata_arr[:, 1:4], axis=0)\n data_mean = np.mean(paddeddata_arr[:, 1:4], axis=0)\n # print(data_min)\n # print(data_max)\n # print(data_mean)\n paddeddata_arr[:, 1:4] = (paddeddata_arr[:, 1:4]-data_mean)/(data_max-data_min)\n # ----------------------------------------------------------------------------------------------------------------------------------------\n where = np.searchsorted(paddeddata_arr[:, 0], targetRecent_arr[n, 0]-secondsInADay*lookback_days) # 604800 是七天的秒數; 432000 是五天的秒數; 259200 是三天的秒數\n # print(\"where:{}\".format(where))\n samples_1, samples_0, targets_1, targets_0 = generator_with_augmentation(paddeddata_arr, starttime=where, lookback=datasInADay*lookback_days, dead_recently=targetRecent_arr[n, 1], samp_list_1=samples_1, samp_list_0=samples_0, targ_list_1=targets_1, targ_list_0=targets_0)\n# 轉為 numpy array\nsamples_1_arr = np.array(samples_1)\nsamples_0_arr = np.array(samples_0)\ntargets_1_arr = np.array(targets_1)\ntargets_0_arr = np.array(targets_0)\nprint(\"samples_1_arr.shape:{}\".format(samples_1_arr.shape))\nprint(\"samples_0_arr.shape:{}\".format(samples_0_arr.shape))\nprint(\"targets_1_arr.shape:{}\".format(targets_1_arr.shape))\nprint(\"targets_0_arr.shape:{}\".format(targets_0_arr.shape))\n\nprint(np.count_nonzero(targets_1_arr==1))\nprint(np.count_nonzero(targets_0_arr==1))\n\n# # -------------------------------------------------------------------------------------------------------------------\n# # # train test split\nx_train_arr = np.concatenate((samples_1_arr[:int(len(samples_1_arr)*0.7)], samples_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\ny_train_arr = np.concatenate((targets_1_arr[:int(len(samples_1_arr)*0.7)], targets_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\nx_test_arr = np.concatenate((samples_1_arr[int(len(samples_1_arr)*0.7):], samples_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\ny_test_arr = np.concatenate((targets_1_arr[int(len(samples_1_arr)*0.7):], targets_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\nprint(\"x_train_arr.shape:{}\".format(x_train_arr.shape))\nprint(\"y_train_arr.shape:{}\".format(y_train_arr.shape))\nprint(\"x_test_arr.shape:{}\".format(x_test_arr.shape))\nprint(\"y_test_arr.shape:{}\".format(y_test_arr.shape))\n\n# -------------------------------------------------------------------------------------------------------------------\n# tf.keras model\nfor t in range(10): # 做幾遍\n # LSTM 模型的訓練與驗證\n from keras.models import Sequential\n from keras import layers\n from keras.optimizers import RMSprop, Adam\n from keras.callbacks import ModelCheckpoint\n model = Sequential()\n model.add(layers.LSTM(32,\n input_shape=(datasInADay*lookback_days, input_dim), # (288*3, 3)\n return_sequences=True,\n ))\n model.add(layers.LSTM(32,\n return_sequences=True,\n ))\n model.add(layers.LSTM(32,\n return_sequences=True,\n ))\n model.add(layers.LSTM(32,\n return_sequences=True,\n ))\n model.add(attention())\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=False,\n ))\n model.add(layers.Dense(1, activation='sigmoid'))\n model.summary()\n model.compile(optimizer=Adam(),\n loss = 'binary_crossentropy',\n metrics=['accuracy'])\n# -------------------------------------------------------------------------------------------------------------------\n # checkpoint\n filepath=\"weights.best.hdf5\"\n checkpoint = ModelCheckpoint(filepath, \n 
monitor='val_accuracy', \n verbose=1, \n save_best_only=True,\n mode='max')\n callbacks_list = [checkpoint]\n # fit the model\n history = model.fit(x_train_arr, y_train_arr,\n epochs=200,\n batch_size=256,\n validation_split=0.4, \n callbacks=callbacks_list,\n verbose=1)\n model.load_weights(\"weights.best.hdf5\")\n print(\"第{}次結果,選用最好的val_acc來對testSet做預測:\".format(t+1))\n test_score = model.evaluate(x_test_arr, y_test_arr)\n print(\"test_score:{}\".format(test_score))\n # 預測結果\n pred = model.predict(x_test_arr)\n TrueP = 0\n TrueN = 0\n FalseP = 0\n FalseN = 0 \n for pp in range(len(pred)):\n if(pred[pp]>0.5 and y_test_arr[pp]==1):\n TrueP += 1\n if(pred[pp]>0.5 and y_test_arr[pp]==0):\n FalseP += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==1):\n FalseN += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==0):\n TrueN += 1\n print(\"test數量:{}\".format(len(x_test_arr)))\n print(\"True_Positive:{}\".format(TrueP))\n print(\"True_Nagitive:{}\".format(TrueN))\n print(\"False_Positive:{}\".format(FalseP))\n print(\"False_Nagitive:{}\".format(FalseN))\n precision = TrueP/(TrueP+FalseP)\n recall = TrueP/(TrueP+FalseN)\n print(\"Precision:{}\".format(precision))\n print(\"Recall:{}\".format(recall))\n with open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n # writer.writerow([\"第n次\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n writer.writerow([t+1, test_score[1], TrueP, TrueN, FalseP, FalseN, precision, recall])", "import pandas as pd\nimport numpy as np\nimport csv\nfrom keras.layers import Dense, Lambda, dot, Activation, concatenate\nfrom keras.layers import Layer\nimport keras.backend as K\n\n# Parameters\n# -------------------------------------------------------------------------------------------------------------------\nbed = [631, 742, 701, 759, 765, 698]\nlookback_days = 3\ndatasInADay = 288\ninput_dim = 3\nsecondsInADay = 60*60*24 \n\n# # 定義 attention 機制 (return_sequence=True)\n# class attention(Layer):\n# def __init__(self,**kwargs):\n# super(attention,self).__init__(**kwargs)\n# def build(self,input_shape):\n# self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n# self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n# super(attention, self).build(input_shape)\n# def call(self,x):\n# et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n# at=K.softmax(et)\n# at=K.expand_dims(at,axis=-1)\n# output=x*at\n# return K.sum(output,axis=1, keepdims=True)\n# def compute_output_shape(self,input_shape):\n# return (input_shape)\n# def get_config(self):\n# return super(attention,self).get_config()\n\n\n# 定義 attention 機制 (return_sequence=False)\nclass attention(Layer):\n def __init__(self,**kwargs):\n super(attention,self).__init__(**kwargs)\n def build(self,input_shape):\n self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n super(attention, self).build(input_shape)\n def call(self,x):\n et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n at=K.softmax(et)\n at=K.expand_dims(at,axis=-1)\n output=x*at\n return K.sum(output,axis=1)\n def compute_output_shape(self,input_shape):\n return (input_shape[0],input_shape[-1])\n def get_config(self):\n return super(attention,self).get_config()\n\n# np.random.seed(1)\n# 讀取 「(統計近期三日)近期死亡csv」\ntargetRecent = pd.read_csv(\"targetRecent.csv\")\n# 
轉為 numpy array\ntargetRecent_arr = np.array(targetRecent)\n# print(targetRecent_arr)\n# -------------------------------------------------------------------------------------------------------------------\n# 生成資料集\ndef generator_with_augmentation(inputdata, starttime, lookback, dead_recently, samp_list_1, samp_list_0, targ_list_1, targ_list_0): # 輸入資料 samp_list = []; 輸出結果 targ_list = []\n for i in range(datasInADay):\n rows = np.arange(i+starttime, i+starttime+lookback)\n if np.count_nonzero(inputdata[rows, 4] == 0) <= 316:\n if dead_recently == 1:\n samp_list_1.append(inputdata[rows, 1:4])\n targ_list_1.append(dead_recently)\n if dead_recently == 0:\n samp_list_0.append(inputdata[rows, 1:4])\n targ_list_0.append(dead_recently)\n return samp_list_1, samp_list_0, targ_list_1, targ_list_0\n\nsamples_1 = []\nsamples_0 = []\ntargets_1 = []\ntargets_0 = []\n\n# 測試結果csv建立\nwith open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n writer.writerow([\"第n次,LSTM128_128_128_128_A\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n\nfor n in range(len(targetRecent_arr)): # 近期死亡統計數量\n for m in range(len(bed)): # 試驗植床總共六床\n if targetRecent_arr[n, 2] == bed[m]:\n paddeddata_arr = np.array(pd.read_csv(\"addfeature9{}.csv\".format(m+1)))\n # print(\"BedPlant:{}\".format(m+1))\n # ----------------------------------------------------------------------------------------------------------------------------------------\n # 平均值正規化 [-1, 1]\n data_min = np.min(paddeddata_arr[:, 1:4], axis=0)\n data_max = np.max(paddeddata_arr[:, 1:4], axis=0)\n data_mean = np.mean(paddeddata_arr[:, 1:4], axis=0)\n # print(data_min)\n # print(data_max)\n # print(data_mean)\n paddeddata_arr[:, 1:4] = (paddeddata_arr[:, 1:4]-data_mean)/(data_max-data_min)\n # ----------------------------------------------------------------------------------------------------------------------------------------\n where = np.searchsorted(paddeddata_arr[:, 0], targetRecent_arr[n, 0]-secondsInADay*lookback_days) # 604800 是七天的秒數; 432000 是五天的秒數; 259200 是三天的秒數\n # print(\"where:{}\".format(where))\n samples_1, samples_0, targets_1, targets_0 = generator_with_augmentation(paddeddata_arr, starttime=where, lookback=datasInADay*lookback_days, dead_recently=targetRecent_arr[n, 1], samp_list_1=samples_1, samp_list_0=samples_0, targ_list_1=targets_1, targ_list_0=targets_0)\n# 轉為 numpy array\nsamples_1_arr = np.array(samples_1)\nsamples_0_arr = np.array(samples_0)\ntargets_1_arr = np.array(targets_1)\ntargets_0_arr = np.array(targets_0)\nprint(\"samples_1_arr.shape:{}\".format(samples_1_arr.shape))\nprint(\"samples_0_arr.shape:{}\".format(samples_0_arr.shape))\nprint(\"targets_1_arr.shape:{}\".format(targets_1_arr.shape))\nprint(\"targets_0_arr.shape:{}\".format(targets_0_arr.shape))\n\nprint(np.count_nonzero(targets_1_arr==1))\nprint(np.count_nonzero(targets_0_arr==1))\n\n# # -------------------------------------------------------------------------------------------------------------------\n# # # train test split\nx_train_arr = np.concatenate((samples_1_arr[:int(len(samples_1_arr)*0.7)], samples_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\ny_train_arr = np.concatenate((targets_1_arr[:int(len(samples_1_arr)*0.7)], targets_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\nx_test_arr = np.concatenate((samples_1_arr[int(len(samples_1_arr)*0.7):], samples_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\ny_test_arr = 
np.concatenate((targets_1_arr[int(len(samples_1_arr)*0.7):], targets_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\nprint(\"x_train_arr.shape:{}\".format(x_train_arr.shape))\nprint(\"y_train_arr.shape:{}\".format(y_train_arr.shape))\nprint(\"x_test_arr.shape:{}\".format(x_test_arr.shape))\nprint(\"y_test_arr.shape:{}\".format(y_test_arr.shape))\n\n# -------------------------------------------------------------------------------------------------------------------\n# tf.keras model\nfor t in range(10): # 做幾遍\n # LSTM 模型的訓練與驗證\n from keras.models import Sequential\n from keras import layers\n from keras.optimizers import RMSprop, Adam\n from keras.callbacks import ModelCheckpoint\n model = Sequential()\n model.add(layers.LSTM(128,\n input_shape=(datasInADay*lookback_days, input_dim), # (288*3, 3)\n return_sequences=True,\n # dropout=0.2\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(attention())\n model.add(layers.Dense(1, activation='sigmoid'))\n model.summary()\n model.compile(optimizer=Adam(),\n loss = 'binary_crossentropy',\n metrics=['accuracy'])\n# -------------------------------------------------------------------------------------------------------------------\n # checkpoint\n filepath=\"weights.best.hdf5\"\n checkpoint = ModelCheckpoint(filepath, \n monitor='val_accuracy', \n verbose=1, \n save_best_only=True,\n mode='max')\n callbacks_list = [checkpoint]\n # fit the model\n history = model.fit(x_train_arr, y_train_arr,\n epochs=200,\n batch_size=256,\n # validation_data=(x_val_arr, y_val_arr),\n validation_split=0.4, \n callbacks=callbacks_list,\n verbose=1)\n model.load_weights(\"weights.best.hdf5\")\n print(\"第{}次結果,選用最好的val_acc來對testSet做預測:\".format(t+1))\n test_score = model.evaluate(x_test_arr, y_test_arr)\n print(\"test_score:{}\".format(test_score))\n # 預測結果\n pred = model.predict(x_test_arr)\n TrueP = 0\n TrueN = 0\n FalseP = 0\n FalseN = 0 \n for pp in range(len(pred)):\n if(pred[pp]>0.5 and y_test_arr[pp]==1):\n TrueP += 1\n if(pred[pp]>0.5 and y_test_arr[pp]==0):\n FalseP += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==1):\n FalseN += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==0):\n TrueN += 1\n print(\"test數量:{}\".format(len(x_test_arr)))\n print(\"True_Positive:{}\".format(TrueP))\n print(\"True_Nagitive:{}\".format(TrueN))\n print(\"False_Positive:{}\".format(FalseP))\n print(\"False_Nagitive:{}\".format(FalseN))\n precision = TrueP/(TrueP+FalseP)\n recall = TrueP/(TrueP+FalseN)\n print(\"Precision:{}\".format(precision))\n print(\"Recall:{}\".format(recall))\n with open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n # writer.writerow([\"第n次\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n writer.writerow([t+1, test_score[1], TrueP, TrueN, FalseP, FalseN, precision, recall])", "import pandas as pd\nimport numpy as np\nimport csv\nfrom keras.layers import Dense, Lambda, dot, Activation, concatenate\nfrom keras.layers import Layer\nimport keras.backend as K\n\n# Parameters\n# -------------------------------------------------------------------------------------------------------------------\nbed = [631, 742, 701, 759, 765, 698]\nlookback_days = 3\ndatasInADay = 288\ninput_dim = 3\nsecondsInADay = 60*60*24 \n\n# 定義 attention 機制 (return_sequence=True)\nclass attention(Layer):\n def __init__(self,**kwargs):\n 
super(attention,self).__init__(**kwargs)\n def build(self,input_shape):\n self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n super(attention, self).build(input_shape)\n def call(self,x):\n et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n at=K.softmax(et)\n at=K.expand_dims(at,axis=-1)\n output=x*at\n return K.sum(output,axis=1, keepdims=True)\n def compute_output_shape(self,input_shape):\n return (input_shape)\n def get_config(self):\n return super(attention,self).get_config()\n\n\n# # 定義 attention 機制 (return_sequence=False)\n# class attention(Layer):\n# def __init__(self,**kwargs):\n# super(attention,self).__init__(**kwargs)\n# def build(self,input_shape):\n# self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n# self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n# super(attention, self).build(input_shape)\n# def call(self,x):\n# et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n# at=K.softmax(et)\n# at=K.expand_dims(at,axis=-1)\n# output=x*at\n# return K.sum(output,axis=1)\n# def compute_output_shape(self,input_shape):\n# return (input_shape[0],input_shape[-1])\n# def get_config(self):\n# return super(attention,self).get_config()\n\n# np.random.seed(1)\n# 讀取 「(統計近期三日)近期死亡csv」\ntargetRecent = pd.read_csv(\"targetRecent.csv\")\n# 轉為 numpy array\ntargetRecent_arr = np.array(targetRecent)\n# print(targetRecent_arr)\n# -------------------------------------------------------------------------------------------------------------------\n# 生成資料集\ndef generator_with_augmentation(inputdata, starttime, lookback, dead_recently, samp_list_1, samp_list_0, targ_list_1, targ_list_0): # 輸入資料 samp_list = []; 輸出結果 targ_list = []\n for i in range(datasInADay):\n rows = np.arange(i+starttime, i+starttime+lookback)\n if np.count_nonzero(inputdata[rows, 4] == 0) <= 316:\n if dead_recently == 1:\n samp_list_1.append(inputdata[rows, 1:4])\n targ_list_1.append(dead_recently)\n if dead_recently == 0:\n samp_list_0.append(inputdata[rows, 1:4])\n targ_list_0.append(dead_recently)\n return samp_list_1, samp_list_0, targ_list_1, targ_list_0\n\nsamples_1 = []\nsamples_0 = []\ntargets_1 = []\ntargets_0 = []\n\n# 測試結果csv建立\nwith open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n writer.writerow([\"第n次,LSTM128_128_128_128_128_A_LSTM64_64\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n\nfor n in range(len(targetRecent_arr)): # 近期死亡統計數量\n for m in range(len(bed)): # 試驗植床總共六床\n if targetRecent_arr[n, 2] == bed[m]:\n paddeddata_arr = np.array(pd.read_csv(\"addfeature9{}.csv\".format(m+1)))\n # print(\"BedPlant:{}\".format(m+1))\n # ----------------------------------------------------------------------------------------------------------------------------------------\n # 平均值正規化 [-1, 1]\n data_min = np.min(paddeddata_arr[:, 1:4], axis=0)\n data_max = np.max(paddeddata_arr[:, 1:4], axis=0)\n data_mean = np.mean(paddeddata_arr[:, 1:4], axis=0)\n # print(data_min)\n # print(data_max)\n # print(data_mean)\n paddeddata_arr[:, 1:4] = (paddeddata_arr[:, 1:4]-data_mean)/(data_max-data_min)\n # ----------------------------------------------------------------------------------------------------------------------------------------\n where = np.searchsorted(paddeddata_arr[:, 0], targetRecent_arr[n, 
0]-secondsInADay*lookback_days) # 604800 是七天的秒數; 432000 是五天的秒數; 259200 是三天的秒數\n # print(\"where:{}\".format(where))\n samples_1, samples_0, targets_1, targets_0 = generator_with_augmentation(paddeddata_arr, starttime=where, lookback=datasInADay*lookback_days, dead_recently=targetRecent_arr[n, 1], samp_list_1=samples_1, samp_list_0=samples_0, targ_list_1=targets_1, targ_list_0=targets_0)\n# 轉為 numpy array\nsamples_1_arr = np.array(samples_1)\nsamples_0_arr = np.array(samples_0)\ntargets_1_arr = np.array(targets_1)\ntargets_0_arr = np.array(targets_0)\nprint(\"samples_1_arr.shape:{}\".format(samples_1_arr.shape))\nprint(\"samples_0_arr.shape:{}\".format(samples_0_arr.shape))\nprint(\"targets_1_arr.shape:{}\".format(targets_1_arr.shape))\nprint(\"targets_0_arr.shape:{}\".format(targets_0_arr.shape))\n\nprint(np.count_nonzero(targets_1_arr==1))\nprint(np.count_nonzero(targets_0_arr==1))\n\n# # -------------------------------------------------------------------------------------------------------------------\n# # # train test split\nx_train_arr = np.concatenate((samples_1_arr[:int(len(samples_1_arr)*0.7)], samples_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\ny_train_arr = np.concatenate((targets_1_arr[:int(len(samples_1_arr)*0.7)], targets_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\nx_test_arr = np.concatenate((samples_1_arr[int(len(samples_1_arr)*0.7):], samples_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\ny_test_arr = np.concatenate((targets_1_arr[int(len(samples_1_arr)*0.7):], targets_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\nprint(\"x_train_arr.shape:{}\".format(x_train_arr.shape))\nprint(\"y_train_arr.shape:{}\".format(y_train_arr.shape))\nprint(\"x_test_arr.shape:{}\".format(x_test_arr.shape))\nprint(\"y_test_arr.shape:{}\".format(y_test_arr.shape))\n\n# -------------------------------------------------------------------------------------------------------------------\n# tf.keras model\nfor t in range(10): # 做幾遍\n # LSTM 模型的訓練與驗證\n from keras.models import Sequential\n from keras import layers\n from keras.optimizers import RMSprop, Adam\n from keras.callbacks import ModelCheckpoint\n model = Sequential()\n model.add(layers.LSTM(128,\n input_shape=(datasInADay*lookback_days, input_dim), # (288*3, 3)\n return_sequences=True,\n # dropout=0.2\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(layers.LSTM(128,\n return_sequences=True,\n ))\n model.add(attention())\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=False,\n ))\n model.add(layers.Dense(1, activation='sigmoid'))\n model.summary()\n model.compile(optimizer=Adam(),\n loss = 'binary_crossentropy',\n metrics=['accuracy'])\n# -------------------------------------------------------------------------------------------------------------------\n # checkpoint\n filepath=\"weights.best.hdf5\"\n checkpoint = ModelCheckpoint(filepath, \n monitor='val_accuracy', \n verbose=1, \n save_best_only=True,\n mode='max')\n callbacks_list = [checkpoint]\n # fit the model\n history = model.fit(x_train_arr, y_train_arr,\n epochs=200,\n batch_size=256,\n # validation_data=(x_val_arr, y_val_arr),\n validation_split=0.4, \n callbacks=callbacks_list,\n verbose=1)\n model.load_weights(\"weights.best.hdf5\")\n print(\"第{}次結果,選用最好的val_acc來對testSet做預測:\".format(t+1))\n test_score = model.evaluate(x_test_arr, y_test_arr)\n 
print(\"test_score:{}\".format(test_score))\n # 預測結果\n pred = model.predict(x_test_arr)\n TrueP = 0\n TrueN = 0\n FalseP = 0\n FalseN = 0 \n for pp in range(len(pred)):\n if(pred[pp]>0.5 and y_test_arr[pp]==1):\n TrueP += 1\n if(pred[pp]>0.5 and y_test_arr[pp]==0):\n FalseP += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==1):\n FalseN += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==0):\n TrueN += 1\n print(\"test數量:{}\".format(len(x_test_arr)))\n print(\"True_Positive:{}\".format(TrueP))\n print(\"True_Nagitive:{}\".format(TrueN))\n print(\"False_Positive:{}\".format(FalseP))\n print(\"False_Nagitive:{}\".format(FalseN))\n precision = TrueP/(TrueP+FalseP)\n recall = TrueP/(TrueP+FalseN)\n print(\"Precision:{}\".format(precision))\n print(\"Recall:{}\".format(recall))\n with open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n # writer.writerow([\"第n次\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n writer.writerow([t+1, test_score[1], TrueP, TrueN, FalseP, FalseN, precision, recall])" ]
[ [ "pandas.read_csv", "numpy.min", "numpy.arange", "numpy.max", "numpy.mean", "numpy.count_nonzero", "numpy.searchsorted", "numpy.array" ], [ "pandas.read_csv", "numpy.min", "numpy.arange", "numpy.max", "numpy.mean", "numpy.count_nonzero", "numpy.searchsorted", "numpy.array" ], [ "pandas.read_csv", "numpy.min", "numpy.arange", "numpy.max", "numpy.mean", "numpy.count_nonzero", "numpy.searchsorted", "numpy.array" ], [ "pandas.read_csv", "numpy.min", "numpy.arange", "numpy.max", "numpy.mean", "numpy.count_nonzero", "numpy.searchsorted", "numpy.array" ], [ "pandas.read_csv", "numpy.min", "numpy.arange", "numpy.max", "numpy.mean", "numpy.count_nonzero", "numpy.searchsorted", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
aemoser/PyWake
[ "889a2c10882195af21339e9bcf2ede0db9b58319", "889a2c10882195af21339e9bcf2ede0db9b58319", "889a2c10882195af21339e9bcf2ede0db9b58319", "889a2c10882195af21339e9bcf2ede0db9b58319", "889a2c10882195af21339e9bcf2ede0db9b58319", "889a2c10882195af21339e9bcf2ede0db9b58319" ]
[ "py_wake/utils/area_overlapping_factor.py", "py_wake/deficit_models/gcl.py", "py_wake/flow_map.py", "py_wake/rotor_avg_models/rotor_avg_model.py", "py_wake/deflection_models/jimenez.py", "py_wake/examples/data/hornsrev1.py" ]
[ "from numpy import newaxis as na\n\nimport numpy as np\n\n\nclass AreaOverlappingFactor():\n\n def overlapping_area_factor(self, wake_radius_ijlk, dw_ijlk, cw_ijlk, D_src_il, D_dst_ijl):\n \"\"\"Calculate overlapping factor\n\n Parameters\n ----------\n dw_jl : array_like\n down wind distance [m]\n cw_jl : array_like\n cross wind distance [m]\n D_src_l : array_like\n Diameter of source turbines [m]\n D_dst_jl : array_like or None\n Diameter of destination turbines [m]. If None destination is assumed to be a point\n\n Returns\n -------\n A_ol_factor_jl : array_like\n area overlaping factor\n \"\"\"\n\n if np.all(D_dst_ijl == 0) or D_dst_ijl is None:\n return wake_radius_ijlk > cw_ijlk\n else:\n if wake_radius_ijlk.ndim == 5:\n return self._cal_overlapping_area_factor(\n np.broadcast_to(wake_radius_ijlk, cw_ijlk.shape),\n np.broadcast_to(D_dst_ijl[..., na, na] / 2, cw_ijlk.shape),\n np.abs(cw_ijlk))\n else:\n return self._cal_overlapping_area_factor(wake_radius_ijlk,\n (D_dst_ijl[..., na] / 2),\n np.abs(cw_ijlk))\n\n def _cal_overlapping_area_factor(self, R1, R2, d):\n \"\"\" Calculate the overlapping area of two circles with radius R1 and\n R2, centers distanced d.\n\n The calculation formula can be found in Eq. (A1) of :\n [Ref] Feng J, Shen WZ, Solving the wind farm layout optimization\n problem using Random search algorithm, Renewable Energy 78 (2015)\n 182-192\n Note that however there are typos in Equation (A1), '2' before alpha\n and beta should be 1.\n\n Parameters\n ----------\n R1: array:float\n Radius of the first circle [m]\n\n R2: array:float\n Radius of the second circle [m]\n\n d: array:float\n Distance between two centers [m]\n\n Returns\n -------\n A_ol: array:float\n Overlapping area [m^2]\n \"\"\"\n # treat all input as array\n R1, R2, d = [np.asarray(a) for a in [R1, R2, d]]\n if R2.shape != R1.shape:\n R2 = np.zeros_like(R1) + R2\n if d.shape != R1.shape:\n d = np.zeros_like(R1) + d\n A_ol_f = np.zeros(np.maximum(R1.shape, R2.shape))\n p = (R1 + R2 + d) / 2.0\n\n # make sure R_big >= R_small\n Rmax = np.where(R1 < R2, R2, R1)\n Rmin = np.where(R1 < R2, R1, R2)\n\n # full wake cases\n index_fullwake = (d <= (Rmax - Rmin))\n A_ol_f[index_fullwake] = 1\n\n # partial wake cases\n mask = (d > (Rmax - Rmin)) & (d < (Rmin + Rmax))\n\n # in somecases cos_alpha or cos_beta can be larger than 1 or less than\n # -1.0, cause problem to arccos(), resulting nan values, here fix this\n # issue.\n def arccos_lim(x):\n return np.arccos(np.maximum(np.minimum(x, 1), -1))\n\n alpha = arccos_lim((Rmax[mask]**2.0 + d[mask]**2 - Rmin[mask]**2) /\n (2.0 * Rmax[mask] * d[mask]))\n\n beta = arccos_lim((Rmin[mask]**2.0 + d[mask]**2 - Rmax[mask]**2) /\n (2.0 * Rmin[mask] * d[mask]))\n\n A_triangle = np.sqrt(p[mask] * (p[mask] - Rmin[mask]) *\n (p[mask] - Rmax[mask]) * (p[mask] - d[mask]))\n\n A_ol_f[mask] = (alpha * Rmax[mask]**2 + beta * Rmin[mask]**2 -\n 2.0 * A_triangle) / (R2[mask]**2 * np.pi)\n\n return A_ol_f\n", "import numpy as np\nfrom py_wake.deficit_models.deficit_model import WakeDeficitModel\nfrom py_wake.wind_farm_models.engineering_models import PropagateDownwind\nfrom py_wake.superposition_models import LinearSum\nfrom py_wake.rotor_avg_models.rotor_avg_model import RotorCenter\nna = np.newaxis\n\n\ndef my_power(term, factor):\n with np.warnings.catch_warnings():\n # if term is 0, exp(log(0))=0 as expected for a positive factor\n np.warnings.filterwarnings('ignore', r'divide by zero encountered in log')\n return np.exp(factor * np.log(term))\n\n\ndef get_r96(D, CT, TI):\n 
\"\"\"Computes the wake radius at 9.6D downstream location of a turbine from empirical relation\n\n .. math::\n R_{9.6D} = a_1 \\\\exp (a_2 C_T^2 + a_3 C_T + a_4) (b_1 TI + b_2) D\n\n Inputs\n ----------\n D: float\n Wind turbine diameter\n CT: float\n Outputs WindTurbine object's thrust coefficient\n TI: float\n Ambient turbulence intensity\n pars: list\n GCL Model parameters [a1, a2, a3, a4, b1, b2]\n\n Returns\n -------\n R96: float\n Wake radius at 9.6D downstream location\n \"\"\"\n a1, a2, a3, a4, b1, b2 = [0.435449861, 0.797853685, -0.124807893, 0.136821858, 15.6298, 1.0]\n R96 = a1 * (np.exp(a2 * CT * CT + a3 * CT + a4)) * (b1 * TI + b2) * D\n\n return R96\n\n\ndef get_Rw(x, R, TI, CT):\n \"\"\"Computes the wake radius at a location.\n [1]-eq.3\n\n .. math::\n R_w = \\\\left(\\\\frac{105 c_1^2 }{2 \\\\pi}\\\\right)^{0.2} (C_T A (x + x_0))^{1/3}\n\n with A, the area, and x_0 and c_1 defined as\n\n .. math::\n x_0 = \\\\frac{9.6 D}{\\\\left(\\\\frac{2 R_96}{k D} \\\\right)^3 - 1}\n\n c_1 = \\\\left(\\\\frac{k D}{2}\\\\right)^{5/2}\n \\\\left(\\\\frac{105}{2 \\\\pi} \\\\right)^{-1/2}\n (C_T A x_0)^{-5/6}\n\n with k and m defined as\n\n .. math::\n k = \\\\sqrt{\\\\frac{m + 1}{2}}\n\n m = \\\\frac{1}{\\\\sqrt{1 - C_T}}\n\n Inputs\n ----------\n x: float or ndarray\n Distance between turbines and wake location in the wind direction\n R: float\n Wind turbine radius\n TI: float\n Ambient turbulence intensity\n CT: float\n Outputs WindTurbine object's thrust coefficient\n\n Returns\n -------\n Rw: float or ndarray\n Wake radius at a location\n \"\"\"\n D = 2.0 * R\n Area = np.pi * R * R\n\n m = 1.0 / (np.sqrt(1.0 - CT))\n k = np.sqrt((m + 1.0) / 2.0)\n\n R96 = get_r96(D, CT, TI)\n x0 = (9.6 * D) / (my_power(2.0 * R96 / (k * D), 3.0) - 1.0)\n xx0 = x + x0\n term1 = my_power(k * D / 2.0, 2.5)\n term2 = my_power(105.0 / (2.0 * np.pi), -0.5)\n term3 = my_power(CT * Area * x0, -5.0 / 6.0)\n c1 = term1 * term2 * term3\n\n Rw = my_power(105.0 * c1 * c1 / (2.0 * np.pi), 0.2) * my_power(CT * Area * xx0, 1.0 / 3.0)\n\n Rw = np.where(x + x0 <= 0., 0., Rw)\n return Rw, xx0, c1\n\n\ndef get_dU(x, r, R, CT, TI):\n \"\"\"Computes the wake velocity deficit at a location\n\n Inputs\n ----------\n x: float\n Distance between turbines and wake location in the wind direction\n r: float\n Radial distance between the turbine and the location\n R: float\n Wake producing turbine's radius [m]\n CT: float\n Outputs WindTurbine object's thrust coefficient\n TI: float\n Ambient turbulence intensity [-]\n order: int, optional\n\n Returns\n -------\n dU: float\n Wake velocity deficit at a location\n \"\"\"\n\n CT = np.maximum(CT, np.finfo(float).eps)\n Area = np.pi * R * R\n Rw, xx0, c1 = get_Rw(x, R, TI, CT)\n c1s = c1 * c1\n\n term10 = (1 / 9)\n term20 = my_power(CT * Area / (xx0 * xx0), 1. / 3.)\n\n term310 = my_power(r, 1.5)\n term320 = 1.0 / np.sqrt(3. * c1s * CT * Area * xx0)\n term30 = term310 * term320\n term41 = my_power(35. / (2. * np.pi), .3)\n term42 = my_power(3. * c1s, -0.2)\n term40 = term41 * term42\n t4 = term30 - term40\n dU1 = -term10 * term20 * t4 * t4\n\n dU = dU1\n\n dU = np.where((Rw < r) | (x <= 0), 0, dU)\n return dU\n\n\nclass GCLDeficit(WakeDeficitModel):\n \"\"\"\n Implemented according to:\n Larsen, G. C. (2009). A simple stationary semi-analytical wake model.\n Risoe National Laboratory for Sustainable Energy,\n Technical University of Denmark. Denmark.\n Forskningscenter Risoe. Risoe-R, No. 
1713(EN)\n\n Description:\n based on an analytical solution of the thin shear layer approximation of the NS equations.\n The wake flow fields are assumed rotationally symmetric, and the rotor inflow fields\n are consistently assumed uniform.\n The effect of expansion is approximately accounted for by imposing suitable\n empirical downstream boundary conditions on the wake expansion that depend\n on the rotor thrust and the ambient turbulence conditions, respectively.\n \"\"\"\n\n def __init__(self, use_effective_ws=False, use_effective_ti=False):\n self.use_effective_ws = use_effective_ws\n self.use_effective_ti = use_effective_ti\n self.args4deficit = ['WS_ilk', 'D_src_il', 'dw_ijlk', 'cw_ijlk', 'ct_ilk', 'TI_ilk']\n if use_effective_ws:\n self.args4deficit.append('WS_eff_ilk', )\n if use_effective_ti:\n self.args4deficit.append('TI_eff_ilk')\n\n def wake_radius(self, dw_ijlk, D_src_il, TI_ilk, ct_ilk, **kwargs):\n if self.use_effective_ti:\n TI_ilk = kwargs['TI_eff_ilk']\n with np.warnings.catch_warnings():\n # if term is 0, exp(log(0))=0 as expected for a positive factor\n np.warnings.filterwarnings('ignore', r'invalid value encountered in log')\n return get_Rw(x=dw_ijlk, R=(D_src_il / 2)[:, na, :, na], TI=TI_ilk[:, na], CT=ct_ilk[:, na])[0]\n\n def calc_deficit(self, WS_ilk, D_src_il, dw_ijlk, cw_ijlk, ct_ilk, TI_ilk, **kwargs):\n if self.use_effective_ws:\n WS_ilk = kwargs['WS_eff_ilk']\n if self.use_effective_ti:\n TI_ilk = kwargs['TI_eff_ilk']\n eps = 1e-10\n dw_ijlk_gt0 = np.maximum(dw_ijlk, eps)\n R_src_il = D_src_il / 2.\n dU = -get_dU(x=dw_ijlk_gt0, r=cw_ijlk, R=R_src_il[:, na, :, na],\n CT=ct_ilk[:, na], TI=TI_ilk[:, na])\n return WS_ilk[:, na] * dU * (dw_ijlk > eps)\n\n\nclass GCL(PropagateDownwind):\n def __init__(self, site, windTurbines, rotorAvgModel=RotorCenter(), superpositionModel=LinearSum(),\n deflectionModel=None, turbulenceModel=None, groundModel=None):\n PropagateDownwind.__init__(self, site, windTurbines, wake_deficitModel=GCLDeficit(),\n rotorAvgModel=rotorAvgModel, superpositionModel=superpositionModel,\n deflectionModel=deflectionModel, turbulenceModel=turbulenceModel,\n groundModel=groundModel)\n\n\nclass GCLLocal(PropagateDownwind):\n def __init__(self, site, windTurbines, rotorAvgModel=RotorCenter(), superpositionModel=LinearSum(),\n deflectionModel=None, turbulenceModel=None, groundModel=None):\n\n PropagateDownwind.__init__(self, site, windTurbines,\n wake_deficitModel=GCLDeficit(use_effective_ws=True, use_effective_ti=True),\n rotorAvgModel=rotorAvgModel, superpositionModel=superpositionModel,\n deflectionModel=deflectionModel, turbulenceModel=turbulenceModel,\n groundModel=groundModel)\n\n\ndef main():\n if __name__ == '__main__':\n from py_wake.examples.data.iea37._iea37 import IEA37Site\n from py_wake.examples.data.iea37._iea37 import IEA37_WindTurbines\n from py_wake.turbulence_models import GCLTurbulence\n import matplotlib.pyplot as plt\n\n # setup site, turbines and wind farm model\n site = IEA37Site(16)\n x, y = site.initial_position.T\n windTurbines = IEA37_WindTurbines()\n\n wf_model = GCL(site, windTurbines)\n wf_model_local = GCLLocal(site, windTurbines, turbulenceModel=GCLTurbulence())\n # run wind farm simulation\n sim_res = wf_model(x, y)\n sim_res_local = wf_model_local(x, y)\n # calculate AEP\n aep = sim_res.aep().sum()\n aep_local = sim_res_local.aep().sum()\n\n # plot wake map\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5), tight_layout=True)\n levels = np.arange(0, 10.5, 0.5)\n print(wf_model)\n flow_map = 
sim_res.flow_map(wd=30, ws=9.8)\n flow_map.plot_wake_map(levels=levels, ax=ax1, plot_colorbar=False)\n flow_map.plot_windturbines(ax=ax1)\n ax1.set_title('Original Larsen, AEP: %.2f GWh' % aep)\n\n # plot wake map\n print(wf_model_local)\n flow_map = sim_res_local.flow_map(wd=30, ws=9.8)\n flow_map.plot_wake_map(levels=levels, ax=ax2, plot_colorbar=False)\n flow_map.plot_windturbines(ax=ax2)\n ax2.set_title('Local Larsen, AEP: %.2f GWh' % aep_local)\n\n plt.figure()\n flow_map.plot_ti_map(levels=np.arange(0, 1, .01))\n plt.title('TI map for GCLLocal with GCL turbulence model')\n plt.show()\n\n\nmain()\n", "import numpy as np\nimport xarray as xr\nfrom numpy import newaxis as na\nfrom scipy.interpolate.fitpack2 import InterpolatedUnivariateSpline\nimport matplotlib.pyplot as plt\n\n\nclass FlowBox(xr.Dataset):\n __slots__ = ('simulationResult', 'windFarmModel')\n\n def __init__(self, simulationResult, X, Y, H, localWind_j, WS_eff_jlk, TI_eff_jlk):\n self.simulationResult = simulationResult\n self.windFarmModel = self.simulationResult.windFarmModel\n lw_j = localWind_j\n wd, ws = lw_j.wd, lw_j.ws\n\n if X is None and Y is None and H is None:\n coords = localWind_j.coords\n X = localWind_j.i\n else:\n coords = {'x': X[0, :, 0], 'y': Y[:, 0, 0], 'h': H[0, 0, :], 'wd': wd, 'ws': ws}\n\n def get_da(arr_jlk):\n if len(X.shape) == 1:\n return xr.DataArray(arr_jlk.reshape(X.shape + (len(wd), len(ws))), coords, dims=['i', 'wd', 'ws'])\n else:\n return xr.DataArray(arr_jlk.reshape(X.shape + (len(wd), len(ws))),\n coords, dims=['y', 'x', 'h', 'wd', 'ws'])\n JLK = WS_eff_jlk.shape\n xr.Dataset.__init__(self, data_vars={k: get_da(v) for k, v in [\n ('WS_eff', WS_eff_jlk), ('TI_eff', TI_eff_jlk),\n ('WD', lw_j.WD.ilk(JLK)), ('WS', lw_j.WS.ilk(JLK)), ('TI', lw_j.TI.ilk(JLK)), ('P', lw_j.P.ilk(JLK))]})\n\n\nclass FlowMap(FlowBox):\n __slots__ = ('simulationResult', 'windFarmModel', 'X', 'Y', 'plane', 'WS_eff_xylk', 'TI_eff_xylk')\n\n def __init__(self, simulationResult, X, Y, localWind_j, WS_eff_jlk, TI_eff_jlk, plane):\n self.X = X\n self.Y = Y\n self.plane = plane\n\n if plane[0] == 'XY':\n X = X[:, :, na]\n Y = Y[:, :, na]\n H = np.reshape(localWind_j.h.data, X.shape)\n elif plane[0] == 'YZ':\n H = Y.T[:, na, :]\n Y = X.T[:, na, :]\n X = np.reshape(localWind_j.x.data, Y.shape)\n elif plane[0] == 'xyz':\n X = None\n Y = None\n H = None\n else:\n raise NotImplementedError()\n FlowBox.__init__(self, simulationResult, X, Y, H, localWind_j, WS_eff_jlk, TI_eff_jlk)\n\n if plane[0] == \"XY\":\n # set flowMap.WS_xylk etc.\n for k in ['WS_eff', 'TI_eff', 'WS', 'WD', 'TI', 'P']:\n setattr(self.__class__, \"%s_xylk\" % k, property(lambda self, k=k: self[k].isel(h=0)))\n if plane[0] == \"YZ\":\n # set flowMap.WS_xylk etc.\n for k in ['WS_eff', 'TI_eff', 'WS', 'WD', 'TI', 'P']:\n self[k] = self[k].transpose('h', 'y', ...)\n setattr(self.__class__, \"%s_xylk\" % k,\n property(lambda self, k=k: self[k].isel(x=0).transpose('y', 'h', ...)))\n\n @property\n def XY(self):\n return self.X, self.Y\n\n def power_xylk(self, wt_type=0, with_wake_loss=True):\n if with_wake_loss:\n ws = self.WS_eff_xylk\n\n else:\n ws = self.WS_xylk\n\n type = {'type': wt_type} if wt_type != 0 else {}\n\n power_xylk = self.windFarmModel.windTurbines.power(ws, **type)\n return xr.DataArray(power_xylk[:, :, na], self.coords, dims=['y', 'x', 'h', 'wd', 'ws'])\n\n def aep_xylk(self, wt_type=0, normalize_probabilities=False, with_wake_loss=True):\n \"\"\"Anual Energy Production of a potential wind turbine at all grid positions (x,y)\n for all wind 
directions (l) and wind speeds (k) in GWh.\n\n Parameters\n ----------\n wt_type : Optional int, defaults to 0\n Type of potential wind turbine\n normalize_propabilities : Optional bool, defaults to False\n In case only a subset of all wind speeds and/or wind directions is simulated,\n this parameter determines whether the returned AEP represents the energy produced in the fraction\n of a year where these flow cases occurs or a whole year of northern wind.\n If for example, wd=[0], then\n - False means that the AEP only includes energy from the faction of year\\n\n with northern wind (359.5-0.5deg), i.e. no power is produced the rest of the year.\n - True means that the AEP represents a whole year of northen wind.\n default is False\n with_wake_loss : Optional bool, defaults to True\n If True, wake loss is included, i.e. power is calculated using local effective wind speed\\n\n If False, wake loss is neglected, i.e. power is calculated using local free flow wind speed\n \"\"\"\n power_xylk = self.power_xylk(wt_type, with_wake_loss)\n P_xylk = self.P_xylk # .isel.ilk((1,) + power_xylk.shape[2:])\n if normalize_probabilities:\n P_xylk = P_xylk / P_xylk.sum(['wd', 'ws'])\n return power_xylk * P_xylk * 24 * 365 * 1e-9\n\n def aep_xy(self, wt_type=0, normalize_probabilities=False, with_wake_loss=True):\n \"\"\"Anual Energy Production of a potential wind turbine at all grid positions (x,y)\n (sum of all wind directions and wind speeds) in GWh.\n\n see aep_xylk\n \"\"\"\n return self.aep_xylk(wt_type, normalize_probabilities, with_wake_loss).sum(['wd', 'ws'])\n\n def plot(self, data, clabel, levels=100, cmap=None, plot_colorbar=True, plot_windturbines=True,\n normalize_with=1, ax=None):\n \"\"\"Plot data as contouf map\n\n Parameters\n ----------\n data : array_like\n 2D data array to plot\n clabel : str\n colorbar label\n levels : int or array-like, default 100\n Determines the number and positions of the contour lines / regions.\n If an int n, use n data intervals; i.e. draw n+1 contour lines. The level heights are automatically chosen.\n If array-like, draw contour lines at the specified levels. 
The values must be in increasing order.\n cmap : str or Colormap, defaults 'Blues_r'.\n A Colormap instance or registered colormap name.\n The colormap maps the level values to colors.\n plot_colorbar : bool, default True\n if True (default), colorbar is drawn\n plot_windturbines : bool, default True\n if True (default), lines/circles showing the wind turbine rotors are plotted\n ax : pyplot or matplotlib axes object, default None\n \"\"\"\n import matplotlib.pyplot as plt\n if cmap is None:\n cmap = 'Blues_r'\n if ax is None:\n ax = plt.gca()\n n = normalize_with\n if self.plane[0] == \"YZ\":\n y = self.X[0]\n x = np.zeros_like(y) + self.plane[1]\n z = self.simulationResult.windFarmModel.site.elevation(x, y)\n c = ax.contourf(self.X, self.Y + z, data.isel(x=0), levels=levels, cmap=cmap)\n if plot_colorbar:\n plt.colorbar(c, label=clabel, ax=ax)\n # plot terrain\n y = np.arange(y.min(), y.max())\n x = np.zeros_like(y) + self.plane[1]\n z = self.simulationResult.windFarmModel.site.elevation(x, y)\n ax.plot(y / n, z / n, 'k')\n else:\n # xarray gives strange levels\n # c = data.isel(h=0).plot(levels=levels, cmap=cmap, ax=ax, add_colorbar=plot_colorbar)\n c = ax.contourf(self.X / n, self.Y / n, data.isel(h=0).data, levels=levels, cmap=cmap)\n if plot_colorbar:\n plt.colorbar(c, label=clabel, ax=ax)\n\n if plot_windturbines:\n self.plot_windturbines(normalize_with=normalize_with, ax=ax)\n\n return c\n\n def plot_windturbines(self, normalize_with=1, ax=None):\n fm = self.windFarmModel\n yaw = self.simulationResult.yaw.sel(wd=self.wd[0]).mean(['ws']).data\n tilt = self.simulationResult.tilt.sel(wd=self.wd[0]).mean(['ws']).data\n if self.plane[0] == \"YZ\":\n x_i, y_i = self.simulationResult.x.values, self.simulationResult.y.values\n h_i = self.simulationResult.h.values\n z_i = self.simulationResult.windFarmModel.site.elevation(x_i, y_i)\n fm.windTurbines.plot_yz(y_i, z_i, h_i, wd=self.wd, yaw=yaw, tilt=tilt, normalize_with=normalize_with, ax=ax)\n else: # self.plane[0] == \"XY\":\n fm.windTurbines.plot_xy(self.simulationResult.x, self.simulationResult.y, self.simulationResult.type.data,\n wd=self.wd, yaw=yaw, tilt=tilt, normalize_with=normalize_with, ax=ax)\n\n def plot_wake_map(self, levels=100, cmap=None, plot_colorbar=True, plot_windturbines=True,\n normalize_with=1, ax=None):\n \"\"\"Plot effective wind speed contourf map\n\n Parameters\n ----------\n levels : int or array-like, default 100\n Determines the number and positions of the contour lines / regions.\n If an int n, use n data intervals; i.e. draw n+1 contour lines. The level heights are automatically chosen.\n If array-like, draw contour lines at the specified levels. 
The values must be in increasing order.\n cmap : str or Colormap, defaults 'Blues_r'.\n A Colormap instance or registered colormap name.\n The colormap maps the level values to colors.\n plot_colorbar : bool, default True\n if True (default), colorbar is drawn\n plot_windturbines : bool, default True\n if True (default), lines/circles showing the wind turbine rotors are plotted\n ax : pyplot or matplotlib axes object, default None\n \"\"\"\n return self.plot((self.WS_eff * self.P / self.P.sum(['wd', 'ws'])).sum(['wd', 'ws']), clabel='wind speed [m/s]',\n levels=levels, cmap=cmap, plot_colorbar=plot_colorbar,\n plot_windturbines=plot_windturbines, normalize_with=normalize_with, ax=ax)\n\n def plot_ti_map(self, levels=100, cmap=None, plot_colorbar=True, plot_windturbines=True, ax=None):\n \"\"\"Plot effective turbulence intensity contourf map\n\n Parameters\n ----------\n levels : int or array-like, default 100\n Determines the number and positions of the contour lines / regions.\n If an int n, use n data intervals; i.e. draw n+1 contour lines. The level heights are automatically chosen.\n If array-like, draw contour lines at the specified levels. The values must be in increasing order.\n cmap : str or Colormap, defaults 'Blues'.\n A Colormap instance or registered colormap name.\n The colormap maps the level values to colors.\n plot_colorbar : bool, default True\n if True (default), colorbar is drawn\n plot_windturbines : bool, default True\n if True (default), lines/circles showing the wind turbine rotors are plotted\n ax : pyplot or matplotlib axes object, default None\n\n \"\"\"\n if cmap is None:\n cmap = 'Blues'\n c = self.plot(self.TI_eff.mean(['wd', 'ws']), clabel=\"Turbulence intensity [-]\",\n levels=levels, cmap=cmap, plot_colorbar=plot_colorbar,\n plot_windturbines=plot_windturbines, ax=ax)\n\n return c\n\n def min_WS_eff(self, x=None, h=None):\n if x is None:\n x = self.x\n if h is None:\n h = self.h[0].item()\n WS_eff = self.WS_eff.sel_interp_all(xr.Dataset(coords={'x': x, 'h': h}))\n y = WS_eff.y.values\n\n def get_min(y, v):\n i = np.argmin(v)\n s = slice(i - 3, i + 4)\n if len(v[s]) < 7 or len(np.unique(v[s])) == 1:\n return np.nan\n# import matplotlib.pyplot as plt\n# plt.plot(y, v)\n# y_ = np.linspace(y[s][0], y[s][-1], 100)\n# plt.plot(y_, InterpolatedUnivariateSpline(y[s], v[s])(y_))\n# plt.axvline(np.interp(0, InterpolatedUnivariateSpline(y[s], v[s]).derivative()(y[s]), y[s]))\n# plt.axvline(0, color='k')\n# plt.show()\n return np.interp(0, InterpolatedUnivariateSpline(y[s], v[s]).derivative()(y[s]), y[s])\n\n y_min_ws = [get_min(y, ws) for ws in WS_eff.squeeze(['ws', 'wd']).T.values]\n return xr.DataArray(y_min_ws, coords={'x': x, 'h': h}, dims='x')\n\n def plot_deflection_grid(self, normalize_with=1, ax=None):\n assert self.windFarmModel.deflectionModel is not None\n assert len(self.simulationResult.wt) == 1\n assert len(self.simulationResult.ws) == 1\n assert len(self.simulationResult.wd) == 1\n x, y = self.x, self.y\n y = y[::len(y) // 10]\n\n X, Y = np.meshgrid(x, y)\n\n from py_wake.utils.model_utils import get_model_input\n kwargs = get_model_input(self.windFarmModel, X.flatten(), Y.flatten(), ws=self.ws, wd=self.wd,\n yaw=self.simulationResult.yaw.ilk())\n dw, hcw, dh = self.windFarmModel.deflectionModel.calc_deflection(**kwargs)\n Yp = -hcw[0, :, 0, 0].reshape(X.shape)\n ax = ax or plt.gca()\n X, Y, Yp = [v / normalize_with for v in [X, Y, Yp]]\n # ax.plot(X[255, :], Y[255, :], 'grey', lw=3)\n for x, y, yp in zip(X, Y, Yp):\n ax.plot(x, y, 'grey', lw=1, 
zorder=-32)\n ax.plot(x, yp, 'k', lw=1)\n\n\nclass Grid():\n default_resolution = 500\n\n\nclass HorizontalGrid(Grid):\n\n def __init__(self, x=None, y=None, h=None, resolution=None, extend=.2):\n \"\"\"Generate a horizontal grid for a flow map\n\n Parameters\n ----------\n x : array_like, optional\n x coordinates used for generating meshgrid\\n\n y : array_like, optional\n y coordinates used for generating meshgrid\n h : array_like, optional\n height above ground, defaults to mean wind turbine hub height\n resolution : int or None, optional\n grid resolution if x or y is not specified. defaults to self.default_resolution\n extend : float, optional\n defines the oversize of the grid if x or y is not specified\n\n Notes\n -----\n if x or y is not specified then a grid with <resolution> number of points\n covering the wind turbines + <extend> x range\n \"\"\"\n self.resolution = resolution or self.default_resolution\n self.x = x\n self.y = y\n self.h = h\n self.extend = extend\n self.plane = \"XY\", h\n\n def __call__(self, x_i, y_i, h_i, **_):\n # setup horizontal X,Y grid\n def f(x, N=self.resolution, ext=self.extend):\n ext *= np.max([1000, (np.max(x) - np.min(x))])\n return np.linspace(np.min(x) - ext, np.max(x) + ext, N)\n x, y, h = self.x, self.y, self.h\n if x is None:\n x = f(x_i)\n if y is None:\n y = f(y_i)\n if self.h is None:\n h = np.mean(h_i)\n else:\n h = self.h\n self.plane = \"XY\", h\n\n X, Y = np.meshgrid(x, y)\n H = np.broadcast_to(h, X.shape)\n return X, Y, X.flatten(), Y.flatten(), H.flatten()\n\n\nXYGrid = HorizontalGrid\n\n\nclass YZGrid(Grid):\n\n def __init__(self, x, y=None, z=None, resolution=None, extend=.2):\n \"\"\"Generate a vertical grid for a flow map in the yz-plane\n\n Parameters\n ----------\n x : array_like, optional\n x coordinates for the yz-grid\\n\n y : array_like, optional\n y coordinates used for generating meshgrid\n z : array_like, optional\n z coordinates(height above ground) used for generating meshgrid\n resolution : int or None, optional\n grid resolution if x or y is not specified. 
defaults to self.default_resolution\n extend : float, optional\n defines the oversize of the grid if x or y is not specified\n\n Notes\n -----\n if y or z is not specified then a grid with <resolution> number of points\n covering the wind turbines + <extend> * range\n \"\"\"\n self.resolution = resolution or self.default_resolution\n self.x = x\n self.y = y\n self.z = z\n self.extend = extend\n self.plane = \"YZ\", x\n\n def __call__(self, x_i, y_i, h_i, d_i):\n # setup horizontal X,Y grid\n def f(x, N=self.resolution, ext=self.extend):\n ext *= max(1000, (max(x) - min(x)))\n return np.linspace(min(x) - ext, max(x) + ext, N)\n x, y, z = self.x, self.y, self.z\n if y is None:\n y = f(y_i)\n if self.z is None:\n z = np.arange(0, (1 + self.extend) * (h_i.max() + d_i.max() / 2), np.diff(y[:2])[0])\n else:\n z = self.z\n\n Y, Z = np.meshgrid(y, z)\n X = np.zeros_like(Y) + x\n return Y, Z, X.T.flatten(), Y.T.flatten(), Z.T.flatten()\n\n\nclass Points(Grid):\n def __init__(self, x, y, h):\n assert len(x) == len(y) == len(h)\n self.x = x\n self.y = y\n self.h = h\n\n def __call__(self, **_):\n return None, None, self.x, self.y, self.h\n", "import numpy as np\nfrom numpy import newaxis as na\n\n\nclass RotorAvgModel():\n \"\"\"Wrap a DeficitModel.\n The RotorAvgModel\n - add an extra dimension (one or more points covering the downstream rotors)\n - Call the wrapped DeficitModel to calculate the deficit at all points\n - Compute a (weighted) mean of the deficit values covering the downstream rotors\n \"\"\"\n args4rotor_avg_deficit = ['hcw_ijlk', 'dh_ijlk', 'D_dst_ijl']\n\n def __init__(self):\n pass\n\n def calc_deficit_convection(self, deficitModel, D_dst_ijl, **kwargs):\n self.deficitModel = deficitModel\n return self.deficitModel.calc_deficit_convection(D_dst_ijl=D_dst_ijl, **kwargs)\n\n def __call__(self, func, D_dst_ijl, **kwargs):\n # add extra dimension, p, with 40 points distributed over the destination rotors\n kwargs = self._update_kwargs(D_dst_ijl=D_dst_ijl, **kwargs)\n\n values_ijlkp = func(**kwargs)\n # Calculate weighted sum of deficit over the destination rotors\n if self.nodes_weight is None:\n return np.mean(values_ijlkp, -1)\n return np.sum(self.nodes_weight[na, na, na, na, :] * values_ijlkp, -1)\n\n\nclass RotorCenter(RotorAvgModel):\n args4rotor_avg_deficit = ['D_dst_ijl']\n nodes_x = [0]\n nodes_y = [0]\n nodes_weight = [1]\n\n def __call__(self, func, **kwargs):\n return func(**kwargs)\n\n def _calc_layout_terms(self, deficitModel, **kwargs):\n deficitModel._calc_layout_terms(**kwargs)\n\n\nclass GridRotorAvg(RotorAvgModel):\n nodes_weight = None\n\n def __init__(self, nodes_x, nodes_y, nodes_weight=None):\n self.nodes_x = np.asarray(nodes_x)\n self.nodes_y = np.asarray(nodes_y)\n if nodes_weight is not None:\n self.nodes_weight = np.asarray(nodes_weight)\n\n def _update_kwargs(self, hcw_ijlk, dh_ijlk, D_dst_ijl, **kwargs):\n # add extra dimension, p, with 40 points distributed over the destination rotors\n R_dst_ijl = D_dst_ijl / 2\n hcw_ijlkp = hcw_ijlk[..., na] + R_dst_ijl[:, :, :, na, na] * self.nodes_x[na, na, na, na, :]\n dh_ijlkp = dh_ijlk[..., na] + R_dst_ijl[:, :, :, na, na] * self.nodes_y[na, na, na, na, :]\n new_kwargs = {'dh_ijlk': dh_ijlkp, 'hcw_ijlk': hcw_ijlkp, 'D_dst_ijl': D_dst_ijl[..., na]}\n\n new_kwargs['cw_ijlk'] = np.sqrt(hcw_ijlkp**2 + dh_ijlkp**2)\n new_kwargs['D_dst_ijl'] = D_dst_ijl\n\n new_kwargs.update({k: v[..., na] for k, v in kwargs.items() if k not in new_kwargs})\n return new_kwargs\n\n def _calc_layout_terms(self, deficitModel, **kwargs):\n 
self.deficitModel = deficitModel\n self.deficitModel._calc_layout_terms(**self._update_kwargs(**kwargs))\n\n\nclass EqGridRotorAvg(GridRotorAvg):\n def __init__(self, n):\n X, Y = np.meshgrid(np.linspace(-1, 1, n + 2)[1:-1], np.linspace(-1, 1, n + 2)[1:-1])\n m = (X**2 + Y**2) < 1\n GridRotorAvg.__init__(self,\n nodes_x=X[m].flatten(),\n nodes_y=Y[m].flatten())\n\n\nclass GQGridRotorAvg(GridRotorAvg):\n \"\"\"Gauss Quadrature grid rotor average model\"\"\"\n\n def __init__(self, n_x, n_y):\n x, y, w = gauss_quadrature(n_x, n_y)\n m = (x**2 + y**2) < 1\n w = w[m]\n w /= w.sum()\n GridRotorAvg.__init__(self, nodes_x=x[m], nodes_y=y[m], nodes_weight=w)\n\n\nclass PolarGridRotorAvg(GridRotorAvg):\n def __init__(self, nodes_r, nodes_theta, nodes_weight):\n self.nodes_x = nodes_r * np.cos(-nodes_theta - np.pi / 2)\n self.nodes_y = nodes_r * np.sin(-nodes_theta - np.pi / 2)\n self.nodes_weight = nodes_weight\n\n\nclass CGIRotorAvg(GridRotorAvg):\n \"\"\"Circular Gauss Integration\"\"\"\n\n def __init__(self, n=7):\n \"\"\"Circular Gauss Integration\n\n Parameters\n ----------\n n : {4, 7, 9, 21}\n Number of points.\n \"\"\"\n pm = np.array([[-1, -1, 1], [-1, 1, 1], [1, -1, 1], [1, 1, 1]])\n nodes_x, nodes_y, nodes_weight = {\n # 1: np.array([[0, 0, .5], [-1, 0, 1 / 8], [1, 0, 1 / 8], [0, -1, 1 / 8], [0, 1, 1 / 8]]),\n 4: pm * [0.5, 0.5, 1 / 4],\n # 3: np.r_[[[0, 0, 1 / 2], [-1, 0, 1 / 12], [1, 0, 1 / 12]], pm * [1 / 2, np.sqrt(3) / 2, 1 / 12]],\n 7: np.r_[[[0, 0, 1 / 4], [-np.sqrt(2 / 3), 0, 1 / 8], [np.sqrt(2 / 3), 0, 1 / 8]],\n pm * [np.sqrt(1 / 6), np.sqrt(1 / 2), 1 / 8]],\n 9: np.r_[[[0, 0, 1 / 6], [-1, 0, 1 / 24], [1, 0, 1 / 24], [0, -1, 1 / 24], [0, 1, 1 / 24]],\n pm * [1 / 2, 1 / 2, 1 / 6]],\n 21: np.r_[[[0, 0, 1 / 9]],\n [[np.sqrt((6 - np.sqrt(6)) / 10) * np.cos(2 * np.pi * k / 10),\n np.sqrt((6 - np.sqrt(6)) / 10) * np.sin(2 * np.pi * k / 10),\n (16 + np.sqrt(6)) / 360] for k in range(1, 11)],\n [[np.sqrt((6 + np.sqrt(6)) / 10) * np.cos(2 * np.pi * k / 10),\n np.sqrt((6 + np.sqrt(6)) / 10) * np.sin(2 * np.pi * k / 10),\n (16 - np.sqrt(6)) / 360] for k in range(1, 11)]]\n }[n].T\n GridRotorAvg.__init__(self, nodes_x, nodes_y, nodes_weight=nodes_weight)\n\n\ndef gauss_quadrature(n_x, n_y):\n nodes_x, nodes_x_weight = np.polynomial.legendre.leggauss(n_x)\n nodes_y, nodes_y_weight = np.polynomial.legendre.leggauss(n_y)\n X, Y = np.meshgrid(nodes_x, nodes_y)\n weights = np.prod(np.meshgrid(nodes_x_weight, nodes_y_weight), 0) / 4\n return X.flatten(), Y.flatten(), weights.flatten()\n\n\ndef polar_gauss_quadrature(n_r, n_theta):\n x, y, w = gauss_quadrature(n_r, n_theta)\n return (x + 1) / 2, (y + 1) * np.pi, w\n", "from numpy import newaxis as na\nimport numpy as np\nfrom py_wake.deflection_models import DeflectionModel\n\n\nclass JimenezWakeDeflection(DeflectionModel):\n \"\"\"Implemented according to\n Jiménez, Á., Crespo, A. and Migoya, E. (2010), Application of a LES technique to characterize\n the wake deflection of a wind turbine in yaw. Wind Energ., 13: 559-572. 
doi:10.1002/we.380\n \"\"\"\n\n args4deflection = ['D_src_il', 'yaw_ilk', 'ct_ilk', 'tilt_ilk']\n\n def __init__(self, N=20, beta=.1):\n self.beta = beta\n self.N = N\n\n def calc_deflection(self, dw_ijl, hcw_ijl, dh_ijl, D_src_il, yaw_ilk, tilt_ilk, ct_ilk, **kwargs):\n dw_lst = (np.logspace(0, 1.1, self.N) - 1) / (10**1.1 - 1)\n dw_ijxl = dw_ijl[:, :, na] * dw_lst[na, na, :, na]\n theta_yaw_ilk, theta_tilt_ilk = np.deg2rad(yaw_ilk), np.deg2rad(-tilt_ilk)\n theta_ilk = np.sqrt(theta_yaw_ilk**2 + theta_tilt_ilk**2)\n theta_deflection_ilk = np.arctan2(theta_tilt_ilk, theta_yaw_ilk)\n denominator_ilk = np.cos(theta_ilk)**2 * np.sin(theta_ilk) * (ct_ilk / 2)\n nominator_ijxl = (1 + (self.beta / D_src_il)[:, na, na, :] * np.maximum(dw_ijxl, 0))**2\n alpha = denominator_ilk[:, na, na] / nominator_ijxl[..., na]\n deflection_ijlk = np.trapz(np.sin(alpha), dw_ijxl[..., na], axis=2)\n self.hcw_ijlk = hcw_ijl[..., na] + deflection_ijlk * np.cos(theta_deflection_ilk[:, na])\n self.dh_ijlk = dh_ijl[..., na] + deflection_ijlk * np.sin(theta_deflection_ilk[:, na])\n return dw_ijl[..., na], self.hcw_ijlk, self.dh_ijlk\n\n\ndef main():\n if __name__ == '__main__':\n from py_wake import Fuga\n from py_wake.examples.data.iea37._iea37 import IEA37Site, IEA37_WindTurbines\n site = IEA37Site(16)\n x, y = [0, 600, 1200], [0, 0, 0] # site.initial_position[:2].T\n windTurbines = IEA37_WindTurbines()\n from py_wake.tests.test_files import tfp\n path = tfp + 'fuga/2MW/Z0=0.03000000Zi=00401Zeta0=0.00E+00/'\n noj = Fuga(path, site, windTurbines, deflectionModel=JimenezWakeDeflection())\n yaw = [-30, 30, 0]\n noj(x, y, yaw=yaw, wd=270, ws=10).flow_map().plot_wake_map()\n import matplotlib.pyplot as plt\n plt.show()\n\n\nmain()\n", "import numpy as np\nfrom py_wake.site._site import UniformWeibullSite\nfrom py_wake.wind_turbines import WindTurbine\nfrom py_wake.wind_turbines.power_ct_functions import PowerCtTabular\n\nwt_x = [423974, 424042, 424111, 424179, 424247, 424315, 424384, 424452, 424534,\n 424602, 424671, 424739, 424807, 424875, 424944, 425012, 425094, 425162,\n 425231, 425299, 425367, 425435, 425504, 425572, 425654, 425722, 425791,\n 425859, 425927, 425995, 426064, 426132, 426214, 426282, 426351, 426419,\n 426487, 426555, 426624, 426692, 426774, 426842, 426911, 426979, 427047,\n 427115, 427184, 427252, 427334, 427402, 427471, 427539, 427607, 427675,\n 427744, 427812, 427894, 427962, 428031, 428099, 428167, 428235, 428304,\n 428372, 428454, 428522, 428591, 428659, 428727, 428795, 428864, 428932,\n 429014, 429082, 429151, 429219, 429287, 429355, 429424, 429492]\nwt_y = [6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556,\n 6151447, 6150891, 6150335, 6149779, 6149224, 6148668, 6148112, 6147556]\nwt9_x = np.array(wt_x)[[0, 1, 2, 8, 9, 10, 16, 17, 18]]\nwt9_y = np.array(wt_y)[[0, 1, 2, 8, 9, 10, 16, 17, 18]]\ni16 = [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27]\nwt16_x = np.array(wt_x)[i16]\nwt16_y = 
np.array(wt_y)[i16]\n\n\npower_curve = np.array([[3.0, 0.0],\n [4.0, 66.6],\n [5.0, 154.0],\n [6.0, 282.0],\n [7.0, 460.0],\n [8.0, 696.0],\n [9.0, 996.0],\n [10.0, 1341.0],\n [11.0, 1661.0],\n [12.0, 1866.0],\n [13.0, 1958.0],\n [14.0, 1988.0],\n [15.0, 1997.0],\n [16.0, 1999.0],\n [17.0, 2000.0],\n [18.0, 2000.0],\n [19.0, 2000.0],\n [20.0, 2000.0],\n [21.0, 2000.0],\n [22.0, 2000.0],\n [23.0, 2000.0],\n [24.0, 2000.0],\n [25.0, 2000.0]]) * [1, 1000]\nct_curve = np.array([[3.0, 0.0],\n [4.0, 0.818],\n [5.0, 0.806],\n [6.0, 0.804],\n [7.0, 0.805],\n [8.0, 0.806],\n [9.0, 0.807],\n [10.0, 0.793],\n [11.0, 0.739],\n [12.0, 0.709],\n [13.0, 0.409],\n [14.0, 0.314],\n [15.0, 0.249],\n [16.0, 0.202],\n [17.0, 0.167],\n [18.0, 0.14],\n [19.0, 0.119],\n [20.0, 0.102],\n [21.0, 0.088],\n [22.0, 0.077],\n [23.0, 0.067],\n [24.0, 0.06],\n [25.0, 0.053]])\n\n\nclass V80(WindTurbine):\n def __init__(self, method='linear'):\n \"\"\"\n Parameters\n ----------\n method : {'linear', 'pchip'}\n linear(fast) or pchip(smooth and gradient friendly) interpolation\n \"\"\"\n WindTurbine.__init__(self, name='V80', diameter=80, hub_height=70,\n powerCtFunction=PowerCtTabular(power_curve[:, 0], power_curve[:, 1], 'w',\n ct_curve[:, 1], method=method))\n\n\nHornsrevV80 = V80\n\n\nclass Hornsrev1Site(UniformWeibullSite):\n def __init__(self, ti=.1, shear=None):\n f = [3.597152, 3.948682, 5.167395, 7.000154, 8.364547, 6.43485,\n 8.643194, 11.77051, 15.15757, 14.73792, 10.01205, 5.165975]\n a = [9.176929, 9.782334, 9.531809, 9.909545, 10.04269, 9.593921,\n 9.584007, 10.51499, 11.39895, 11.68746, 11.63732, 10.08803]\n k = [2.392578, 2.447266, 2.412109, 2.591797, 2.755859, 2.595703,\n 2.583984, 2.548828, 2.470703, 2.607422, 2.626953, 2.326172]\n UniformWeibullSite.__init__(self, np.array(f) / np.sum(f), a, k, ti=ti, shear=shear)\n self.initial_position = np.array([wt_x, wt_y]).T\n\n\ndef main():\n wt = V80()\n print('Diameter', wt.diameter())\n print('Hub height', wt.hub_height())\n\n import matplotlib.pyplot as plt\n ws = np.linspace(3, 20, 100)\n plt.plot(ws, wt.power(ws) * 1e-3, label='Power')\n c = plt.plot([], [], label='Ct')[0].get_color()\n plt.ylabel('Power [kW]')\n ax = plt.gca().twinx()\n ax.plot(ws, wt.ct(ws), color=c)\n ax.set_ylabel('Ct')\n plt.xlabel('Wind speed [m/s]')\n plt.gcf().axes[0].legend(loc=1)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
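The first file in the PyWake record above, area_overlapping_factor.py, computes the fraction of a downstream rotor covered by a wake using the circle-overlap formula of Feng & Shen (2015), Eq. (A1). The scalar sketch below reproduces that partial-wake branch and is handy for sanity-checking the vectorised implementation; overlap_fraction is a name made up for this example, with R1 the wake radius, R2 the downstream rotor radius and d the distance between the two centres.

import numpy as np

def overlap_fraction(R1, R2, d):
    Rmax, Rmin = max(R1, R2), min(R1, R2)
    if d <= Rmax - Rmin:            # smaller circle fully inside the larger one
        return 1.0
    if d >= R1 + R2:                # circles do not touch
        return 0.0
    p = (R1 + R2 + d) / 2.0
    alpha = np.arccos(np.clip((Rmax**2 + d**2 - Rmin**2) / (2 * Rmax * d), -1.0, 1.0))
    beta = np.arccos(np.clip((Rmin**2 + d**2 - Rmax**2) / (2 * Rmin * d), -1.0, 1.0))
    A_tri = np.sqrt(p * (p - Rmin) * (p - Rmax) * (p - d))
    return (alpha * Rmax**2 + beta * Rmin**2 - 2.0 * A_tri) / (np.pi * R2**2)

print(overlap_fraction(60.0, 40.0, 50.0))   # partial overlap, ~0.58
print(overlap_fraction(60.0, 40.0, 10.0))   # rotor fully inside the wake -> 1.0

The non-overlapping case is returned explicitly here; the vectorised code reaches the same result by leaving those entries at their initialised value of zero.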
[ [ "numpy.maximum", "numpy.sqrt", "numpy.abs", "numpy.minimum", "numpy.asarray", "numpy.all", "numpy.zeros_like", "numpy.broadcast_to", "numpy.where" ], [ "numpy.log", "numpy.maximum", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.finfo", "numpy.warnings.catch_warnings", "numpy.warnings.filterwarnings", "numpy.exp", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.gca", "numpy.min", "numpy.reshape", "numpy.unique", "matplotlib.pyplot.colorbar", "numpy.max", "numpy.argmin", "numpy.broadcast_to", "numpy.mean", "numpy.zeros_like", "numpy.diff", "numpy.meshgrid", "scipy.interpolate.fitpack2.InterpolatedUnivariateSpline" ], [ "numpy.polynomial.legendre.leggauss", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.cos", "numpy.sin", "numpy.mean", "numpy.array", "numpy.meshgrid", "numpy.sum" ], [ "numpy.maximum", "numpy.sqrt", "numpy.logspace", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.deg2rad", "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.gca", "numpy.linspace", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
keyboardbear/IzunaDSP
[ "2e4a93dc1e09775f23aebec70e2f51f8706a7635" ]
[ "izunadsp/parts/apply_eq.py" ]
[ "# External Libraries\nfrom essentia.standard import FFT, IFFT\nimport numpy as np\n\n# IzunaDSP\nfrom izunadsp import DSPPart, AudioSequence\n\n\nclass ApplyEQ(DSPPart):\n def __init__(self):\n super().__init__()\n self._eq = np.array([1])\n self.eq = [1]\n self.fft = FFT()\n self.ifft = IFFT()\n\n @property\n def eq(self):\n return self._eq\n\n @eq.setter\n def eq(self, value: list):\n group_size = 513 // len(value) + 1\n v = np.array(value).repeat(group_size)\n too_many = len(v) - 513\n for i in range(too_many):\n v = np.delete(v, i * (group_size - 1))\n\n self._eq = v\n\n def set_eq(self, eq: list):\n if not len or len(eq) > 512:\n raise ValueError(\"Expected a list of size 0 < n <= 512\")\n self.eq = eq\n\n def bands_to_eq_size(self, frame: np.array) -> np.array:\n frame *= self.eq\n return frame / 1000\n\n def transform(self, frame: np.ndarray) -> np.ndarray:\n fftified = self.fft(frame.copy())\n eq_applied = self.bands_to_eq_size(fftified)\n return self.ifft(eq_applied)\n\n def handle(self, audio: AudioSequence) -> AudioSequence:\n left, right = audio / 2\n\n new_left = []\n new_right = []\n\n for old, new in zip([left, right], [new_left, new_right]):\n for frame in old:\n new.append(frame.apply(self.transform, seq=True))\n\n return sum(new_left) * sum(new_right)\n" ]
[ [ "numpy.delete", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leelabcnbc/keras-vis
[ "0af75c03b8eed2e488c122fcd3f535d27a4ede8f" ]
[ "vis/visualization/__init__.py" ]
[ "from __future__ import absolute_import\n\n\nfrom .activation_maximization import visualize_activation_with_losses\nfrom .activation_maximization import visualize_activation\n\nfrom .saliency import visualize_saliency_with_losses\nfrom .saliency import visualize_saliency\nfrom .saliency import visualize_cam_with_losses\nfrom .saliency import visualize_cam\n\nfrom tensorflow.keras import backend as K\n\n\ndef get_num_filters(layer):\n \"\"\"Determines the number of filters within the given `layer`.\n\n Args:\n layer: The keras layer to use.\n\n Returns:\n Total number of filters within `layer`.\n For `keras.layers.Dense` layer, this is the total number of outputs.\n \"\"\"\n # Handle layers with no channels.\n if K.ndim(layer.output) == 2:\n return K.int_shape(layer.output)[-1]\n\n channel_idx = 1 if K.image_data_format() == 'channels_first' else -1\n return K.int_shape(layer.output)[channel_idx]\n\n\ndef overlay(array1, array2, alpha=0.5):\n \"\"\"Overlays `array1` onto `array2` with `alpha` blending.\n\n Args:\n array1: The first numpy array.\n array2: The second numpy array.\n alpha: The alpha value of `array1` as overlayed onto `array2`. This value needs to be between [0, 1],\n with 0 being `array2` only to 1 being `array1` only (Default value = 0.5).\n\n Returns:\n The `array1`, overlayed with `array2` using `alpha` blending.\n \"\"\"\n if alpha < 0. or alpha > 1.:\n raise ValueError(\"`alpha` needs to be between [0, 1]\")\n if array1.shape != array2.shape:\n raise ValueError('`array1` and `array2` must have the same shapes')\n\n return (array1 * alpha + array2 * (1. - alpha)).astype(array1.dtype)\n" ]
[ [ "tensorflow.keras.backend.ndim", "tensorflow.keras.backend.int_shape", "tensorflow.keras.backend.image_data_format" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
tomspur/scipy
[ "5309706537dbd96e0409f890a20fc6f5badfbac3", "5309706537dbd96e0409f890a20fc6f5badfbac3" ]
[ "scipy/sparse/csc.py", "scipy/optimize/benchmarks/bench_optimizers.py" ]
[ "\"\"\"Compressed Sparse Column matrix format\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = ['csc_matrix', 'isspmatrix_csc']\n\nfrom warnings import warn\n\nimport numpy as np\nfrom scipy.lib.six import xrange\n\nfrom .base import isspmatrix\nfrom .sparsetools import csc_tocsr\nfrom . import sparsetools\nfrom .sputils import upcast, isintlike, IndexMixin, get_index_dtype\n\nfrom .compressed import _cs_matrix\n\n\nclass csc_matrix(_cs_matrix, IndexMixin):\n \"\"\"\n Compressed Sparse Column matrix\n\n This can be instantiated in several ways:\n\n csc_matrix(D)\n with a dense matrix or rank-2 ndarray D\n\n csc_matrix(S)\n with another sparse matrix S (equivalent to S.tocsc())\n\n csc_matrix((M, N), [dtype])\n to construct an empty matrix with shape (M, N)\n dtype is optional, defaulting to dtype='d'.\n\n csc_matrix((data, ij), [shape=(M, N)])\n where ``data`` and ``ij`` satisfy the relationship\n ``a[ij[0, k], ij[1, k]] = data[k]``\n\n csc_matrix((data, indices, indptr), [shape=(M, N)])\n is the standard CSC representation where the row indices for\n column i are stored in ``indices[indptr[i]:indptr[i+1]]``\n and their corresponding values are stored in\n ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is\n not supplied, the matrix dimensions are inferred from\n the index arrays.\n\n Attributes\n ----------\n dtype : dtype\n Data type of the matrix\n shape : 2-tuple\n Shape of the matrix\n ndim : int\n Number of dimensions (this is always 2)\n nnz\n Number of nonzero elements\n data\n Data array of the matrix\n indices\n CSC format index array\n indptr\n CSC format index pointer array\n has_sorted_indices\n Whether indices are sorted\n\n Notes\n -----\n\n Sparse matrices can be used in arithmetic operations: they support\n addition, subtraction, multiplication, division, and matrix power.\n\n Advantages of the CSC format\n - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.\n - efficient column slicing\n - fast matrix vector products (CSR, BSR may be faster)\n\n Disadvantages of the CSC format\n - slow row slicing operations (consider CSR)\n - changes to the sparsity structure are expensive (consider LIL or DOK)\n\n\n Examples\n --------\n\n >>> from scipy.sparse import *\n >>> from scipy import *\n >>> csc_matrix( (3,4), dtype=int8 ).todense()\n matrix([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int8)\n\n >>> row = array([0,2,2,0,1,2])\n >>> col = array([0,0,1,2,2,2])\n >>> data = array([1,2,3,4,5,6])\n >>> csc_matrix( (data,(row,col)), shape=(3,3) ).todense()\n matrix([[1, 0, 4],\n [0, 0, 5],\n [2, 3, 6]])\n\n >>> indptr = array([0,2,3,6])\n >>> indices = array([0,2,2,0,1,2])\n >>> data = array([1,2,3,4,5,6])\n >>> csc_matrix( (data,indices,indptr), shape=(3,3) ).todense()\n matrix([[1, 0, 4],\n [0, 0, 5],\n [2, 3, 6]])\n\n \"\"\"\n\n def transpose(self, copy=False):\n from .csr import csr_matrix\n M,N = self.shape\n return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy)\n\n def __iter__(self):\n csr = self.tocsr()\n for r in xrange(self.shape[0]):\n yield csr[r,:]\n\n def tocsc(self, copy=False):\n if copy:\n return self.copy()\n else:\n return self\n\n def tocsr(self):\n M,N = self.shape\n idx_dtype = get_index_dtype((self.indptr, self.indices),\n maxval=max(self.nnz, N))\n indptr = np.empty(M + 1, dtype=idx_dtype)\n indices = np.empty(self.nnz, dtype=idx_dtype)\n data = np.empty(self.nnz, dtype=upcast(self.dtype))\n\n csc_tocsr(M, N,\n 
self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n self.data,\n indptr,\n indices,\n data)\n\n from .csr import csr_matrix\n A = csr_matrix((data, indices, indptr), shape=self.shape)\n A.has_sorted_indices = True\n return A\n\n def __getitem__(self, key):\n # Use CSR to implement fancy indexing.\n\n row, col = self._unpack_index(key)\n # Things that return submatrices. row or col is a int or slice.\n if (isinstance(row, slice) or isinstance(col, slice) or\n isintlike(row) or isintlike(col)):\n return self.T[col, row].T\n # Things that return a sequence of values.\n else:\n return self.T[col, row]\n\n def nonzero(self):\n # CSC can't use _cs_matrix's .nonzero method because it\n # returns the indices sorted for self transposed.\n\n # Get row and col indices, from _cs_matrix.tocoo\n major_dim, minor_dim = self._swap(self.shape)\n minor_indices = self.indices\n major_indices = np.empty(len(minor_indices), dtype=self.indptr.dtype)\n sparsetools.expandptr(major_dim, self.indptr, major_indices)\n row, col = self._swap((major_indices, minor_indices))\n\n # Sort them to be in C-style order\n ind = np.lexsort((col, row))\n row = row[ind]\n col = col[ind]\n\n return row, col\n\n nonzero.__doc__ = _cs_matrix.nonzero.__doc__\n\n def getrow(self, i):\n \"\"\"Returns a copy of row i of the matrix, as a (1 x n)\n CSR matrix (row vector).\n \"\"\"\n # transpose to use CSR code\n # we convert to CSR to maintain compatibility with old impl.\n # in spmatrix.getrow()\n return self.T.getcol(i).T.tocsr()\n\n def getcol(self, i):\n \"\"\"Returns a copy of column i of the matrix, as a (m x 1)\n CSC matrix (column vector).\n \"\"\"\n # transpose to use CSR code\n return self.T.getrow(i).T\n\n # these functions are used by the parent class (_cs_matrix)\n # to remove redudancy between csc_matrix and csr_matrix\n def _swap(self,x):\n \"\"\"swap the members of x if this is a column-oriented matrix\n \"\"\"\n return (x[1],x[0])\n\n\ndef isspmatrix_csc(x):\n return isinstance(x, csc_matrix)\n", "import time\nfrom collections import defaultdict\n\nimport numpy as np\nfrom numpy.testing import Tester, TestCase\n\nimport scipy.optimize\nfrom scipy.optimize.optimize import rosen, rosen_der, rosen_hess\nimport test_functions as funcs\n\n\nclass _BenchOptimizers(object):\n \"\"\"a framework for benchmarking the optimizer\n \n Parameters\n ----------\n function_name : string\n fun : callable\n der : callable\n function that returns the derivative (jacobian, gradient) of fun\n hess : callable\n function that returns the hessian of fun\n minimizer_kwargs : kwargs\n additional keywords passed to the minimizer. e.g. 
tol, maxiter\n \"\"\"\n def __init__(self, function_name, fun, der=None, hess=None,\n **minimizer_kwargs):\n self.function_name = function_name\n self.fun = fun\n self.der = der\n self.hess = hess\n self.minimizer_kwargs = minimizer_kwargs\n if \"tol\" not in minimizer_kwargs:\n minimizer_kwargs[\"tol\"] = 1e-4\n \n self.results = []\n\n def reset(self):\n self.results = []\n\n def add_result(self, result, t, name):\n \"\"\"add a result to the list\"\"\"\n result.time = t\n result.name = name\n if not hasattr(result, \"njev\"):\n result.njev = 0\n if not hasattr(result, \"nhev\"):\n result.nhev = 0\n self.results.append(result)\n \n def print_results(self):\n \"\"\"print the current list of results\"\"\"\n results = self.average_results()\n results = sorted(results, key=lambda x: (x.nfail, x.mean_time))\n print(\"\")\n print(\"=========================================================\")\n print(\"Optimizer benchmark: %s\" % (self.function_name))\n print(\"dimensions: %d, extra kwargs: %s\" % (results[0].ndim, str(self.minimizer_kwargs)))\n print(\"averaged over %d starting configurations\" % (results[0].ntrials))\n print(\" Optimizer nfail nfev njev nhev time\")\n print(\"---------------------------------------------------------\")\n for res in results:\n print(\"%11s | %4d | %4d | %4d | %4d | %.6g\" % \n (res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))\n \n def average_results(self):\n \"\"\"group the results by minimizer and average over the runs\"\"\"\n grouped_results = defaultdict(list)\n for res in self.results:\n grouped_results[res.name].append(res)\n \n averaged_results = dict()\n for name, result_list in grouped_results.items():\n newres = scipy.optimize.Result()\n newres.name = name\n newres.mean_nfev = np.mean([r.nfev for r in result_list])\n newres.mean_njev = np.mean([r.njev for r in result_list])\n newres.mean_nhev = np.mean([r.nhev for r in result_list])\n newres.mean_time = np.mean([r.time for r in result_list])\n newres.ntrials = len(result_list)\n newres.nfail = len([r for r in result_list if not r.success])\n try:\n newres.ndim = len(result_list[0].x)\n except TypeError:\n newres.ndim = 1\n averaged_results[name] = newres\n return averaged_results.values()\n \n def bench_run(self, x0, **minimizer_kwargs):\n \"\"\"do an optimization test starting at x0 for all the optimizers\"\"\"\n kwargs = self.minimizer_kwargs\n \n fonly_methods = [\"COBYLA\", 'Powell']\n for method in fonly_methods:\n t0 = time.time()\n res = scipy.optimize.minimize(self.fun, x0, method=method, \n **kwargs)\n t1 = time.time()\n self.add_result(res, t1-t0, method)\n \n \n gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP']\n if self.der is not None:\n for method in gradient_methods:\n t0 = time.time()\n res = scipy.optimize.minimize(self.fun, x0, method=method, \n jac=self.der, **kwargs)\n t1 = time.time()\n self.add_result(res, t1-t0, method)\n\n hessian_methods = [\"Newton-CG\", 'dogleg', 'trust-ncg']\n if self.hess is not None:\n for method in hessian_methods:\n t0 = time.time()\n res = scipy.optimize.minimize(self.fun, x0, method=method, \n jac=self.der, hess=self.hess, \n **kwargs)\n t1 = time.time()\n self.add_result(res, t1-t0, method)\n\nclass BenchSmoothUnbounded(TestCase):\n \"\"\"Benchmark the optimizers with smooth, unbounded, functions\"\"\"\n def bench_rosenbrock(self):\n b = _BenchOptimizers(\"Rosenbrock function\",\n fun=rosen, der=rosen_der, hess=rosen_hess)\n for i in range(10):\n b.bench_run(np.random.uniform(-3,3,3))\n b.print_results()\n 
\n def bench_rosenbrock_tight(self):\n b = _BenchOptimizers(\"Rosenbrock function\",\n fun=rosen, der=rosen_der, hess=rosen_hess,\n tol=1e-8)\n for i in range(10):\n b.bench_run(np.random.uniform(-3,3,3))\n b.print_results()\n \n def bench_simple_quadratic(self):\n s = funcs.SimpleQuadratic()\n # print \"checking gradient\", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))\n b = _BenchOptimizers(\"simple quadratic function\",\n fun=s.fun, der=s.der, hess=s.hess)\n for i in range(10):\n b.bench_run(np.random.uniform(-2,2,3))\n b.print_results()\n \n def bench_asymetric_quadratic(self):\n s = funcs.AsymmetricQuadratic()\n # print \"checking gradient\", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))\n b = _BenchOptimizers(\"function sum(x**2) + x[0]\",\n fun=s.fun, der=s.der, hess=s.hess)\n for i in range(10):\n b.bench_run(np.random.uniform(-2,2,3))\n b.print_results()\n \n def bench_sin_1d(self):\n fun = lambda x: np.sin(x[0])\n der = lambda x: np.array([np.cos(x[0])])\n b = _BenchOptimizers(\"1d sin function\",\n fun=fun, der=der, hess=None)\n for i in range(10):\n b.bench_run(np.random.uniform(-2,2,1))\n b.print_results()\n \n def bench_booth(self):\n s = funcs.Booth()\n # print \"checking gradient\", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))\n b = _BenchOptimizers(\"Booth's function\",\n fun=s.fun, der=s.der, hess=None)\n for i in range(10):\n b.bench_run(np.random.uniform(0,10,2))\n b.print_results()\n \n def bench_beale(self):\n s = funcs.Beale()\n # print \"checking gradient\", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))\n b = _BenchOptimizers(\"Beale's function\",\n fun=s.fun, der=s.der, hess=None)\n for i in range(10):\n b.bench_run(np.random.uniform(0,10,2))\n b.print_results()\n \n def bench_LJ(self):\n s = funcs.LJ()\n # print \"checking gradient\", scipy.optimize.check_grad(s.get_energy, s.get_gradient, np.random.uniform(-2,2,3*4))\n natoms = 4\n b = _BenchOptimizers(\"%d atom Lennard Jones potential\" % (natoms),\n fun=s.get_energy, der=s.get_gradient, hess=None)\n for i in range(10):\n b.bench_run(np.random.uniform(-2,2,natoms*3))\n b.print_results()\n \n \n#def main():\n# bench_rosenbrock()\n# bench_simple_quadratic()\n# bench_asymetric_quadratic()\n# bench_sin_1d()\n# bench_booth()\n# bench_beale()\n# bench_LJ()\n\nif __name__ == \"__main__\":\n Tester().bench(extra_argv=dict())\n" ]
[ [ "numpy.empty", "numpy.lexsort", "scipy.lib.six.xrange" ], [ "numpy.cos", "numpy.sin", "numpy.mean", "numpy.random.uniform", "numpy.testing.Tester" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
otanan/STP
[ "85e6f6ff7bfe1bdd832dc5c2f32d0fdb084c55fa" ]
[ "stp/info.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Entropy and information theory related calculations.\n\n**Author: Jonathan Delgado**\n\n\"\"\"\n\n######################## Imports ########################\n\n\nimport numpy as np\n\nimport stp\n\n\n######################## Helper functions ########################\n\n\ndef _eps_filter(x):\n \"\"\" Checks if the value is within machine-epsilon of zero and maps it to \n zero if it is the case. Useful for removing negative values in entropies that should otherwise be zero.\n \n Args:\n x (float): value to be checked.\n \n Returns:\n (float): x if the value is not within machine epsilon of zero, 0 otherwise.\n \n \"\"\"\n return x if not np.isclose(x, 0, atol=9*10E-15) else 0\n \n\n######################## Entropy calculations ########################\n\n\ndef entropy(p):\n \"\"\" Calculates the Shannon entropy for a marginal distribution.\n\n Args:\n p (np.ndarray): the marginal distribution.\n\n Returns:\n (float): the entropy of p\n\n \"\"\"\n # Since zeros do not contribute to the Shannon entropy by definition, we \n # ignore them to avoid any errors/warnings.\n p = p[p != 0]\n\n H = -np.dot(p, np.log(p))\n # Filter against machine epsilon\n return _eps_filter(H)\n\n\ndef delta_entropy(R, p):\n \"\"\" Calculates the discrete time change in entropy using the entropy of p \n evolved with R, minus the entropy of p.\n \n Args:\n R (np.ndarray): the transition matrix.\n\n p (np.ndarray): the marginal distribution.\n \n Returns:\n (float): the change in entropy\n \n \"\"\"\n return entropy(step(R, p)) - entropy(p)\n\n\ndef relative_entropy(p, q):\n \"\"\" Calculates the Kullback-Leibler divergence, which is nonnegative and \n vanishes if and only if the distributions coincide.\n \n Args:\n p, q (np.ndarray): the probability distributions.\n \n Returns:\n (float): the relative entropy.\n \n \"\"\"\n if p.shape[0] != q.shape[0]:\n print('Dimensions of vectors are not equal. Cannot find relative entropy.')\n sys.exit()\n\n # Any values where p is zero are defined to be zero and hence do not\n # contribute to the relative entropy\n # By masking q as well we automatically skip the values that were supposed\n # to vanish with p avoiding any misalignment issues\n # Note that by masking q only where p is zero doesn't remove\n # any mismatching meaning it will still be infinite (as it should be)\n # in the case where q has a zero that p does not.\n p_filtered = p[p != 0]\n log_ratio = np.log(p_filtered / q[p != 0])\n\n return np.dot(p_filtered, log_ratio)\n\n\ndef entropy_production(matrix, p, discrete=True):\n \"\"\" Calculates the entropy production for either discrete or continuous \n time.\n \n Args:\n matrix (np.ndarray): the stochastic matrix, either a discrete time transition matrix or a continuous time rate matrix.\n\n p (np.ndarray): the marginal distribution\n\n Kwargs:\n discrete (bool): True if we are calculating the discrete time entropy production (nats), False if we are calculating it in continuous time (nats/time).\n \n Returns:\n (float/np.inf): the entropy production\n \n \"\"\"\n log_product = matrix * np.log( matrix / matrix.T )\n # The entropy term only exists in the case of discrete time\n # it vanishes when we calculate the continuous time EP,\n # by multiplying by the boolean we include it only when\n # necessary\n EP = np.dot(log_product.sum(axis=0), p) - (entropy(p) * discrete) \\\n - np.dot(stp.step(matrix, p), np.log(p))\n return EP\n\n\ndef entropy_flow(R, p):\n \"\"\" Calculates the discrete time entropy flow. 
This has not been \n generalized to handle the continuous time entropy flow yet.\n \n Args:\n R (np.ndarray): the discrete time transition matrix\n \n p (np.ndarray): the marginal distribution\n \n Returns:\n (float): the entropy flow\n \n \"\"\"\n # Vectorized calculation\n log_product = R * np.log( R / R.T )\n p_step = step(R, p)\n EF = -np.dot(log_product.sum(axis=0), p) + entropy(p_step) \\\n + np.dot(p_step, np.log(p))\n return EF\n\n\n######################## Entropy rates ########################\n\n\ndef entropy_rate(R):\n \"\"\" Calculates the asymptotic entropy rate for the provided transition \n matrix. If the matrix is time-inhomogeneous then we return a function that generates the entropy_rate as a function of n by calculating the systems limiting distribution for each n.\n \n Args:\n R (np.ndarray/function): the transition matrix.\n \n Returns:\n (float/function): the entropy velocity.\n \n \"\"\"\n if callable(R):\n return lambda n : entropy_rate(R(n))\n\n pst = stp.get_stationary_distribution(R, discrete=True)\n RProduct = (R * np.log(R)).sum(axis=0)\n\n return -np.dot(pst, RProduct)\n\n\n######################## Information Space Objects ########################\n\n\nclass InfoSpace:\n \"\"\" Information space. Holds collections of paths that traverse states in a \n state space as a matrix, and the probability of each of those paths. \n \n Provides functionality on this path space such as providing path entropies.\n \n Attributes:\n paths: the matrix of paths.\n\n probabilities: a list of probabilities each path.\n\n num_paths: the number of paths considered.\n\n path_length: the length of the paths considered.\n\n probabilities: a matrix where the (i,j)th element is the probability of observing the first j states of the ith path.\n\n entropies: a list of path entropies for each path\n\n total_probability: the sum of the probabilities of each path.\n\n \"\"\"\n\n def __init__(self, paths, p_matrix):\n \"\"\" Initializes the InfoSpace object.\n \n Args:\n paths (np.ndarray): a matrix of paths where the (i,j)th element corresponds to the jth symbol of the ith path.\n\n p_matrix (np.ndarray): a matrix of probabilities where the (i,j)th element corresponds to the probability of observing the ith path for the first j+1 (zero-indexing) symbols.\n \n \"\"\"\n self._paths = np.array(paths)\n \n # Matrix of probabilities corresponding to the probability for the path\n # at each moment.\n self._p_matrix = np.array(p_matrix)\n \n if self._p_matrix.size != 0:\n # The information space is not empty\n self._probabilities = self._p_matrix[:, -1]\n else:\n # There is zero probability here.\n self._probabilities = 0\n\n\n #------------- Properties -------------#\n\n\n @property\n def paths(self):\n return self._paths\n\n\n @property\n def num_paths(self):\n return self.paths.shape[0]\n\n\n @property\n def path_length(self):\n return self.paths.shape[1]\n \n\n @property\n def probabilities(self):\n return self._probabilities\n\n\n @property\n def entropies(self):\n \"\"\" Returns a list of path entropies for each corresponding path \n probability.\n\n \"\"\"\n try:\n return self._entropies\n except AttributeError:\n # It's never been calculated before\n self._entropies = -np.log(self.probabilities)\n\n return self._entropies\n\n\n @property\n def total_probability(self):\n try:\n return self.probabilities.sum()\n except AttributeError:\n # Space is empty\n return 0\n \n\n #------------- Static methods -------------#\n\n\n @staticmethod\n def shorten(infospace, path_length, 
return_index=False):\n \"\"\" Takes an Information Space and shortens it. Since unique paths of \n length n, may be degenerate when truncated to paths of length m < n, we need to check for degeneracies and filter them out in both paths and probabilities.\n \n Args:\n infospace (InfoSpace): the information space to shorten.\n\n path_length (int): the path length the information space should be shortened to.\n\n Kwargs:\n return_index (bool): returns the indices of the non-degenerate paths for the given path length using the original matrix. Useful for filtering other quantities of interest that may not be attached to this object. \n \n Returns:\n (InfoSpace): the shortened InfoSpace.\n \n \"\"\"\n if path_length < 1:\n raise ValueError(f'Invalid path length: {path_length}. Path length must be an integer greater than 0.')\n elif path_length > infospace.path_length:\n raise ValueError(f'Cannot shorten an InformationSpace from length: {infospace.path_length} -> {path_length}.')\n\n if infospace.paths.size == 0:\n # This is an empty information space\n return infospace if not return_index else (infospace, [])\n\n # Truncate the path matrix\n paths = infospace.paths[:, :path_length]\n # Return index will provide the path indices of the non-degenerate paths\n _, indices = np.unique(paths, axis=0, return_index=True) \n # Sort the indices\n indices = sorted(indices)\n # Filter out the paths. Not taken from np.unique to ensure the correct\n # ordering.\n paths = paths[indices, :]\n # Truncate the probability matrix\n p_matrix = infospace._p_matrix[:, :path_length]\n # Filter the probabilities matrix\n p_matrix = p_matrix[indices, :]\n\n infospace = InfoSpace(paths, p_matrix)\n return infospace if not return_index else infospace, indices\n\n\nclass PartitionedInfoSpace(InfoSpace):\n \"\"\" Partitioned Information Space. Constructs a typical set on an \n information space to partition it into a typical information space and an atypical one. \n\n Holds path probabilities, typical paths, atypical paths, atypical path probabilities and more. This object will use a provided (often sampled) path space to partition the space into a collection of typical and atypical paths depending on the dynamics provided. Will also track other quantities of interest such as the upper and lower bounds on the path probabilities required for the paths to be considered typical.\n \n Attributes:\n paths: the matrix of paths.\n\n probabilities: a list of probabilities each path.\n\n num_paths: the number of paths considered.\n\n path_length: the length of the paths considered.\n\n probabilities: a matrix where the (i,j)th element is the probability of observing the first j states of the ith path.\n\n entropies: a list of path entropies for each path.\n\n entropy_rates: a list of the entropy rates for each various path length. This will be the center of the epsilon-neighborhood for path entropies to qualify paths as typical for.\n\n epsilon: the widths of the neighborhood used for paths to be considered typical for each path length.\n\n upper/lower: the upper/lower bounds as measured in nats. 
This means that a path is typical if and only if its path entropy rate is within these bounds.\n\n typicalities: a matrix where the (i,j)th element is a boolean determining whether the ith path is typical after j+1 steps.\n\n ts: the typical set.\n\n ats: the atypical set.\n\n \"\"\"\n\n def __init__(self, entropy_rates, epsilon, paths=None, p_matrix=None, typical_space=None, atypical_space=None):\n \"\"\" Generates the PartitionedInfoSpace.\n \n Args:\n entropy_rates (np.ndarray): a list of the entropy rates for each various path length. This will be the center of the epsilon-neighborhood for path entropies to qualify paths as typical for.\n\n epsilon (np.ndarray): the widths of the neighborhood used for paths to be considered typical for each path length.\n\n Kwargs:\n paths (np.ndarray/None): the entire sampled path space, the union of the typical and atypical spaces. If not provided these spaces will be merged to generate it.\n\n p_matrix (np.ndarray/None): the entire matrix of probabilities for each path and each path length. If not provided, this will be generated by merging the p_matrix of the typical and atypical spaces.\n\n typical_space (InfoSpace/None): the typical set on this space. If None, partitions the provided path space.\n\n atypical_space (InfoSpace): the atypical set on this space. If None, partitions the provided path space.\n \n \"\"\"\n # Bool if the space simply needs to be partitioned\n must_partition = (paths is None) or (p_matrix is None)\n # Bool if the space simply needs to be merged since it's already been \n # partitioned into a typical and atypical space\n must_union = (typical_space is None) or (atypical_space is None)\n\n if must_partition and must_union:\n # We need either the paths AND the p_matrix or the tupical/atypical \n # spaces to partition/union the spaces respectively.\n raise TypeError('In sufficient information provided to partition/union the Information Space. 
We need either paths with their probabilities or the already partitioned spaces.')\n\n\n if must_partition:\n # Partition the paths and probability matrix into a typical and\n # atypical space\n\n # Need to generate the upper/lower bounds for the partitioning\n # of the spaces\n self._lower = entropy_rates - epsilon\n self._upper = entropy_rates + epsilon\n\n ts_paths = []; ts_p_matrix = []\n ats_paths = []; ats_p_matrix = []\n\n for path, path_index in enumerate(paths):\n path_prob = p_matrix[path_index]\n # The path entropy rate for direct comparison with the\n # upper/lower bounds\n path_entropy_rate = -np.log(path_prob[-1]) / path_length\n\n is_typical = (\n (self.lower[-1] <= path_entropy_rate)\n and (path_entropy_rate <= self._upper)\n )\n\n if is_typical:\n ts_paths.append(path)\n ts_p_matrix.append(path_prob)\n else:\n ats_paths.append(path)\n ats_p_matrix.append(path_prob)\n\n typical_space = InfoSpace(ts_paths, ts_p_matrix)\n atypical_space = InfoSpace(ats_paths, ats_p_matrix)\n\n elif must_union:\n # Union the path data\n ts_empty = (typical_space.paths.size == 0)\n ats_empty = (atypical_space.paths.size == 0)\n\n if not ts_empty and not ats_empty:\n # Both are nonempty\n paths = np.vstack( (typical_space.paths, atypical_space.paths) )\n p_matrix = np.vstack(\n (typical_space._p_matrix, atypical_space._p_matrix)\n )\n elif ts_empty:\n # Only the typical_space is empty\n paths = atypical_space.paths\n p_matrix = atypical_space._p_matrix\n else:\n # Only the atypical_space is empty\n paths = typical_space.paths\n p_matrix = typical_space._p_matrix\n\n ### Storing properties ###\n self._paths = paths\n self._p_matrix = p_matrix\n\n self._probabilities = self._p_matrix[:, -1]\n\n self._entropy_rates = entropy_rates\n\n # Generalize the epsilon to a path_length dependent epsilon for\n # potential generalizations in child classes.\n if isinstance(epsilon, list):\n epsilon = np.array(epsilon)\n if not isinstance(epsilon, np.ndarray):\n # We were only provided a float\n epsilon = np.full(self.path_length, epsilon)\n self._epsilon = epsilon\n\n self._ts = typical_space\n self._ats = atypical_space\n\n\n #------------- Properties -------------#\n\n\n @property\n def entropy_rates(self):\n return self._entropy_rates\n \n\n @property\n def epsilon(self):\n return self._epsilon\n\n\n @property\n def upper(self):\n try:\n return self._upper\n except AttributeError:\n # It's never been calculated before\n self._upper = self.entropy_rates + self.epsilon \n\n return self._upper\n\n\n @property\n def lower(self):\n try:\n return self._lower\n except AttributeError:\n # It's never been calculated before.\n self._lower = self.entropy_rates - self.epsilon\n\n return self._lower\n\n\n @property\n def typicalities(self):\n \"\"\" Returns the matrix of typicalities. \"\"\"\n try:\n return self._typicalities\n except AttributeError:\n # It's never been calculated before\n typicalities = []\n ns = np.arange(1, self.path_length + 1)\n\n path_entropy_rates = -np.log(self._p_matrix) / ns\n\n self._typicalities = (\n (self.lower <= path_entropy_rates)\n & (path_entropy_rates <= self.upper)\n )\n\n return self._typicalities\n\n\n @property\n def ats(self):\n return self._ats\n\n\n @property\n def ts(self):\n return self._ts\n \n \n #------------- Static methods -------------#\n\n\n @staticmethod\n def shorten(pinfospace, path_length, return_index=False):\n \"\"\" Takes a PartitionedInformationSpace and shortens it. 
Since unique \n paths of length n, may be degenerate when truncated to paths of length m < n, we need to check for degeneracies and filter them out in both paths and probabilities.\n \n Args:\n pinfospace (PartitionedInfoSpace): the partitioned information space to shorten.\n\n path_length (int): the path length the information space should be shortened to. \n\n Kwargs:\n return_index (bool): returns the indices of the non-degenerate paths for the given path length using the original matrix. Useful for filtering other quantities of interest that may not be attached to this object. \n \n Returns:\n (PartitionedInfoSpace): the shortened PartitionedInfoSpace.\n \n \"\"\"\n # Hold the current information space to access properties\n old_pinfospace = pinfospace\n # Call parent method\n # Paths and p_matrix will be handled here along with any other \n # properties shared with parent. Sorted indices of non-degenerate \n # paths will be calculated here too.\n pinfospace, indices = InfoSpace.shorten(old_pinfospace, path_length, return_index=True)\n \n # Finish the rest of this object's specific properties\n\n # Truncate the entropy_rates\n entropy_rates = old_pinfospace.entropy_rates[:path_length]\n\n # Truncate the epsilon\n epsilon = old_pinfospace.epsilon[:path_length]\n\n # Truncate the typicalities matrix\n # Necessary to re-partition the space.\n # Filter out the typicalities matrix\n typicalities = old_pinfospace.typicalities[indices, :path_length]\n\n ### Partitioning ###\n ts_paths, ts_p_matrix = [], []\n ats_paths, ats_p_matrix = [], []\n\n paths = pinfospace.paths\n p_matrix = pinfospace._p_matrix\n for path_index, is_typical in enumerate(typicalities[:, -1]):\n path = paths[path_index]\n probs = p_matrix[path_index]\n\n if is_typical:\n ts_paths.append(path)\n ts_p_matrix.append(probs)\n else:\n ats_paths.append(path)\n ats_p_matrix.append(probs)\n\n # The partitioned spaces\n ts = InfoSpace(ts_paths, ts_p_matrix)\n ats = InfoSpace(ats_paths, ats_p_matrix)\n\n pinfospace = PartitionedInfoSpace(entropy_rates=entropy_rates, epsilon=epsilon, paths=paths, p_matrix=p_matrix, typical_space=ts, atypical_space=ats)\n \n # Save the pre-generated property\n pinfospace._typicalities = typicalities\n\n return pinfospace if not return_index else pinfospace, indices\n\n\n @staticmethod\n def partition_space(R, p, paths, epsilon=0.5, return_p=False):\n \"\"\" Partitions a path space using the dynamics provided.\n\n Args:\n R (np.ndarray/function): the transition matrix, time-dependent if provided as a function.\n\n p (np.ndarray): the initial marginal distribution.\n\n paths (np.ndarray): the portion of the path space to use.\n \n Kwargs:\n epsilon (float/np.ndarray): the radius/radii of the epsilon neighborhood to consider paths to be typical within.\n\n return_p (bool): False, return only the PartitionedInfoSpace, True returns both the PartitionedInfoSpace and a list of the marginal vs time.\n \n Returns:\n (ParitionedInfoSpace/2-tuple): the PartitionedInfoSpace (PIS) or the PIS and a list of the marginal versus observation step if return_p is True.\n\n \"\"\"\n\n #------------- Data preparation -------------#\n\n # Convert the transition matrix to add time-dependence as a constant \n # matrix if a constant matrix was provided\n if not callable(R):\n # Not being saved as an attribute since this is not easily\n # recoverable by being saved to a file.\n # Emphasize saving properties that can be saved/loaded.\n oldR = R\n R = lambda n : oldR\n\n num_paths, path_length = paths.shape\n\n p_matrix = 
np.zeros(paths.shape)\n # Initialize the marginal distribution data\n for x, path in enumerate(paths):\n # Just equal to the initial marginal\n p_matrix[x, 0] = p[path[0]]\n \n # Used for the bounds\n entropy_rates = np.array([\n entropy_rate(R(i))\n for i in range(path_length)\n ])\n\n # The marginal versus time\n if return_p: p_vs_time = [p]\n\n #------------- Data gathering -------------#\n\n # bar = gui.ProgressBar(path_length * num_paths, width=300, title='Gathering data...')\n\n ### Quantities versus time ###\n for current_path_length in range(2, path_length + 1):\n # The data index\n i = current_path_length - 1\n # Since the marginals are zero-indexed as are the paths\n step_index = current_path_length - 2\n\n currentR = R(current_path_length - 1)\n # Propagate the marginal one step and save it separately\n # for quantities like the temporal coarse graining term\n pstep = stp.step(currentR, p)\n\n ### Path probability calculations ###\n for x, path in enumerate(paths):\n\n current_state = path[step_index]\n jump_state = path[step_index + 1]\n\n # Forward calculations\n # Recursive calculation to save time\n last_joint = p_matrix[x, i - 1]\n jump_prob = currentR[jump_state, current_state]\n p_matrix[x, i] = last_joint * jump_prob\n\n # If updated in each iteration, slows down the simulation \n # drastically\n # bar.update(amount=num_paths)\n\n if return_p: p_vs_time.append(pstep)\n # Finished data gathering for this iteration, propagate marginal\n # forward in time\n p = pstep\n\n # bar.finish()\n\n ### Partitioning ###\n upper = entropy_rates + epsilon\n lower = entropy_rates - epsilon\n\n ts_paths, ts_p_matrix = [], []\n ats_paths, ats_p_matrix = [], []\n\n path_entropy_rates = -np.log(p_matrix[:, -1]) / path_length\n\n # Identify the paths that are typical and atypical\n for path_index, path_entropy_rate in enumerate(path_entropy_rates):\n # Can't create typicality matrix since partitioning it will\n # break the ordering\n # Determines whether this path is ultimately typical\n is_typical = (\n (lower[-1] <= path_entropy_rate)\n and (path_entropy_rate <= upper[-1])\n )\n\n probs = p_matrix[path_index]\n\n if is_typical:\n ts_paths.append(path)\n ts_p_matrix.append(probs)\n else:\n ats_paths.append(path)\n ats_p_matrix.append(probs)\n\n # The partitioned spaces\n ts = InfoSpace(ts_paths, ts_p_matrix)\n ats = InfoSpace(ats_paths, ats_p_matrix)\n\n pinfospace = PartitionedInfoSpace(\n entropy_rates=entropy_rates,\n epsilon=epsilon,\n paths=paths,\n p_matrix=p_matrix,\n typical_space=ts,\n atypical_space=ats\n )\n\n # Set pre-calculated properties\n pinfospace._upper = upper\n pinfospace._lower = lower\n\n return (pinfospace, p_vs_time) if return_p else pinfospace\n\n\n######################## Entry ########################\n\n\ndef main():\n print('info.py')\n \n ### Testing ###\n p = stp.rand_p(3)\n R = stp.self_assembly_transition_matrix()\n paths = stp.complete_path_space(3, 4)\n pinfospace = PartitionedInfoSpace.partition_space(R, p, paths)\n print( f'pinfospace.total_probability: {pinfospace.total_probability}' )\n print(pinfospace.ats.num_paths)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.unique", "numpy.arange", "numpy.full", "numpy.array", "numpy.zeros", "numpy.vstack", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
I--P/scipy
[ "ad02ef083824d195f04f267b141716e0f047197f" ]
[ "scipy/stats/_distn_infrastructure.py" ]
[ "#\n# Author: Travis Oliphant 2002-2011 with contributions from\n# SciPy Developers 2004-2011\n#\nfrom __future__ import division, print_function, absolute_import\n\nfrom scipy._lib.six import string_types, exec_\nfrom scipy._lib._util import getargspec_no_self as _getargspec\n\nimport sys\nimport keyword\nimport re\nimport types\nimport warnings\n\nfrom scipy.misc import doccer\nfrom ._distr_params import distcont, distdiscrete\nfrom scipy._lib._util import check_random_state, _lazywhere\nfrom scipy._lib._util import _valarray as valarray\n\nfrom scipy.special import (comb, chndtr, gammaln, entr, kl_div, xlogy, ive)\n\n# for root finding for discrete distribution ppf, and max likelihood estimation\nfrom scipy import optimize\n\n# for functions of continuous distributions (e.g. moments, entropy, cdf)\nfrom scipy import integrate\n\n# to approximate the pdf of a continuous distribution given its cdf\nfrom scipy.misc import derivative\n\nfrom numpy import (arange, putmask, ravel, take, ones, shape, ndarray,\n product, reshape, zeros, floor, logical_and, log, sqrt, exp)\n\nfrom numpy import (place, argsort, argmax, vectorize,\n asarray, nan, inf, isinf, NINF, empty)\n\nimport numpy as np\n\nfrom ._constants import _EPS, _XMAX\n\ntry:\n from new import instancemethod\nexcept ImportError:\n # Python 3\n def instancemethod(func, obj, cls):\n return types.MethodType(func, obj)\n\n\n# These are the docstring parts used for substitution in specific\n# distribution docstrings\n\ndocheaders = {'methods': \"\"\"\\nMethods\\n-------\\n\"\"\",\n 'notes': \"\"\"\\nNotes\\n-----\\n\"\"\",\n 'examples': \"\"\"\\nExamples\\n--------\\n\"\"\"}\n\n_doc_rvs = \"\"\"\\\n``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``\n Random variates.\n\"\"\"\n_doc_pdf = \"\"\"\\\n``pdf(x, %(shapes)s, loc=0, scale=1)``\n Probability density function.\n\"\"\"\n_doc_logpdf = \"\"\"\\\n``logpdf(x, %(shapes)s, loc=0, scale=1)``\n Log of the probability density function.\n\"\"\"\n_doc_pmf = \"\"\"\\\n``pmf(x, %(shapes)s, loc=0, scale=1)``\n Probability mass function.\n\"\"\"\n_doc_logpmf = \"\"\"\\\n``logpmf(x, %(shapes)s, loc=0, scale=1)``\n Log of the probability mass function.\n\"\"\"\n_doc_cdf = \"\"\"\\\n``cdf(x, %(shapes)s, loc=0, scale=1)``\n Cumulative density function.\n\"\"\"\n_doc_logcdf = \"\"\"\\\n``logcdf(x, %(shapes)s, loc=0, scale=1)``\n Log of the cumulative density function.\n\"\"\"\n_doc_sf = \"\"\"\\\n``sf(x, %(shapes)s, loc=0, scale=1)``\n Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).\n\"\"\"\n_doc_logsf = \"\"\"\\\n``logsf(x, %(shapes)s, loc=0, scale=1)``\n Log of the survival function.\n\"\"\"\n_doc_ppf = \"\"\"\\\n``ppf(q, %(shapes)s, loc=0, scale=1)``\n Percent point function (inverse of ``cdf`` --- percentiles).\n\"\"\"\n_doc_isf = \"\"\"\\\n``isf(q, %(shapes)s, loc=0, scale=1)``\n Inverse survival function (inverse of ``sf``).\n\"\"\"\n_doc_moment = \"\"\"\\\n``moment(n, %(shapes)s, loc=0, scale=1)``\n Non-central moment of order n\n\"\"\"\n_doc_stats = \"\"\"\\\n``stats(%(shapes)s, loc=0, scale=1, moments='mv')``\n Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').\n\"\"\"\n_doc_entropy = \"\"\"\\\n``entropy(%(shapes)s, loc=0, scale=1)``\n (Differential) entropy of the RV.\n\"\"\"\n_doc_fit = \"\"\"\\\n``fit(data, %(shapes)s, loc=0, scale=1)``\n Parameter estimates for generic data.\n\"\"\"\n_doc_expect = \"\"\"\\\n``expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``\n Expected value of a 
function (of one argument) with respect to the distribution.\n\"\"\"\n_doc_expect_discrete = \"\"\"\\\n``expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)``\n Expected value of a function (of one argument) with respect to the distribution.\n\"\"\"\n_doc_median = \"\"\"\\\n``median(%(shapes)s, loc=0, scale=1)``\n Median of the distribution.\n\"\"\"\n_doc_mean = \"\"\"\\\n``mean(%(shapes)s, loc=0, scale=1)``\n Mean of the distribution.\n\"\"\"\n_doc_var = \"\"\"\\\n``var(%(shapes)s, loc=0, scale=1)``\n Variance of the distribution.\n\"\"\"\n_doc_std = \"\"\"\\\n``std(%(shapes)s, loc=0, scale=1)``\n Standard deviation of the distribution.\n\"\"\"\n_doc_interval = \"\"\"\\\n``interval(alpha, %(shapes)s, loc=0, scale=1)``\n Endpoints of the range that contains alpha percent of the distribution\n\"\"\"\n_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,\n _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,\n _doc_logsf, _doc_ppf, _doc_isf, _doc_moment,\n _doc_stats, _doc_entropy, _doc_fit,\n _doc_expect, _doc_median,\n _doc_mean, _doc_var, _doc_std, _doc_interval])\n\n_doc_default_longsummary = \"\"\"\\\nAs an instance of the `rv_continuous` class, `%(name)s` object inherits from it\na collection of generic methods (see below for the full list),\nand completes them with details specific for this particular distribution.\n\"\"\"\n\n_doc_default_frozen_note = \"\"\"\nAlternatively, the object may be called (as a function) to fix the shape,\nlocation, and scale parameters returning a \"frozen\" continuous RV object:\n\nrv = %(name)s(%(shapes)s, loc=0, scale=1)\n - Frozen RV object with the same methods but holding the given shape,\n location, and scale fixed.\n\"\"\"\n_doc_default_example = \"\"\"\\\nExamples\n--------\n>>> from scipy.stats import %(name)s\n>>> import matplotlib.pyplot as plt\n>>> fig, ax = plt.subplots(1, 1)\n\nCalculate a few first moments:\n\n%(set_vals_stmt)s\n>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')\n\nDisplay the probability density function (``pdf``):\n\n>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),\n... %(name)s.ppf(0.99, %(shapes)s), 100)\n>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),\n... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')\n\nAlternatively, the distribution object can be called (as a function)\nto fix the shape, location and scale parameters. This returns a \"frozen\"\nRV object holding the given parameters fixed.\n\nFreeze the distribution and display the frozen ``pdf``:\n\n>>> rv = %(name)s(%(shapes)s)\n>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')\n\nCheck accuracy of ``cdf`` and ``ppf``:\n\n>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)\n>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))\nTrue\n\nGenerate random numbers:\n\n>>> r = %(name)s.rvs(%(shapes)s, size=1000)\n\nAnd compare the histogram:\n\n>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)\n>>> ax.legend(loc='best', frameon=False)\n>>> plt.show()\n\n\"\"\"\n\n_doc_default_locscale = \"\"\"\\\nThe probability density above is defined in the \"standardized\" form. 
To shift\nand/or scale the distribution use the ``loc`` and ``scale`` parameters.\nSpecifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically\nequivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with\n``y = (x - loc) / scale``.\n\"\"\"\n\n_doc_default = ''.join([_doc_default_longsummary,\n _doc_allmethods,\n '\\n',\n _doc_default_example])\n\n_doc_default_before_notes = ''.join([_doc_default_longsummary,\n _doc_allmethods])\n\ndocdict = {\n 'rvs': _doc_rvs,\n 'pdf': _doc_pdf,\n 'logpdf': _doc_logpdf,\n 'cdf': _doc_cdf,\n 'logcdf': _doc_logcdf,\n 'sf': _doc_sf,\n 'logsf': _doc_logsf,\n 'ppf': _doc_ppf,\n 'isf': _doc_isf,\n 'stats': _doc_stats,\n 'entropy': _doc_entropy,\n 'fit': _doc_fit,\n 'moment': _doc_moment,\n 'expect': _doc_expect,\n 'interval': _doc_interval,\n 'mean': _doc_mean,\n 'std': _doc_std,\n 'var': _doc_var,\n 'median': _doc_median,\n 'allmethods': _doc_allmethods,\n 'longsummary': _doc_default_longsummary,\n 'frozennote': _doc_default_frozen_note,\n 'example': _doc_default_example,\n 'default': _doc_default,\n 'before_notes': _doc_default_before_notes,\n 'after_notes': _doc_default_locscale\n}\n\n# Reuse common content between continuous and discrete docs, change some\n# minor bits.\ndocdict_discrete = docdict.copy()\n\ndocdict_discrete['pmf'] = _doc_pmf\ndocdict_discrete['logpmf'] = _doc_logpmf\ndocdict_discrete['expect'] = _doc_expect_discrete\n_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',\n 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',\n 'mean', 'var', 'std', 'interval']\nfor obj in _doc_disc_methods:\n docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')\ndocdict_discrete.pop('pdf')\ndocdict_discrete.pop('logpdf')\n\n_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])\ndocdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods\n\ndocdict_discrete['longsummary'] = _doc_default_longsummary.replace(\n 'rv_continuous', 'rv_discrete')\n\n_doc_default_frozen_note = \"\"\"\nAlternatively, the object may be called (as a function) to fix the shape and\nlocation parameters returning a \"frozen\" discrete RV object:\n\nrv = %(name)s(%(shapes)s, loc=0)\n - Frozen RV object with the same methods but holding the given shape and\n location fixed.\n\"\"\"\ndocdict_discrete['frozennote'] = _doc_default_frozen_note\n\n_doc_default_discrete_example = \"\"\"\\\nExamples\n--------\n>>> from scipy.stats import %(name)s\n>>> import matplotlib.pyplot as plt\n>>> fig, ax = plt.subplots(1, 1)\n\nCalculate a few first moments:\n\n%(set_vals_stmt)s\n>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')\n\nDisplay the probability mass function (``pmf``):\n\n>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),\n... %(name)s.ppf(0.99, %(shapes)s))\n>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')\n>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)\n\nAlternatively, the distribution object can be called (as a function)\nto fix the shape and location. This returns a \"frozen\" RV object holding\nthe given parameters fixed.\n\nFreeze the distribution and display the frozen ``pmf``:\n\n>>> rv = %(name)s(%(shapes)s)\n>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,\n... 
label='frozen pmf')\n>>> ax.legend(loc='best', frameon=False)\n>>> plt.show()\n\nCheck accuracy of ``cdf`` and ``ppf``:\n\n>>> prob = %(name)s.cdf(x, %(shapes)s)\n>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))\nTrue\n\nGenerate random numbers:\n\n>>> r = %(name)s.rvs(%(shapes)s, size=1000)\n\"\"\"\n\n\n_doc_default_discrete_locscale = \"\"\"\\\nThe probability mass function above is defined in the \"standardized\" form.\nTo shift distribution use the ``loc`` parameter.\nSpecifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically\nequivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.\n\"\"\"\n\ndocdict_discrete['example'] = _doc_default_discrete_example\ndocdict_discrete['after_notes'] = _doc_default_discrete_locscale\n\n_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],\n docdict_discrete['allmethods']])\ndocdict_discrete['before_notes'] = _doc_default_before_notes\n\n_doc_default_disc = ''.join([docdict_discrete['longsummary'],\n docdict_discrete['allmethods'],\n docdict_discrete['frozennote'],\n docdict_discrete['example']])\ndocdict_discrete['default'] = _doc_default_disc\n\n# clean up all the separate docstring elements, we do not need them anymore\nfor obj in [s for s in dir() if s.startswith('_doc_')]:\n exec('del ' + obj)\ndel obj\ntry:\n del s\nexcept NameError:\n # in Python 3, loop variables are not visible after the loop\n pass\n\n\ndef _moment(data, n, mu=None):\n if mu is None:\n mu = data.mean()\n return ((data - mu)**n).mean()\n\n\ndef _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):\n if (n == 0):\n return 1.0\n elif (n == 1):\n if mu is None:\n val = moment_func(1, *args)\n else:\n val = mu\n elif (n == 2):\n if mu2 is None or mu is None:\n val = moment_func(2, *args)\n else:\n val = mu2 + mu*mu\n elif (n == 3):\n if g1 is None or mu2 is None or mu is None:\n val = moment_func(3, *args)\n else:\n mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment\n val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment\n elif (n == 4):\n if g1 is None or g2 is None or mu2 is None or mu is None:\n val = moment_func(4, *args)\n else:\n mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment\n mu3 = g1*np.power(mu2, 1.5) # 3rd central moment\n val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu\n else:\n val = moment_func(n, *args)\n\n return val\n\n\ndef _skew(data):\n \"\"\"\n skew is third central moment / variance**(1.5)\n \"\"\"\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu)**2).mean()\n m3 = ((data - mu)**3).mean()\n return m3 / np.power(m2, 1.5)\n\n\ndef _kurtosis(data):\n \"\"\"\n kurtosis is fourth central moment / variance**2 - 3\n \"\"\"\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu)**2).mean()\n m4 = ((data - mu)**4).mean()\n return m4 / m2**2 - 3\n\n\n# Frozen RV class\nclass rv_frozen(object):\n\n def __init__(self, dist, *args, **kwds):\n self.args = args\n self.kwds = kwds\n\n # create a new instance\n self.dist = dist.__class__(**dist._updated_ctor_param())\n\n # a, b may be set in _argcheck, depending on *args, **kwds. 
Ouch.\n shapes, _, _ = self.dist._parse_args(*args, **kwds)\n self.dist._argcheck(*shapes)\n self.a, self.b = self.dist.a, self.dist.b\n\n @property\n def random_state(self):\n return self.dist._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self.dist._random_state = check_random_state(seed)\n\n def pdf(self, x): # raises AttributeError in frozen discrete distribution\n return self.dist.pdf(x, *self.args, **self.kwds)\n\n def logpdf(self, x):\n return self.dist.logpdf(x, *self.args, **self.kwds)\n\n def cdf(self, x):\n return self.dist.cdf(x, *self.args, **self.kwds)\n\n def logcdf(self, x):\n return self.dist.logcdf(x, *self.args, **self.kwds)\n\n def ppf(self, q):\n return self.dist.ppf(q, *self.args, **self.kwds)\n\n def isf(self, q):\n return self.dist.isf(q, *self.args, **self.kwds)\n\n def rvs(self, size=None, random_state=None):\n kwds = self.kwds.copy()\n kwds.update({'size': size, 'random_state': random_state})\n return self.dist.rvs(*self.args, **kwds)\n\n def sf(self, x):\n return self.dist.sf(x, *self.args, **self.kwds)\n\n def logsf(self, x):\n return self.dist.logsf(x, *self.args, **self.kwds)\n\n def stats(self, moments='mv'):\n kwds = self.kwds.copy()\n kwds.update({'moments': moments})\n return self.dist.stats(*self.args, **kwds)\n\n def median(self):\n return self.dist.median(*self.args, **self.kwds)\n\n def mean(self):\n return self.dist.mean(*self.args, **self.kwds)\n\n def var(self):\n return self.dist.var(*self.args, **self.kwds)\n\n def std(self):\n return self.dist.std(*self.args, **self.kwds)\n\n def moment(self, n):\n return self.dist.moment(n, *self.args, **self.kwds)\n\n def entropy(self):\n return self.dist.entropy(*self.args, **self.kwds)\n\n def pmf(self, k):\n return self.dist.pmf(k, *self.args, **self.kwds)\n\n def logpmf(self, k):\n return self.dist.logpmf(k, *self.args, **self.kwds)\n\n def interval(self, alpha):\n return self.dist.interval(alpha, *self.args, **self.kwds)\n\n def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):\n # expect method only accepts shape parameters as positional args\n # hence convert self.args, self.kwds, also loc/scale\n # See the .expect method docstrings for the meaning of\n # other parameters.\n a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)\n if isinstance(self.dist, rv_discrete):\n return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)\n else:\n return self.dist.expect(func, a, loc, scale, lb, ub,\n conditional, **kwds)\n\n\n# This should be rewritten\ndef argsreduce(cond, *args):\n \"\"\"Return the sequence of ravel(args[i]) where ravel(condition) is\n True in 1D.\n\n Examples\n --------\n >>> import numpy as np\n >>> rand = np.random.random_sample\n >>> A = rand((4, 5))\n >>> B = 2\n >>> C = rand((1, 5))\n >>> cond = np.ones(A.shape)\n >>> [A1, B1, C1] = argsreduce(cond, A, B, C)\n >>> B1.shape\n (20,)\n >>> cond[2,:] = 0\n >>> [A2, B2, C2] = argsreduce(cond, A, B, C)\n >>> B2.shape\n (15,)\n\n \"\"\"\n newargs = np.atleast_1d(*args)\n if not isinstance(newargs, list):\n newargs = [newargs, ]\n expand_arr = (cond == cond)\n return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]\n\n\nparse_arg_template = \"\"\"\ndef _parse_args(self, %(shape_arg_str)s %(locscale_in)s):\n return (%(shape_arg_str)s), %(locscale_out)s\n\ndef _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):\n return (%(shape_arg_str)s), %(locscale_out)s, size\n\ndef _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):\n return 
(%(shape_arg_str)s), %(locscale_out)s, moments\n\"\"\"\n\n\n# Both the continuous and discrete distributions depend on ncx2.\n# I think the function name ncx2 is an abbreviation for noncentral chi squared.\n\ndef _ncx2_log_pdf(x, df, nc):\n # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor\n # of exp(-xs*ns) into the ive function to improve numerical stability\n # at large values of xs. See also `rice.pdf`.\n df2 = df/2.0 - 1.0\n xs, ns = np.sqrt(x), np.sqrt(nc)\n res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2\n res += np.log(ive(df2, xs*ns) / 2.0)\n return res\n\n\ndef _ncx2_pdf(x, df, nc):\n return np.exp(_ncx2_log_pdf(x, df, nc))\n\n\ndef _ncx2_cdf(x, df, nc):\n return chndtr(x, df, nc)\n\n\nclass rv_generic(object):\n \"\"\"Class which encapsulates common functionality between rv_discrete\n and rv_continuous.\n\n \"\"\"\n def __init__(self, seed=None):\n super(rv_generic, self).__init__()\n\n # figure out if _stats signature has 'moments' keyword\n sign = _getargspec(self._stats)\n self._stats_has_moments = ((sign[2] is not None) or\n ('moments' in sign[0]))\n self._random_state = check_random_state(seed)\n\n @property\n def random_state(self):\n \"\"\" Get or set the RandomState object for generating random variates.\n\n This can be either None or an existing RandomState object.\n\n If None (or np.random), use the RandomState singleton used by np.random.\n If already a RandomState instance, use it.\n If an int, use a new RandomState instance seeded with seed.\n\n \"\"\"\n return self._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self._random_state = check_random_state(seed)\n\n def __getstate__(self):\n return self._updated_ctor_param(), self._random_state\n\n def __setstate__(self, state):\n ctor_param, r = state\n self.__init__(**ctor_param)\n self._random_state = r\n return self\n\n def _construct_argparser(\n self, meths_to_inspect, locscale_in, locscale_out):\n \"\"\"Construct the parser for the shape arguments.\n\n Generates the argument-parsing functions dynamically and attaches\n them to the instance.\n Is supposed to be called in __init__ of a class for each distribution.\n\n If self.shapes is a non-empty string, interprets it as a\n comma-separated list of shape parameters.\n\n Otherwise inspects the call signatures of `meths_to_inspect`\n and constructs the argument-parsing functions from these.\n In this case also sets `shapes` and `numargs`.\n \"\"\"\n\n if self.shapes:\n # sanitize the user-supplied shapes\n if not isinstance(self.shapes, string_types):\n raise TypeError('shapes must be a string.')\n\n shapes = self.shapes.replace(',', ' ').split()\n\n for field in shapes:\n if keyword.iskeyword(field):\n raise SyntaxError('keywords cannot be used as shapes.')\n if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):\n raise SyntaxError(\n 'shapes must be valid python identifiers')\n else:\n # find out the call signatures (_pdf, _cdf etc), deduce shape\n # arguments. 
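# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): rv_frozen is the
# object returned when a distribution is called with its shape/loc/scale
# arguments.  It stores those arguments and forwards every method call to
# the underlying distribution, as a quick check with scipy.stats.gamma
# shows.  All variable names below are local to this sketch.
import numpy as np
from scipy import stats

frozen = stats.gamma(a=3.0, loc=0.0, scale=2.0)   # an rv_frozen instance
x = np.linspace(0.1, 20.0, 5)

# Calling the frozen object is equivalent to passing the parameters each time.
assert np.allclose(frozen.pdf(x),
                   stats.gamma.pdf(x, a=3.0, loc=0.0, scale=2.0))
assert np.isclose(frozen.mean(), stats.gamma.mean(a=3.0, scale=2.0))

# rvs accepts size/random_state exactly like the unfrozen method.
draws = frozen.rvs(size=4, random_state=123)
assert draws.shape == (4,)
# ----------------------------------------------------------------------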
Generic methods only have 'self, x', any further args\n # are shapes.\n shapes_list = []\n for meth in meths_to_inspect:\n shapes_args = _getargspec(meth) # NB: does not contain self\n args = shapes_args.args[1:] # peel off 'x', too\n\n if args:\n shapes_list.append(args)\n\n # *args or **kwargs are not allowed w/automatic shapes\n if shapes_args.varargs is not None:\n raise TypeError(\n '*args are not allowed w/out explicit shapes')\n if shapes_args.keywords is not None:\n raise TypeError(\n '**kwds are not allowed w/out explicit shapes')\n if shapes_args.defaults is not None:\n raise TypeError('defaults are not allowed for shapes')\n\n if shapes_list:\n shapes = shapes_list[0]\n\n # make sure the signatures are consistent\n for item in shapes_list:\n if item != shapes:\n raise TypeError('Shape arguments are inconsistent.')\n else:\n shapes = []\n\n # have the arguments, construct the method from template\n shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None\n dct = dict(shape_arg_str=shapes_str,\n locscale_in=locscale_in,\n locscale_out=locscale_out,\n )\n ns = {}\n exec_(parse_arg_template % dct, ns)\n # NB: attach to the instance, not class\n for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:\n setattr(self, name,\n instancemethod(ns[name], self, self.__class__)\n )\n\n self.shapes = ', '.join(shapes) if shapes else None\n if not hasattr(self, 'numargs'):\n # allows more general subclassing with *args\n self.numargs = len(shapes)\n\n def _construct_doc(self, docdict, shapes_vals=None):\n \"\"\"Construct the instance docstring with string substitutions.\"\"\"\n tempdict = docdict.copy()\n tempdict['name'] = self.name or 'distname'\n tempdict['shapes'] = self.shapes or ''\n\n if shapes_vals is None:\n shapes_vals = ()\n vals = ', '.join('%.3g' % val for val in shapes_vals)\n tempdict['vals'] = vals\n\n tempdict['shapes_'] = self.shapes or ''\n if self.shapes and self.numargs == 1:\n tempdict['shapes_'] += ','\n\n if self.shapes:\n tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)\n else:\n tempdict['set_vals_stmt'] = ''\n\n if self.shapes is None:\n # remove shapes from call parameters if there are none\n for item in ['default', 'before_notes']:\n tempdict[item] = tempdict[item].replace(\n \"\\n%(shapes)s : array_like\\n shape parameters\", \"\")\n for i in range(2):\n if self.shapes is None:\n # necessary because we use %(shapes)s in two forms (w w/o \", \")\n self.__doc__ = self.__doc__.replace(\"%(shapes)s, \", \"\")\n self.__doc__ = doccer.docformat(self.__doc__, tempdict)\n\n # correct for empty shapes\n self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')\n\n def _construct_default_doc(self, longname=None, extradoc=None,\n docdict=None, discrete='continuous'):\n \"\"\"Construct instance docstring from the default template.\"\"\"\n if longname is None:\n longname = 'A'\n if extradoc is None:\n extradoc = ''\n if extradoc.startswith('\\n\\n'):\n extradoc = extradoc[2:]\n self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),\n '\\n\\n%(before_notes)s\\n', docheaders['notes'],\n extradoc, '\\n%(example)s'])\n self._construct_doc(docdict)\n\n def freeze(self, *args, **kwds):\n \"\"\"Freeze the distribution for the given arguments.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution. 
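# ----------------------------------------------------------------------
# Illustrative sketch (not part of the module above): _construct_argparser
# deduces the shape parameters either from an explicit `shapes` string or
# from the signature of the private _pdf/_cdf methods.  The toy subclass
# below (the name `tri_gen` and its density are invented for this sketch)
# defines _pdf(self, x, c), so the single shape parameter `c` is picked up
# automatically and the generic machinery supplies cdf() by numerical
# integration.
import numpy as np
from scipy import stats

class tri_gen(stats.rv_continuous):
    "Toy triangular-type density 2*(c - x)/c**2 on [0, c]."
    def _pdf(self, x, c):
        return 2.0 * (c - x) / c ** 2

tri = tri_gen(a=0.0, name='tri')                 # no shapes= given
assert tri.shapes == 'c' and tri.numargs == 1    # deduced from _pdf(x, c)

assert np.isclose(tri.pdf(0.5, c=2.0), 0.75)     # 2*(2-0.5)/4
assert np.isclose(tri.cdf(0.5, c=2.0), 0.4375)   # generic cdf integrates _pdf
# (Support handling is simplified here: the upper bound really depends on c.)
# ----------------------------------------------------------------------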
Should include all\n            the non-optional arguments, may include ``loc`` and ``scale``.\n\n        Returns\n        -------\n        rv_frozen : rv_frozen instance\n            The frozen distribution.\n\n        \"\"\"\n        return rv_frozen(self, *args, **kwds)\n\n    def __call__(self, *args, **kwds):\n        return self.freeze(*args, **kwds)\n    __call__.__doc__ = freeze.__doc__\n\n    # The actual calculation functions (no basic checking need be done)\n    # If these are defined, the others won't be looked at.\n    # Otherwise, the other set can be defined.\n    def _stats(self, *args, **kwds):\n        return None, None, None, None\n\n    # Non-central moments (moments about zero); see also `moment`.\n    def _munp(self, n, *args):\n        # Silence floating point warnings from integration.\n        olderr = np.seterr(all='ignore')\n        vals = self.generic_moment(n, *args)\n        np.seterr(**olderr)\n        return vals\n\n    ## These are the methods you must define (standard form functions)\n    ## NB: generic _pdf, _logpdf, _cdf are different for\n    ## rv_continuous and rv_discrete hence are defined in there\n    def _argcheck(self, *args):\n        \"\"\"Default check for correct values on args and keywords.\n\n        Returns condition array of 1's where arguments are correct and\n        0's where they are not.\n\n        \"\"\"\n        cond = 1\n        for arg in args:\n            cond = logical_and(cond, (asarray(arg) > 0))\n        return cond\n\n    ##(return 1-d using self._size to get number)\n    def _rvs(self, *args):\n        ## Use basic inverse cdf algorithm for RV generation as default.\n        U = self._random_state.random_sample(self._size)\n        Y = self._ppf(U, *args)\n        return Y\n\n    def _logcdf(self, x, *args):\n        return log(self._cdf(x, *args))\n\n    def _sf(self, x, *args):\n        return 1.0-self._cdf(x, *args)\n\n    def _logsf(self, x, *args):\n        return log(self._sf(x, *args))\n\n    def _ppf(self, q, *args):\n        return self._ppfvec(q, *args)\n\n    def _isf(self, q, *args):\n        return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses\n\n    # These are actually called, and should not be overwritten if you\n    # want to keep error checking.\n    def rvs(self, *args, **kwds):\n        \"\"\"\n        Random variates of given type.\n\n        Parameters\n        ----------\n        arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n scale : array_like, optional\n Scale parameter (default=1).\n size : int or tuple of ints, optional\n Defining number of random variates (default is 1).\n random_state : None or int or ``np.random.RandomState`` instance, optional\n If int or RandomState, use it for drawing the random variates.\n If None, rely on ``self.random_state``.\n Default is None.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of given `size`.\n\n \"\"\"\n discrete = kwds.pop('discrete', None)\n rndm = kwds.pop('random_state', None)\n args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n cond = logical_and(self._argcheck(*args), (scale >= 0))\n if not np.all(cond):\n raise ValueError(\"Domain error in arguments.\")\n\n # self._size is total size of all output values\n self._size = product(size, axis=0)\n if self._size is not None and self._size > 1:\n size = np.array(size, ndmin=1)\n\n if np.all(scale == 0):\n return loc*ones(size, 'd')\n\n # extra gymnastics needed for a custom random_state\n if rndm is not None:\n random_state_saved = self._random_state\n self._random_state = check_random_state(rndm)\n\n vals = self._rvs(*args)\n if self._size is not None:\n vals = reshape(vals, size)\n\n vals = vals * scale + loc\n\n # do not forget to restore the _random_state\n if rndm is not None:\n self._random_state = random_state_saved\n\n # Cast to int if discrete\n if discrete:\n if np.isscalar(vals):\n vals = int(vals)\n else:\n vals = vals.astype(int)\n\n return vals\n\n def stats(self, *args, **kwds):\n \"\"\"\n Some statistics of the given RV.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional (continuous RVs only)\n scale parameter (default=1)\n moments : str, optional\n composed of letters ['mvsk'] defining which moments to compute:\n 'm' = mean,\n 'v' = variance,\n 's' = (Fisher's) skew,\n 'k' = (Fisher's) kurtosis.\n (default is 'mv')\n\n Returns\n -------\n stats : sequence\n of requested moments.\n\n \"\"\"\n args, loc, scale, moments = self._parse_args_stats(*args, **kwds)\n # scale = 1 by construction for discrete RVs\n loc, scale = map(asarray, (loc, scale))\n args = tuple(map(asarray, args))\n cond = self._argcheck(*args) & (scale > 0) & (loc == loc)\n output = []\n default = valarray(shape(cond), self.badvalue)\n\n # Use only entries that are valid in calculation\n if np.any(cond):\n goodargs = argsreduce(cond, *(args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n\n if self._stats_has_moments:\n mu, mu2, g1, g2 = self._stats(*goodargs,\n **{'moments': moments})\n else:\n mu, mu2, g1, g2 = self._stats(*goodargs)\n if g1 is None:\n mu3 = None\n else:\n if mu2 is None:\n mu2 = self._munp(2, *goodargs)\n if g2 is None:\n # (mu2**1.5) breaks down for nan and inf\n mu3 = g1 * np.power(mu2, 1.5)\n\n if 'm' in moments:\n if mu is None:\n mu = self._munp(1, *goodargs)\n out0 = default.copy()\n place(out0, cond, mu * scale + loc)\n output.append(out0)\n\n if 'v' in moments:\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n mu2 = mu2p - mu * mu\n if np.isinf(mu):\n #if mean is inf then var is also inf\n mu2 = np.inf\n out0 = default.copy()\n place(out0, cond, mu2 * scale * scale)\n output.append(out0)\n\n if 's' in moments:\n if g1 is None:\n mu3p = self._munp(3, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n mu2 = mu2p - mu * mu\n mu3 = mu3p - 3 * mu * mu2 - mu**3\n g1 = mu3 / np.power(mu2, 1.5)\n out0 = default.copy()\n place(out0, cond, g1)\n output.append(out0)\n\n if 'k' in moments:\n if g2 is None:\n mu4p = self._munp(4, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n mu2 = mu2p - mu * mu\n if mu3 is None:\n mu3p = self._munp(3, *goodargs)\n mu3 = mu3p - 3 * mu * mu2 - mu**3\n mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4\n g2 = mu4 / mu2**2.0 - 3.0\n out0 = default.copy()\n place(out0, cond, g2)\n output.append(out0)\n else: # no valid args\n output = []\n for _ in moments:\n out0 = default.copy()\n output.append(out0)\n\n if len(output) == 1:\n return output[0]\n else:\n return tuple(output)\n\n def entropy(self, *args, **kwds):\n \"\"\"\n Differential entropy of the RV.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n scale : array_like, optional (continuous distributions only).\n Scale parameter (default=1).\n\n Notes\n -----\n Entropy is defined base `e`:\n\n >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))\n >>> np.allclose(drv.entropy(), np.log(2.0))\n True\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n # NB: for discrete distributions scale=1 by construction in _parse_args\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n output = zeros(shape(cond0), 'd')\n place(output, (1-cond0), self.badvalue)\n goodargs = argsreduce(cond0, *args)\n # np.vectorize doesn't work when numargs == 0 in numpy 1.6.2. Once the\n # lowest supported numpy version is >= 1.7.0, this special case can be\n # removed (see gh-4314).\n if self.numargs == 0:\n place(output, cond0, self._entropy() + log(scale))\n else:\n place(output, cond0, self.vecentropy(*goodargs) + log(scale))\n return output\n\n def moment(self, n, *args, **kwds):\n \"\"\"\n n-th order non-central moment of distribution.\n\n Parameters\n ----------\n n : int, n >= 1\n Order of moment.\n arg1, arg2, arg3,... : float\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n if not (self._argcheck(*args) and (scale > 0)):\n return nan\n if (floor(n) != n):\n raise ValueError(\"Moment must be an integer.\")\n if (n < 0):\n raise ValueError(\"Moment must be positive.\")\n mu, mu2, g1, g2 = None, None, None, None\n if (n > 0) and (n < 5):\n if self._stats_has_moments:\n mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}\n else:\n mdict = {}\n mu, mu2, g1, g2 = self._stats(*args, **mdict)\n val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)\n\n # Convert to transformed X = L + S*Y\n # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)\n if loc == 0:\n return scale**n * val\n else:\n result = 0\n fac = float(scale) / float(loc)\n for k in range(n):\n valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)\n result += comb(n, k, exact=True)*(fac**k) * valk\n result += fac**n * val\n return result * loc**n\n\n def median(self, *args, **kwds):\n \"\"\"\n Median of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n Location parameter, Default is 0.\n scale : array_like, optional\n Scale parameter, Default is 1.\n\n Returns\n -------\n median : float\n The median of the distribution.\n\n See Also\n --------\n stats.distributions.rv_discrete.ppf\n Inverse of the CDF\n\n \"\"\"\n return self.ppf(0.5, *args, **kwds)\n\n def mean(self, *args, **kwds):\n \"\"\"\n Mean of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n mean : float\n the mean of the distribution\n\n \"\"\"\n kwds['moments'] = 'm'\n res = self.stats(*args, **kwds)\n if isinstance(res, ndarray) and res.ndim == 0:\n return res[()]\n return res\n\n def var(self, *args, **kwds):\n \"\"\"\n Variance of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n var : float\n the variance of the distribution\n\n \"\"\"\n kwds['moments'] = 'v'\n res = self.stats(*args, **kwds)\n if isinstance(res, ndarray) and res.ndim == 0:\n return res[()]\n return res\n\n def std(self, *args, **kwds):\n \"\"\"\n Standard deviation of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n std : float\n standard deviation of the distribution\n\n \"\"\"\n kwds['moments'] = 'v'\n res = sqrt(self.stats(*args, **kwds))\n return res\n\n def interval(self, alpha, *args, **kwds):\n \"\"\"\n Confidence interval with equal areas around the median.\n\n Parameters\n ----------\n alpha : array_like of float\n Probability that an rv will be drawn from the returned range.\n Each value should be in the range [0, 1].\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter, Default is 0.\n scale : array_like, optional\n scale parameter, Default is 1.\n\n Returns\n -------\n a, b : ndarray of float\n end-points of range that contain ``100 * alpha %`` of the rv's\n possible values.\n\n \"\"\"\n alpha = asarray(alpha)\n if np.any((alpha > 1) | (alpha < 0)):\n raise ValueError(\"alpha must be between 0 and 1 inclusive\")\n q1 = (1.0-alpha)/2\n q2 = (1.0+alpha)/2\n a = self.ppf(q1, *args, **kwds)\n b = self.ppf(q2, *args, **kwds)\n return a, b\n\n\n## continuous random variables: implement maybe later\n##\n## hf --- Hazard Function (PDF / SF)\n## chf --- Cumulative hazard function (-log(SF))\n## psf --- Probability sparsity function (reciprocal of the pdf) in\n## units of percent-point-function (as a function of q).\n## Also, the derivative of the percent-point function.\n\nclass rv_continuous(rv_generic):\n \"\"\"\n A generic continuous random variable class meant for subclassing.\n\n `rv_continuous` is a base class to construct specific distribution classes\n and instances for continuous random variables. 
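# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): the public
# helpers defined above (stats, moment, entropy, median, mean, var, std,
# interval) all funnel through _parse_args and the private _stats/_munp
# hooks.  A quick tour with the exponential distribution, whose moments and
# differential entropy are known in closed form:
import numpy as np
from scipy import stats

scale = 2.0                                   # exponential with mean 2
m, v, s, k = stats.expon.stats(scale=scale, moments='mvsk')
assert np.isclose(m, scale) and np.isclose(v, scale ** 2)
assert np.isclose(s, 2.0) and np.isclose(k, 6.0)

# moment(n) is the n-th non-central moment: E[X**n] = n! * scale**n.
assert np.isclose(stats.expon.moment(3, scale=scale), 6 * scale ** 3)

# entropy() is the differential entropy, 1 + log(scale) for the exponential.
assert np.isclose(stats.expon.entropy(scale=scale), 1 + np.log(scale))

# interval(alpha) is the equal-tailed range obtained from ppf.
lo, hi = stats.expon.interval(0.9, scale=scale)
assert np.isclose(lo, stats.expon.ppf(0.05, scale=scale))
assert np.isclose(hi, stats.expon.ppf(0.95, scale=scale))
# ----------------------------------------------------------------------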
It cannot be used\n directly as a distribution.\n\n Parameters\n ----------\n momtype : int, optional\n The type of generic moment calculation to use: 0 for pdf, 1 (default)\n for ppf.\n a : float, optional\n Lower bound of the support of the distribution, default is minus\n infinity.\n b : float, optional\n Upper bound of the support of the distribution, default is plus\n infinity.\n xtol : float, optional\n The tolerance for fixed point calculation for generic ppf.\n badvalue : float, optional\n The value in a result arrays that indicates a value that for which\n some argument restriction is violated, default is np.nan.\n name : str, optional\n The name of the instance. This string is used to construct the default\n example for distributions.\n longname : str, optional\n This string is used as part of the first line of the docstring returned\n when a subclass has no docstring of its own. Note: `longname` exists\n for backwards compatibility, do not use for new subclasses.\n shapes : str, optional\n The shape of the distribution. For example ``\"m, n\"`` for a\n distribution that takes two integers as the two shape arguments for all\n its methods. If not provided, shape parameters will be inferred from\n the signature of the private methods, ``_pdf`` and ``_cdf`` of the\n instance.\n extradoc : str, optional, deprecated\n This string is used as the last part of the docstring returned when a\n subclass has no docstring of its own. Note: `extradoc` exists for\n backwards compatibility, do not use for new subclasses.\n seed : None or int or ``numpy.random.RandomState`` instance, optional\n This parameter defines the RandomState object to use for drawing\n random variates.\n If None (or np.random), the global np.random state is used.\n If integer, it is used to seed the local RandomState instance.\n Default is None.\n\n Methods\n -------\n rvs\n pdf\n logpdf\n cdf\n logcdf\n sf\n logsf\n ppf\n isf\n moment\n stats\n entropy\n expect\n median\n mean\n std\n var\n interval\n __call__\n fit\n fit_loc_scale\n nnlf\n\n Notes\n -----\n Public methods of an instance of a distribution class (e.g., ``pdf``,\n ``cdf``) check their arguments and pass valid arguments to private,\n computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid\n if it is within the support of a distribution, ``self.a <= x <= self.b``.\n Whether a shape parameter is valid is decided by an ``_argcheck`` method\n (which defaults to checking that its arguments are strictly positive.)\n\n **Subclassing**\n\n New random variables can be defined by subclassing the `rv_continuous` class\n and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized\n to location 0 and scale 1).\n\n If positive argument checking is not correct for your RV\n then you will also need to re-define the ``_argcheck`` method.\n\n Correct, but potentially slow defaults exist for the remaining\n methods but for speed and/or accuracy you can over-ride::\n\n _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf\n\n Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.\n\n **Methods that can be overwritten by subclasses**\n ::\n\n _rvs\n _pdf\n _cdf\n _sf\n _ppf\n _isf\n _stats\n _munp\n _entropy\n _argcheck\n\n There are additional (internal and private) generic methods that can\n be useful for cross-checking and for debugging, but might work in all\n cases when directly called.\n\n A note on ``shapes``: subclasses need not specify them explicitly. 
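# ----------------------------------------------------------------------
# Illustrative sketch (not part of the module above): as the notes say, a
# subclass only has to supply _pdf or _cdf in standardized form; slow but
# correct defaults then provide the rest (_pdf by numerical differentiation,
# _ppf by root finding, rvs by inversion).  The class below defines only
# _cdf; the name `kumaraswamy_gen` is chosen for this sketch and is not a
# distribution shipped with this module.
import numpy as np
from scipy import stats

class kumaraswamy_gen(stats.rv_continuous):
    "Kumaraswamy distribution on (0, 1), defined through its cdf only."
    def _cdf(self, x, a, b):
        return 1.0 - (1.0 - x ** a) ** b

# Here a=0.0 and b=1.0 are the support bounds, not the shape parameters.
kumaraswamy = kumaraswamy_gen(a=0.0, b=1.0, name='kumaraswamy')

a_, b_, x = 2.0, 3.0, 0.3
analytic_pdf = a_ * b_ * x ** (a_ - 1) * (1 - x ** a_) ** (b_ - 1)

# pdf, ppf and rvs all come from the generic defaults in rv_continuous.
assert np.isclose(kumaraswamy.pdf(x, a_, b_), analytic_pdf, rtol=1e-4)
assert np.isclose(kumaraswamy.cdf(kumaraswamy.ppf(0.7, a_, b_), a_, b_), 0.7)
samples = kumaraswamy.rvs(a_, b_, size=5, random_state=0)
assert np.all((samples >= 0) & (samples <= 1))
# ----------------------------------------------------------------------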
In this\n    case, `shapes` will be automatically deduced from the signatures of the\n    overridden methods (`pdf`, `cdf` etc).\n    If, for some reason, you prefer to avoid relying on introspection, you can\n    specify ``shapes`` explicitly as an argument to the instance constructor.\n\n\n    **Frozen Distributions**\n\n    Normally, you must provide shape parameters (and, optionally, location and\n    scale parameters) to each call of a method of a distribution.\n\n    Alternatively, the object may be called (as a function) to fix the shape,\n    location, and scale parameters returning a \"frozen\" continuous RV object:\n\n    rv = generic(<shape(s)>, loc=0, scale=1)\n        frozen RV object with the same methods but holding the given shape,\n        location, and scale fixed\n\n    **Statistics**\n\n    Statistics are computed using numerical integration by default.\n    For speed you can redefine this using ``_stats``:\n\n     - take shape parameters and return mu, mu2, g1, g2\n     - If you can't compute one of these, return it as None\n     - Can also be defined with a keyword argument ``moments``, which is a\n       string composed of \"m\", \"v\", \"s\", and/or \"k\".\n       Only the components appearing in string should be computed and\n       returned in the order \"m\", \"v\", \"s\", or \"k\" with missing values\n       returned as None.\n\n    Alternatively, you can override ``_munp``, which takes ``n`` and shape\n    parameters and returns the n-th non-central moment of the distribution.\n\n    Examples\n    --------\n    To create a new Gaussian distribution, we would do the following:\n\n    >>> from scipy.stats import rv_continuous\n    >>> class gaussian_gen(rv_continuous):\n    ...     \"Gaussian distribution\"\n    ...     def _pdf(self, x):\n    ...         return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)\n    >>> gaussian = gaussian_gen(name='gaussian')\n\n    ``scipy.stats`` distributions are *instances*, so here we subclass\n    `rv_continuous` and create an instance. With this, we now have\n    a fully functional distribution with all relevant methods automagically\n    generated by the framework.\n\n    Note that above we defined a standard normal distribution, with zero mean\n    and unit variance. 
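# ----------------------------------------------------------------------
# Illustrative sketch (not part of the module above): as described in the
# Statistics note, providing _stats avoids the generic numerical
# integration.  The toy class below (name `laplace_toy_gen` invented for
# this sketch) implements a standard Laplace density and returns its known
# moments directly: mean 0, variance 2, skewness 0, excess kurtosis 3.
import numpy as np
from scipy import stats

class laplace_toy_gen(stats.rv_continuous):
    "Standard Laplace distribution with a hand-written _stats."
    def _pdf(self, x):
        return 0.5 * np.exp(-np.abs(x))
    def _stats(self):
        return 0.0, 2.0, 0.0, 3.0        # mu, mu2, g1, g2

laplace_toy = laplace_toy_gen(name='laplace_toy')

m, v, s, k = laplace_toy.stats(moments='mvsk')
assert np.allclose([m, v, s, k], [0.0, 2.0, 0.0, 3.0])
assert np.isclose(laplace_toy.mean(loc=1.0, scale=3.0), 1.0)
assert np.isclose(laplace_toy.std(loc=1.0, scale=3.0), np.sqrt(2.0) * 3.0)
# ----------------------------------------------------------------------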
Shifting and scaling of the distribution can be done\n by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``\n essentially computes ``y = (x - loc) / scale`` and\n ``gaussian._pdf(y) / scale``.\n\n \"\"\"\n def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,\n badvalue=None, name=None, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_continuous, self).__init__(seed)\n\n # save the ctor parameters, cf generic freeze\n self._ctor_param = dict(\n momtype=momtype, a=a, b=b, xtol=xtol,\n badvalue=badvalue, name=name, longname=longname,\n shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n if name is None:\n name = 'Distribution'\n self.badvalue = badvalue\n self.name = name\n self.a = a\n self.b = b\n if a is None:\n self.a = -inf\n if b is None:\n self.b = inf\n self.xtol = xtol\n self._size = 1\n self.moment_type = momtype\n self.shapes = shapes\n self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],\n locscale_in='loc=0, scale=1',\n locscale_out='loc, scale')\n\n # nin correction\n self._ppfvec = vectorize(self._ppf_single, otypes='d')\n self._ppfvec.nin = self.numargs + 1\n self.vecentropy = vectorize(self._entropy, otypes='d')\n self._cdfvec = vectorize(self._cdf_single, otypes='d')\n self._cdfvec.nin = self.numargs + 1\n\n self.extradoc = extradoc\n if momtype == 0:\n self.generic_moment = vectorize(self._mom0_sc, otypes='d')\n else:\n self.generic_moment = vectorize(self._mom1_sc, otypes='d')\n # Because of the *args argument of _mom0_sc, vectorize cannot count the\n # number of arguments correctly.\n self.generic_moment.nin = self.numargs + 1\n\n if longname is None:\n if name[0] in ['aeiouAEIOU']:\n hstr = \"An \"\n else:\n hstr = \"A \"\n longname = hstr + name\n\n if sys.flags.optimize < 2:\n # Skip adding docstrings if interpreter is run with -OO\n if self.__doc__ is None:\n self._construct_default_doc(longname=longname,\n extradoc=extradoc,\n docdict=docdict,\n discrete='continuous')\n else:\n dct = dict(distcont)\n self._construct_doc(docdict, dct.get(self.name))\n\n def _updated_ctor_param(self):\n \"\"\" Return the current version of _ctor_param, possibly updated by user.\n\n Used by freezing and pickling.\n Keep this in sync with the signature of __init__.\n \"\"\"\n dct = self._ctor_param.copy()\n dct['a'] = self.a\n dct['b'] = self.b\n dct['xtol'] = self.xtol\n dct['badvalue'] = self.badvalue\n dct['name'] = self.name\n dct['shapes'] = self.shapes\n dct['extradoc'] = self.extradoc\n return dct\n\n def _ppf_to_solve(self, x, q, *args):\n return self.cdf(*(x, )+args)-q\n\n def _ppf_single(self, q, *args):\n left = right = None\n if self.a > -np.inf:\n left = self.a\n if self.b < np.inf:\n right = self.b\n\n factor = 10.\n if not left: # i.e. self.a = -inf\n left = -1.*factor\n while self._ppf_to_solve(left, q, *args) > 0.:\n right = left\n left *= factor\n # left is now such that cdf(left) < q\n if not right: # i.e. 
self.b = inf\n right = factor\n while self._ppf_to_solve(right, q, *args) < 0.:\n left = right\n right *= factor\n # right is now such that cdf(right) > q\n\n return optimize.brentq(self._ppf_to_solve,\n left, right, args=(q,)+args, xtol=self.xtol)\n\n # moment from definition\n def _mom_integ0(self, x, m, *args):\n return x**m * self.pdf(x, *args)\n\n def _mom0_sc(self, m, *args):\n return integrate.quad(self._mom_integ0, self.a, self.b,\n args=(m,)+args)[0]\n\n # moment calculated using ppf\n def _mom_integ1(self, q, m, *args):\n return (self.ppf(q, *args))**m\n\n def _mom1_sc(self, m, *args):\n return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]\n\n def _pdf(self, x, *args):\n return derivative(self._cdf, x, dx=1e-5, args=args, order=5)\n\n ## Could also define any of these\n def _logpdf(self, x, *args):\n return log(self._pdf(x, *args))\n\n def _cdf_single(self, x, *args):\n return integrate.quad(self._pdf, self.a, x, args=args)[0]\n\n def _cdf(self, x, *args):\n return self._cdfvec(x, *args)\n\n ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined\n ## in rv_generic\n\n def pdf(self, x, *args, **kwds):\n \"\"\"\n Probability density function at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n putmask(output, (1-cond0)+np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args+(scale,)))\n scale, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._pdf(*goodargs) / scale)\n if output.ndim == 0:\n return output[()]\n return output\n\n def logpdf(self, x, *args, **kwds):\n \"\"\"\n Log of the probability density function at x of the given RV.\n\n This uses a more numerically accurate calculation if available.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logpdf : array_like\n Log of the probability density function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n putmask(output, (1-cond0)+np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args+(scale,)))\n scale, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._logpdf(*goodargs) - log(scale))\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, x, *args, **kwds):\n \"\"\"\n Cumulative distribution function of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n cdf : ndarray\n Cumulative distribution function evaluated at `x`\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x > self.a) & (x < self.b)\n cond2 = (x >= self.b) & cond0\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._cdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logcdf(self, x, *args, **kwds):\n \"\"\"\n Log of the cumulative distribution function at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logcdf : array_like\n Log of the cumulative distribution function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x > self.a) & (x < self.b)\n cond2 = (x >= self.b) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._logcdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def sf(self, x, *args, **kwds):\n \"\"\"\n Survival function (1 - `cdf`) at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n sf : array_like\n Survival function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x > self.a) & (x < self.b)\n cond2 = cond0 & (x <= self.a)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._sf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logsf(self, x, *args, **kwds):\n \"\"\"\n Log of the survival function of the given RV.\n\n Returns the log of the \"survival function,\" defined as (1 - `cdf`),\n evaluated at `x`.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logsf : ndarray\n Log of the survival function evaluated at `x`.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x > self.a) & (x < self.b)\n cond2 = cond0 & (x <= self.a)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of `cdf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n lower tail probability\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : array_like\n quantile corresponding to the lower tail probability q.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 0)\n cond3 = cond0 & (q == 1)\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue)\n\n lower_bound = self.a * scale + loc\n upper_bound = self.b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n place(output, cond, self._ppf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function (inverse of `sf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n upper tail probability\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : ndarray or scalar\n Quantile corresponding to the upper tail probability q.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 1)\n cond3 = cond0 & (q == 0)\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue)\n\n lower_bound = self.a * scale + loc\n upper_bound = self.b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n place(output, cond, self._isf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def _nnlf(self, x, *args):\n return -np.sum(self._logpdf(x, *args), axis=0)\n\n def nnlf(self, theta, x):\n '''Return negative loglikelihood function.\n\n Notes\n -----\n This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the\n parameters (including loc and scale).\n '''\n try:\n loc = theta[-2]\n scale = theta[-1]\n args = tuple(theta[:-2])\n except IndexError:\n raise ValueError(\"Not enough input arguments.\")\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = asarray((x-loc) / scale)\n cond0 = (x <= self.a) | (self.b <= x)\n if np.any(cond0):\n return inf\n else:\n N = len(x)\n return self._nnlf(x, *args) + N * log(scale)\n\n def _penalized_nnlf(self, theta, x):\n ''' Return negative loglikelihood function,\n i.e., - sum (log pdf(x, theta), axis=0)\n where theta are the parameters (including loc and scale)\n '''\n try:\n loc = theta[-2]\n scale = theta[-1]\n args = tuple(theta[:-2])\n except IndexError:\n raise ValueError(\"Not enough input arguments.\")\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = asarray((x-loc) / scale)\n\n loginf = log(_XMAX)\n\n if np.isneginf(self.a).all() and np.isinf(self.b).all():\n Nbad = 0\n else:\n cond0 = (x <= self.a) | (self.b <= x)\n Nbad = np.sum(cond0)\n if Nbad > 0:\n x = argsreduce(~cond0, x)[0]\n\n N = len(x)\n return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf\n\n # return starting point for fit (shape arguments + loc + scale)\n def _fitstart(self, data, args=None):\n if args is None:\n args = (1.0,)*self.numargs\n loc, scale = self._fit_loc_scale_support(data, *args)\n return args + (loc, scale)\n\n # Return the (possibly reduced) function to optimize in order to find MLE\n # estimates for the .fit method\n def _reduce_func(self, args, kwds):\n # First of all, convert fshapes params to fnum: eg for stats.beta,\n # shapes='a, b'. 
To fix `a`, can specify either `f1` or `fa`.\n # Convert the latter into the former.\n if self.shapes:\n shapes = self.shapes.replace(',', ' ').split()\n for j, s in enumerate(shapes):\n val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)\n if val is not None:\n key = 'f%d' % j\n if key in kwds:\n raise ValueError(\"Duplicate entry for %s.\" % key)\n else:\n kwds[key] = val\n\n args = list(args)\n Nargs = len(args)\n fixedn = []\n names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']\n x0 = []\n for n, key in enumerate(names):\n if key in kwds:\n fixedn.append(n)\n args[n] = kwds.pop(key)\n else:\n x0.append(args[n])\n\n if len(fixedn) == 0:\n func = self._penalized_nnlf\n restore = None\n else:\n if len(fixedn) == Nargs:\n raise ValueError(\n \"All parameters fixed. There is nothing to optimize.\")\n\n def restore(args, theta):\n # Replace with theta for all numbers not in fixedn\n # This allows the non-fixed values to vary, but\n # we still call self.nnlf with all parameters.\n i = 0\n for n in range(Nargs):\n if n not in fixedn:\n args[n] = theta[i]\n i += 1\n return args\n\n def func(theta, x):\n newtheta = restore(args[:], theta)\n return self._penalized_nnlf(newtheta, x)\n\n return x0, func, restore, args\n\n def fit(self, data, *args, **kwds):\n \"\"\"\n Return MLEs for shape, location, and scale parameters from data.\n\n MLE stands for Maximum Likelihood Estimate. Starting estimates for\n the fit are given by input arguments; for any arguments not provided\n with starting estimates, ``self._fitstart(data)`` is called to generate\n such.\n\n One can hold some parameters fixed to specific values by passing in\n keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)\n and ``floc`` and ``fscale`` (for location and scale parameters,\n respectively).\n\n Parameters\n ----------\n data : array_like\n Data to use in calculating the MLEs.\n args : floats, optional\n Starting value(s) for any shape-characterizing arguments (those not\n provided will be determined by a call to ``_fitstart(data)``).\n No default value.\n kwds : floats, optional\n Starting values for the location and scale parameters; no default.\n Special keyword arguments are recognized as holding certain\n parameters fixed:\n\n - f0...fn : hold respective shape parameters fixed.\n Alternatively, shape parameters to fix can be specified by name.\n For example, if ``self.shapes == \"a, b\"``, ``fa``and ``fix_a``\n are equivalent to ``f0``, and ``fb`` and ``fix_b`` are\n equivalent to ``f1``.\n\n - floc : hold location parameter fixed to specified value.\n\n - fscale : hold scale parameter fixed to specified value.\n\n - optimizer : The optimizer to use. The optimizer must take ``func``,\n and starting position as the first two arguments,\n plus ``args`` (for extra arguments to pass to the\n function to be optimized) and ``disp=0`` to suppress\n output as keyword arguments.\n\n Returns\n -------\n shape, loc, scale : tuple of floats\n MLEs for any shape statistics, followed by those for location and\n scale.\n\n Notes\n -----\n This fit is computed by maximizing a log-likelihood function, with\n penalty applied for samples outside of range of the distribution. 
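# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): fit() maximises
# the likelihood by minimising the penalised nnlf defined above, and
# _reduce_func lets individual parameters be pinned with f0/f1/floc/fscale
# keywords.  Below a gamma sample is fitted with the location held at 0 and
# the result is compared, via the public nnlf, with the parameters used to
# generate the data.  Variable names are local to this sketch.
import numpy as np
from scipy import stats

data = stats.gamma.rvs(a=2.5, scale=1.5, size=2000, random_state=42)

a_hat, loc_hat, scale_hat = stats.gamma.fit(data, floc=0)
assert loc_hat == 0                     # held fixed by _reduce_func

nll_fit = stats.gamma.nnlf((a_hat, loc_hat, scale_hat), data)
nll_true = stats.gamma.nnlf((2.5, 0.0, 1.5), data)
assert nll_fit <= nll_true + 1e-3       # the MLE is at least as good in-sample
# ----------------------------------------------------------------------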
The\n returned answer is not guaranteed to be the globally optimal MLE, it\n may only be locally optimal, or the optimization may fail altogether.\n\n\n Examples\n --------\n\n Generate some data to fit: draw random variates from the `beta`\n distribution\n\n >>> from scipy.stats import beta\n >>> a, b = 1., 2.\n >>> x = beta.rvs(a, b, size=1000)\n\n Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):\n\n >>> a1, b1, loc1, scale1 = beta.fit(x)\n\n We can also use some prior knowledge about the dataset: let's keep\n ``loc`` and ``scale`` fixed:\n\n >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)\n >>> loc1, scale1\n (0, 1)\n\n We can also keep shape parameters fixed by using ``f``-keywords. To\n keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,\n equivalently, ``fa=1``:\n\n >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)\n >>> a1\n 1\n\n \"\"\"\n Narg = len(args)\n if Narg > self.numargs:\n raise TypeError(\"Too many input arguments.\")\n\n start = [None]*2\n if (Narg < self.numargs) or not ('loc' in kwds and\n 'scale' in kwds):\n # get distribution specific starting locations\n start = self._fitstart(data)\n args += start[Narg:-2]\n loc = kwds.pop('loc', start[-2])\n scale = kwds.pop('scale', start[-1])\n args += (loc, scale)\n x0, func, restore, args = self._reduce_func(args, kwds)\n\n optimizer = kwds.pop('optimizer', optimize.fmin)\n # convert string to function in scipy.optimize\n if not callable(optimizer) and isinstance(optimizer, string_types):\n if not optimizer.startswith('fmin_'):\n optimizer = \"fmin_\"+optimizer\n if optimizer == 'fmin_':\n optimizer = 'fmin'\n try:\n optimizer = getattr(optimize, optimizer)\n except AttributeError:\n raise ValueError(\"%s is not a valid optimizer\" % optimizer)\n\n # by now kwds must be empty, since everybody took what they needed\n if kwds:\n raise TypeError(\"Unknown arguments: %s.\" % kwds)\n\n vals = optimizer(func, x0, args=(ravel(data),), disp=0)\n if restore is not None:\n vals = restore(args, vals)\n vals = tuple(vals)\n return vals\n\n def _fit_loc_scale_support(self, data, *args):\n \"\"\"\n Estimate loc and scale parameters from data accounting for support.\n\n Parameters\n ----------\n data : array_like\n Data to fit.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n\n Returns\n -------\n Lhat : float\n Estimated location parameter for the data.\n Shat : float\n Estimated scale parameter for the data.\n\n \"\"\"\n data = np.asarray(data)\n\n # Estimate location and scale according to the method of moments.\n loc_hat, scale_hat = self.fit_loc_scale(data, *args)\n\n # Compute the support according to the shape parameters.\n self._argcheck(*args)\n a, b = self.a, self.b\n support_width = b - a\n\n # If the support is empty then return the moment-based estimates.\n if support_width <= 0:\n return loc_hat, scale_hat\n\n # Compute the proposed support according to the loc and scale estimates.\n a_hat = loc_hat + a * scale_hat\n b_hat = loc_hat + b * scale_hat\n\n # Use the moment-based estimates if they are compatible with the data.\n data_a = np.min(data)\n data_b = np.max(data)\n if a_hat < data_a and data_b < b_hat:\n return loc_hat, scale_hat\n\n # Otherwise find other estimates that are compatible with the data.\n data_width = data_b - data_a\n rel_margin = 0.1\n margin = data_width * rel_margin\n\n # For a finite interval, both the location and scale\n # should have interesting values.\n if support_width < np.inf:\n loc_hat = (data_a - a) - margin\n scale_hat = (data_width + 2 * margin) / support_width\n return loc_hat, scale_hat\n\n # For a one-sided interval, use only an interesting location parameter.\n if a > -np.inf:\n return (data_a - a) - margin, 1\n elif b < np.inf:\n return (data_b - b) + margin, 1\n else:\n raise RuntimeError\n\n def fit_loc_scale(self, data, *args):\n \"\"\"\n Estimate loc and scale parameters from data using 1st and 2nd moments.\n\n Parameters\n ----------\n data : array_like\n Data to fit.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n\n Returns\n -------\n Lhat : float\n Estimated location parameter for the data.\n Shat : float\n Estimated scale parameter for the data.\n\n \"\"\"\n mu, mu2 = self.stats(*args, **{'moments': 'mv'})\n tmp = asarray(data)\n muhat = tmp.mean()\n mu2hat = tmp.var()\n Shat = sqrt(mu2hat / mu2)\n Lhat = muhat - Shat*mu\n if not np.isfinite(Lhat):\n Lhat = 0\n if not (np.isfinite(Shat) and (0 < Shat)):\n Shat = 1\n return Lhat, Shat\n\n def _entropy(self, *args):\n def integ(x):\n val = self._pdf(x, *args)\n return entr(val)\n\n # upper limit is often inf, so suppress warnings when integrating\n olderr = np.seterr(over='ignore')\n h = integrate.quad(integ, self.a, self.b)[0]\n np.seterr(**olderr)\n\n if not np.isnan(h):\n return h\n else:\n # try with different limits if integration problems\n low, upp = self.ppf([1e-10, 1. - 1e-10], *args)\n if np.isinf(self.b):\n upper = upp\n else:\n upper = self.b\n if np.isinf(self.a):\n lower = low\n else:\n lower = self.a\n return integrate.quad(integ, lower, upper)[0]\n\n def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,\n conditional=False, **kwds):\n \"\"\"Calculate expected value of a function with respect to the\n distribution.\n\n The expected value of a function ``f(x)`` with respect to a\n distribution ``dist`` is defined as::\n\n ubound\n E[x] = Integral(f(x) * dist.pdf(x))\n lbound\n\n Parameters\n ----------\n func : callable, optional\n Function for which integral is calculated. 
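# ----------------------------------------------------------------------
# Illustrative sketch (not part of the module above): fit_loc_scale is a
# plain method-of-moments estimator.  With the distribution's standardized
# mean mu and variance mu2 it returns Shat = sqrt(sample_var / mu2) and
# Lhat = sample_mean - Shat * mu.  For the standard normal (mu = 0, mu2 = 1)
# that reduces to the sample mean and the (biased) sample standard deviation:
import numpy as np
from scipy import stats

rng = np.random.RandomState(7)
data = rng.normal(loc=5.0, scale=3.0, size=10000)

loc_hat, scale_hat = stats.norm.fit_loc_scale(data)
assert np.isclose(loc_hat, data.mean())
assert np.isclose(scale_hat, np.sqrt(data.var()))
# ----------------------------------------------------------------------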
Takes only one argument.\n The default is the identity mapping f(x) = x.\n args : tuple, optional\n Shape parameters of the distribution.\n loc : float, optional\n Location parameter (default=0).\n scale : float, optional\n Scale parameter (default=1).\n lb, ub : scalar, optional\n Lower and upper bound for integration. Default is set to the\n support of the distribution.\n conditional : bool, optional\n If True, the integral is corrected by the conditional probability\n of the integration interval. The return value is the expectation\n of the function, conditional on being in the given interval.\n Default is False.\n\n Additional keyword arguments are passed to the integration routine.\n\n Returns\n -------\n expect : float\n The calculated expected value.\n\n Notes\n -----\n The integration behavior of this function is inherited from\n `integrate.quad`.\n\n \"\"\"\n lockwds = {'loc': loc,\n 'scale': scale}\n self._argcheck(*args)\n if func is None:\n def fun(x, *args):\n return x * self.pdf(x, *args, **lockwds)\n else:\n def fun(x, *args):\n return func(x) * self.pdf(x, *args, **lockwds)\n if lb is None:\n lb = loc + self.a * scale\n if ub is None:\n ub = loc + self.b * scale\n if conditional:\n invfac = (self.sf(lb, *args, **lockwds)\n - self.sf(ub, *args, **lockwds))\n else:\n invfac = 1.0\n kwds['args'] = args\n # Silence floating point warnings from integration.\n olderr = np.seterr(all='ignore')\n vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac\n np.seterr(**olderr)\n return vals\n\n\n## Handlers for generic case where xk and pk are given\n## The _drv prefix probably means discrete random variable.\n\ndef _drv_pmf(self, xk, *args):\n try:\n return self.P[xk]\n except KeyError:\n return 0.0\n\n\ndef _drv_cdf(self, xk, *args):\n indx = argmax((self.xk > xk), axis=-1)-1\n return self.F[self.xk[indx]]\n\n\ndef _drv_ppf(self, q, *args):\n indx = argmax((self.qvals >= q), axis=-1)\n return self.Finv[self.qvals[indx]]\n\n\ndef _drv_nonzero(self, k, *args):\n return 1\n\n\ndef _drv_moment(self, n, *args):\n n = asarray(n)\n return np.sum(self.xk**n[np.newaxis, ...] 
* self.pk, axis=0)\n\n\ndef _drv_moment_gen(self, t, *args):\n t = asarray(t)\n return np.sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)\n\n\ndef _drv2_moment(self, n, *args):\n \"\"\"Non-central moment of discrete distribution.\"\"\"\n def fun(x):\n return np.power(x, n) * self._pmf(x, *args)\n return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc)\n\n\ndef _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm\n b = self.b\n a = self.a\n if isinf(b): # Be sure ending point is > q\n b = int(max(100*q, 10))\n while 1:\n if b >= self.b:\n qb = 1.0\n break\n qb = self._cdf(b, *args)\n if (qb < q):\n b += 10\n else:\n break\n else:\n qb = 1.0\n if isinf(a): # be sure starting point < q\n a = int(min(-100*q, -10))\n while 1:\n if a <= self.a:\n qb = 0.0\n break\n qa = self._cdf(a, *args)\n if (qa > q):\n a -= 10\n else:\n break\n else:\n qa = self._cdf(a, *args)\n\n while 1:\n if (qa == q):\n return a\n if (qb == q):\n return b\n if b <= a+1:\n # testcase: return wrong number at lower index\n # python -c \"from scipy.stats import zipf;print zipf.ppf(0.01, 2)\" wrong\n # python -c \"from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)\"\n # python -c \"from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)\"\n if qa > q:\n return a\n else:\n return b\n c = int((a+b)/2.0)\n qc = self._cdf(c, *args)\n if (qc < q):\n if a != c:\n a = c\n else:\n raise RuntimeError('updating stopped, endless loop')\n qa = qc\n elif (qc > q):\n if b != c:\n b = c\n else:\n raise RuntimeError('updating stopped, endless loop')\n qb = qc\n else:\n return c\n\n\ndef entropy(pk, qk=None, base=None):\n \"\"\"Calculate the entropy of a distribution for given probability values.\n\n If only probabilities `pk` are given, the entropy is calculated as\n ``S = -sum(pk * log(pk), axis=0)``.\n\n If `qk` is not None, then compute the Kullback-Leibler divergence\n ``S = sum(pk * log(pk / qk), axis=0)``.\n\n This routine will normalize `pk` and `qk` if they don't sum to 1.\n\n Parameters\n ----------\n pk : sequence\n Defines the (discrete) distribution. ``pk[i]`` is the (possibly\n unnormalized) probability of event ``i``.\n qk : sequence, optional\n Sequence against which the relative entropy is computed. Should be in\n the same format as `pk`.\n base : float, optional\n The logarithmic base to use, defaults to ``e`` (natural logarithm).\n\n Returns\n -------\n S : float\n The calculated entropy.\n\n \"\"\"\n pk = asarray(pk)\n pk = 1.0*pk / np.sum(pk, axis=0)\n if qk is None:\n vec = entr(pk)\n else:\n qk = asarray(qk)\n if len(qk) != len(pk):\n raise ValueError(\"qk and pk must have same length.\")\n qk = 1.0*qk / np.sum(qk, axis=0)\n vec = kl_div(pk, qk)\n S = np.sum(vec, axis=0)\n if base is not None:\n S /= log(base)\n return S\n\n\n# Must over-ride one of _pmf or _cdf or pass in\n# x_k, p(x_k) lists in initialization\n\nclass rv_discrete(rv_generic):\n \"\"\"\n A generic discrete random variable class meant for subclassing.\n\n `rv_discrete` is a base class to construct specific distribution classes\n and instances for discrete random variables. 
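# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): expect()
# integrates func(x) * pdf(x) over the support with integrate.quad, and the
# module-level entropy() computes Shannon entropy or, when `qk` is given,
# the Kullback-Leibler divergence.  Two quick closed-form checks:
import numpy as np
from scipy import stats

# E[X] and E[X**2] of a normal with loc=1, scale=2.
assert np.isclose(stats.norm.expect(lambda x: x, loc=1.0, scale=2.0), 1.0)
assert np.isclose(stats.norm.expect(lambda x: x ** 2, loc=1.0, scale=2.0),
                  2.0 ** 2 + 1.0 ** 2)

# conditional=True renormalises by the probability of the window, so this is
# the mean of the standard normal given X > 0, i.e. sqrt(2/pi).
half_mean = stats.norm.expect(lambda x: x, lb=0.0, conditional=True)
assert np.isclose(half_mean, np.sqrt(2.0 / np.pi))

# entropy(pk) in nats, and the KL divergence when qk is given.
assert np.isclose(stats.entropy([0.5, 0.5]), np.log(2.0))
assert np.isclose(stats.entropy([0.5, 0.5], [0.9, 0.1]), np.log(5.0 / 3.0))
# ----------------------------------------------------------------------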
It can also be used\n to construct an arbitrary distribution defined by a list of support\n points and corresponding probabilities.\n\n Parameters\n ----------\n a : float, optional\n Lower bound of the support of the distribution, default: 0\n b : float, optional\n Upper bound of the support of the distribution, default: plus infinity\n moment_tol : float, optional\n The tolerance for the generic calculation of moments.\n values : tuple of two array_like, optional\n ``(xk, pk)`` where ``xk`` are integers with non-zero\n probabilities ``pk`` with ``sum(pk) = 1``.\n inc : integer, optional\n Increment for the support of the distribution.\n Default is 1. (other values have not been tested)\n badvalue : float, optional\n The value in a result arrays that indicates a value that for which\n some argument restriction is violated, default is np.nan.\n name : str, optional\n The name of the instance. This string is used to construct the default\n example for distributions.\n longname : str, optional\n This string is used as part of the first line of the docstring returned\n when a subclass has no docstring of its own. Note: `longname` exists\n for backwards compatibility, do not use for new subclasses.\n shapes : str, optional\n The shape of the distribution. For example \"m, n\" for a distribution\n that takes two integers as the two shape arguments for all its methods\n If not provided, shape parameters will be inferred from\n the signatures of the private methods, ``_pmf`` and ``_cdf`` of\n the instance.\n extradoc : str, optional\n This string is used as the last part of the docstring returned when a\n subclass has no docstring of its own. Note: `extradoc` exists for\n backwards compatibility, do not use for new subclasses.\n seed : None or int or ``numpy.random.RandomState`` instance, optional\n This parameter defines the RandomState object to use for drawing\n random variates.\n If None, the global np.random state is used.\n If integer, it is used to seed the local RandomState instance.\n Default is None.\n\n Methods\n -------\n rvs\n pmf\n logpmf\n cdf\n logcdf\n sf\n logsf\n ppf\n isf\n moment\n stats\n entropy\n expect\n median\n mean\n std\n var\n interval\n __call__\n\n\n Notes\n -----\n\n This class is similar to `rv_continuous`, the main differences being:\n\n - the support of the distribution is a set of integers\n - instead of the probability density function, ``pdf`` (and the\n corresponding private ``_pdf``), this class defines the\n *probability mass function*, `pmf` (and the corresponding\n private ``_pmf``.)\n - scale parameter is not defined.\n\n To create a new discrete distribution, we would do the following:\n\n >>> from scipy.stats import rv_discrete\n >>> class poisson_gen(rv_discrete):\n ... \"Poisson distribution\"\n ... def _pmf(self, k, mu):\n ... return exp(-mu) * mu**k / factorial(k)\n\n and create an instance::\n\n >>> poisson = poisson_gen(name=\"poisson\")\n\n Note that above we defined the Poisson distribution in the standard form.\n Shifting the distribution can be done by providing the ``loc`` parameter\n to the methods of the instance. 
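# ----------------------------------------------------------------------
# Illustrative sketch (not part of the module above): mirroring the notes on
# subclassing, a discrete distribution can be defined through _pmf alone;
# cdf() then comes from the generic summation in _cdf_single and ppf() from
# the bisection search in _drv2_ppfsingle.  The class below (name
# `benford_gen` invented for this sketch) implements the first-digit
# (Benford) law on the integers 1..9.
import numpy as np
from scipy import stats

class benford_gen(stats.rv_discrete):
    "First-digit (Benford) distribution on the integers 1..9."
    def _pmf(self, k):
        return np.log10(1.0 + 1.0 / k)

benford = benford_gen(a=1, b=9, name='benford')

assert np.isclose(benford.pmf(1), np.log10(2.0))
assert np.isclose(benford.cdf(9), 1.0)       # pmf sums to one over 1..9
assert benford.ppf(0.5) == 3                 # smallest k with cdf(k) >= 0.5
draws = benford.rvs(size=5, random_state=1)
assert np.all((draws >= 1) & (draws <= 9))
# ----------------------------------------------------------------------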
For example, ``poisson.pmf(x, mu, loc)``\n delegates the work to ``poisson._pmf(x-loc, mu)``.\n\n **Discrete distributions from a list of probabilities**\n\n Alternatively, you can construct an arbitrary discrete rv defined\n on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the\n ``values`` keyword argument to the `rv_discrete` constructor.\n\n Examples\n --------\n\n Custom made discrete distribution:\n\n >>> from scipy import stats\n >>> xk = np.arange(7)\n >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)\n >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))\n >>>\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots(1, 1)\n >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')\n >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)\n >>> plt.show()\n\n Random number generation:\n\n >>> R = custm.rvs(size=100)\n\n \"\"\"\n\n def __init__(self, a=0, b=inf, name=None, badvalue=None,\n moment_tol=1e-8, values=None, inc=1, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_discrete, self).__init__(seed)\n\n # cf generic freeze\n self._ctor_param = dict(\n a=a, b=b, name=name, badvalue=badvalue,\n moment_tol=moment_tol, values=values, inc=inc,\n longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n if name is None:\n name = 'Distribution'\n self.badvalue = badvalue\n self.a = a\n self.b = b\n self.name = name\n self.moment_tol = moment_tol\n self.inc = inc\n self._cdfvec = vectorize(self._cdf_single, otypes='d')\n self.return_integers = 1\n self.vecentropy = vectorize(self._entropy)\n self.shapes = shapes\n self.extradoc = extradoc\n\n if values is not None:\n self.xk, self.pk = values\n self.return_integers = 0\n indx = argsort(ravel(self.xk))\n self.xk = take(ravel(self.xk), indx, 0)\n self.pk = take(ravel(self.pk), indx, 0)\n self.a = self.xk[0]\n self.b = self.xk[-1]\n self.P = dict(zip(self.xk, self.pk))\n self.qvals = np.cumsum(self.pk, axis=0)\n self.F = dict(zip(self.xk, self.qvals))\n decreasing_keys = sorted(self.F.keys(), reverse=True)\n self.Finv = dict((self.F[k], k) for k in decreasing_keys)\n self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),\n self, rv_discrete)\n self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),\n self, rv_discrete)\n self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),\n self, rv_discrete)\n self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)\n self.generic_moment = instancemethod(_drv_moment,\n self, rv_discrete)\n self.moment_gen = instancemethod(_drv_moment_gen,\n self, rv_discrete)\n\n self.shapes = ' ' # bypass inspection\n self._construct_argparser(meths_to_inspect=[self._pmf],\n locscale_in='loc=0',\n # scale=1 for discrete RVs\n locscale_out='loc, 1')\n else:\n self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],\n locscale_in='loc=0',\n # scale=1 for discrete RVs\n locscale_out='loc, 1')\n\n # nin correction needs to be after we know numargs\n # correct nin for generic moment vectorization\n _vec_generic_moment = vectorize(_drv2_moment, otypes='d')\n _vec_generic_moment.nin = self.numargs + 2\n self.generic_moment = instancemethod(_vec_generic_moment,\n self, rv_discrete)\n\n # correct nin for ppf vectorization\n _vppf = vectorize(_drv2_ppfsingle, otypes='d')\n _vppf.nin = self.numargs + 2 # +1 is for self\n self._ppfvec = instancemethod(_vppf,\n self, rv_discrete)\n\n # now that self.numargs is defined, we can adjust nin\n self._cdfvec.nin = self.numargs + 1\n\n # generate docstring for 
subclass instances\n if longname is None:\n if name[0] in ['aeiouAEIOU']:\n hstr = \"An \"\n else:\n hstr = \"A \"\n longname = hstr + name\n\n if sys.flags.optimize < 2:\n # Skip adding docstrings if interpreter is run with -OO\n if self.__doc__ is None:\n self._construct_default_doc(longname=longname,\n extradoc=extradoc,\n docdict=docdict_discrete,\n discrete='discrete')\n else:\n dct = dict(distdiscrete)\n self._construct_doc(docdict_discrete, dct.get(self.name))\n\n #discrete RV do not have the scale parameter, remove it\n self.__doc__ = self.__doc__.replace(\n '\\n scale : array_like, '\n 'optional\\n scale parameter (default=1)', '')\n\n def _updated_ctor_param(self):\n \"\"\" Return the current version of _ctor_param, possibly updated by user.\n\n Used by freezing and pickling.\n Keep this in sync with the signature of __init__.\n \"\"\"\n dct = self._ctor_param.copy()\n dct['a'] = self.a\n dct['b'] = self.b\n dct['badvalue'] = self.badvalue\n dct['moment_tol'] = self.moment_tol\n dct['inc'] = self.inc\n dct['name'] = self.name\n dct['shapes'] = self.shapes\n dct['extradoc'] = self.extradoc\n return dct\n\n def _nonzero(self, k, *args):\n return floor(k) == k\n\n def _pmf(self, k, *args):\n return self._cdf(k, *args) - self._cdf(k-1, *args)\n\n def _logpmf(self, k, *args):\n return log(self._pmf(k, *args))\n\n def _cdf_single(self, k, *args):\n m = arange(int(self.a), k+1)\n return np.sum(self._pmf(m, *args), axis=0)\n\n def _cdf(self, x, *args):\n k = floor(x)\n return self._cdfvec(k, *args)\n\n # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic\n\n def rvs(self, *args, **kwargs):\n \"\"\"\n Random variates of given type.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n size : int or tuple of ints, optional\n Defining number of random variates (Default is 1). Note that `size`\n has to be given as keyword, not as positional argument.\n random_state : None or int or ``np.random.RandomState`` instance, optional\n If int or RandomState, use it for drawing the random variates.\n If None, rely on ``self.random_state``.\n Default is None.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of given `size`.\n\n \"\"\"\n kwargs['discrete'] = True\n return super(rv_discrete, self).rvs(*args, **kwargs)\n\n def pmf(self, k, *args, **kwds):\n \"\"\"\n Probability mass function at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n pmf : array_like\n Probability mass function evaluated at k\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logpmf(self, k, *args, **kwds):\n \"\"\"\n Log of the probability mass function at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter. Default is 0.\n\n Returns\n -------\n logpmf : array_like\n Log of the probability mass function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logpmf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, k, *args, **kwds):\n \"\"\"\n Cumulative distribution function of the given RV.\n\n Parameters\n ----------\n k : array_like, int\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n cdf : ndarray\n Cumulative distribution function evaluated at `k`.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k < self.b)\n cond2 = (k >= self.b)\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2*(cond0 == cond0), 1.0)\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logcdf(self, k, *args, **kwds):\n \"\"\"\n Log of the cumulative distribution function at k of the given RV.\n\n Parameters\n ----------\n k : array_like, int\n Quantiles.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n logcdf : array_like\n Log of the cumulative distribution function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k < self.b)\n cond2 = (k >= self.b)\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2*(cond0 == cond0), 0.0)\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logcdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def sf(self, k, *args, **kwds):\n \"\"\"\n Survival function (1 - `cdf`) at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n sf : array_like\n Survival function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray(k-loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k < self.b)\n cond2 = (k < self.a) & cond0\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._sf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logsf(self, k, *args, **kwds):\n \"\"\"\n Log of the survival function of the given RV.\n\n Returns the log of the \"survival function,\" defined as 1 - `cdf`,\n evaluated at `k`.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n logsf : ndarray\n Log of the survival function evaluated at `k`.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray(k-loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k < self.b)\n cond2 = (k < self.a) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of `cdf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n Lower tail probability.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n k : array_like\n Quantile corresponding to the lower tail probability, q.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n q, loc = map(asarray, (q, loc))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue, typecode='d')\n # output type 'd' to handle nin and inf\n place(output, (q == 0)*(cond == cond), self.a-1)\n place(output, cond2, self.b)\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._ppf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function (inverse of `sf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n Upper tail probability.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n k : ndarray or scalar\n Quantile corresponding to the upper tail probability, q.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n q, loc = map(asarray, (q, loc))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n\n # same problem as with ppf; copied from ppf and changed\n output = valarray(shape(cond), value=self.badvalue, typecode='d')\n # output type 'd' to handle nin and inf\n place(output, (q == 0)*(cond == cond), self.b)\n place(output, cond2, self.a-1)\n\n # call place only if at least 1 valid argument\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n # PB same as ticket 766\n place(output, cond, self._isf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n def _entropy(self, *args):\n if hasattr(self, 'pk'):\n return entropy(self.pk)\n else:\n return _expect(lambda x: entr(self.pmf(x, *args)),\n self.a, self.b, self.ppf(0.5, *args), self.inc)\n\n def expect(self, func=None, args=(), loc=0, lb=None, ub=None,\n conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):\n \"\"\"\n Calculate expected value of a function with respect to the distribution\n for discrete distribution.\n\n Parameters\n ----------\n func : callable, optional\n Function for which the expectation value is calculated.\n Takes only one argument.\n The default is the identity mapping f(k) = k.\n args : tuple, optional\n Shape parameters of the distribution.\n loc : float, optional\n Location parameter.\n Default is 0.\n lb, ub : int, optional\n Lower and upper bound for the summation, default is set to the\n support of the distribution, inclusive (``ul <= k <= ub``).\n conditional : bool, optional\n If true then the expectation is corrected by the conditional\n probability of the summation interval. 
The return value is the\n expectation of the function, `func`, conditional on being in\n the given interval (k such that ``ul <= k <= ub``).\n Default is False.\n maxcount : int, optional\n Maximal number of terms to evaluate (to avoid an endless loop for\n an infinite sum). Default is 1000.\n tolerance : float, optional\n Absolute tolerance for the summation. Default is 1e-10.\n chunksize : int, optional\n Iterate over the support of a distributions in chunks of this size.\n Default is 32.\n\n Returns\n -------\n expect : float\n Expected value.\n\n Notes\n -----\n For heavy-tailed distributions, the expected value may or may not exist,\n depending on the function, `func`. If it does exist, but the sum converges\n slowly, the accuracy of the result may be rather low. For instance, for\n ``zipf(4)``, accuracy for mean, variance in example is only 1e-5.\n increasing `maxcount` and/or `chunksize` may improve the result, but may also\n make zipf very slow.\n\n The function is not vectorized.\n\n \"\"\"\n if func is None:\n def fun(x):\n # loc and args from outer scope\n return (x+loc)*self._pmf(x, *args)\n else:\n def fun(x):\n # loc and args from outer scope\n return func(x+loc)*self._pmf(x, *args)\n # used pmf because _pmf does not check support in randint and there\n # might be problems(?) with correct self.a, self.b at this stage maybe\n # not anymore, seems to work now with _pmf\n\n self._argcheck(*args) # (re)generate scalar self.a and self.b\n if lb is None:\n lb = self.a\n else:\n lb = lb - loc # convert bound for standardized distribution\n if ub is None:\n ub = self.b\n else:\n ub = ub - loc # convert bound for standardized distribution\n if conditional:\n invfac = self.sf(lb-1, *args) - self.sf(ub, *args)\n else:\n invfac = 1.0\n\n # iterate over the support, starting from the median\n x0 = self.ppf(0.5, *args)\n res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)\n return res / invfac\n\n\ndef _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,\n chunksize=32):\n \"\"\"Helper for computing the expectation value of `fun`.\"\"\"\n\n # short-circuit if the support size is small enough\n if (ub - lb) <= chunksize:\n supp = np.arange(lb, ub+1, inc)\n vals = fun(supp)\n return np.sum(vals)\n\n # otherwise, iterate starting from x0\n if x0 < lb:\n x0 = lb\n if x0 > ub:\n x0 = ub\n\n count, tot = 0, 0.\n # iterate over [x0, ub] inclusive\n for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):\n count += x.size\n delta = np.sum(fun(x))\n tot += delta\n if abs(delta) < tolerance * x.size:\n break\n if count > maxcount:\n warnings.warn('expect(): sum did not converge', RuntimeWarning)\n return tot\n\n # iterate over [lb, x0)\n for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):\n count += x.size\n delta = np.sum(fun(x))\n tot += delta\n if abs(delta) < tolerance * x.size:\n break\n if count > maxcount:\n warnings.warn('expect(): sum did not converge', RuntimeWarning)\n break\n\n return tot\n\n\ndef _iter_chunked(x0, x1, chunksize=4, inc=1):\n \"\"\"Iterate from x0 to x1 in chunks of chunksize and steps inc.\n\n x0 must be finite, x1 need not be. In the latter case, the iterator is infinite.\n Handles both x0 < x1 and x0 > x1. 
In the latter case, iterates downwards\n (make sure to set inc < 0.)\n\n >>> [x for x in _iter_chunked(2, 5, inc=2)]\n [array([2, 4])]\n >>> [x for x in _iter_chunked(2, 11, inc=2)]\n [array([2, 4, 6, 8]), array([10])]\n >>> [x for x in _iter_chunked(2, -5, inc=-2)]\n [array([ 2, 0, -2, -4])]\n >>> [x for x in _iter_chunked(2, -9, inc=-2)]\n [array([ 2, 0, -2, -4]), array([-6, -8])]\n\n \"\"\"\n if inc == 0:\n raise ValueError('Cannot increment by zero.')\n if chunksize <= 0:\n raise ValueError('Chunk size must be positive; got %s.' % chunksize)\n\n s = 1 if inc > 0 else -1\n stepsize = abs(chunksize * inc)\n\n x = x0\n while (x - x1) * inc < 0:\n delta = min(stepsize, abs(x - x1))\n step = delta * s\n supp = np.arange(x, x + step, inc)\n x += step\n yield supp\n\n\ndef get_distribution_names(namespace_pairs, rv_base_class):\n \"\"\"\n Collect names of statistical distributions and their generators.\n\n Parameters\n ----------\n namespace_pairs : sequence\n A snapshot of (name, value) pairs in the namespace of a module.\n rv_base_class : class\n The base class of random variable generator classes in a module.\n\n Returns\n -------\n distn_names : list of strings\n Names of the statistical distributions.\n distn_gen_names : list of strings\n Names of the generators of the statistical distributions.\n Note that these are not simply the names of the statistical\n distributions, with a _gen suffix added.\n\n \"\"\"\n distn_names = []\n distn_gen_names = []\n for name, value in namespace_pairs:\n if name.startswith('_'):\n continue\n if name.endswith('_gen') and issubclass(value, rv_base_class):\n distn_gen_names.append(name)\n if isinstance(value, rv_base_class):\n distn_names.append(name)\n return distn_names, distn_gen_names\n" ]
[ [ "numpy.product", "numpy.sqrt", "scipy.special.ive", "numpy.asarray", "numpy.cumsum", "numpy.all", "numpy.seterr", "numpy.max", "scipy.special.kl_div", "numpy.isneginf", "numpy.any", "numpy.exp", "numpy.place", "scipy.misc.derivative", "scipy.special.entr", "scipy.special.chndtr", "numpy.reshape", "numpy.arange", "numpy.atleast_1d", "scipy._lib._util.getargspec_no_self", "scipy._lib._util.check_random_state", "numpy.argmax", "numpy.ravel", "scipy._lib.six.exec_", "numpy.log", "numpy.power", "numpy.min", "numpy.isnan", "numpy.floor", "scipy.integrate.quad", "numpy.find_common_type", "numpy.extract", "scipy.special.xlogy", "numpy.sum", "scipy.misc.doccer.docformat", "numpy.array", "numpy.isfinite", "numpy.ones", "scipy.special.comb", "numpy.vectorize", "numpy.shape", "numpy.isscalar", "scipy.optimize.brentq", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "0.15", "1.4", "0.16", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3" ], "tensorflow": [] } ]
tomztyang/DSGN
[ "135dabf96183f4502532dea0a45230bf9a23e2d8" ]
[ "dsgn/models/submodule.py" ]
[ "from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport math\nimport numpy as np\nfrom torch.nn import BatchNorm2d\n\ndef convbn(in_planes, out_planes, kernel_size, stride, pad, dilation, gn=False, groups=32):\n return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation = dilation, bias=False),\n nn.BatchNorm2d(out_planes) if not gn else nn.GroupNorm(groups, out_planes))\n\n\ndef convbn_3d(in_planes, out_planes, kernel_size, stride, pad, gn=False, groups=32):\n return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, padding=pad, stride=stride,bias=False),\n nn.BatchNorm3d(out_planes) if not gn else nn.GroupNorm(groups, out_planes))\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride, downsample, pad, dilation, gn=False):\n super(BasicBlock, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(inplanes, planes, 3, stride, pad, dilation, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(planes, planes, 3, 1, pad, dilation, gn=gn)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n\n if self.downsample is not None:\n x = self.downsample(x)\n\n out += x\n\n return out\n\nclass disparityregression(nn.Module):\n def __init__(self, maxdisp, cfg):\n super(disparityregression, self).__init__()\n self.disp = Variable(torch.Tensor(np.array(range(maxdisp))).cuda(), requires_grad=False)\n\n def forward(self, x, depth):\n out = torch.sum(x * depth[None, :, None, None],1)\n return out\n\nclass hourglass(nn.Module):\n def __init__(self, inplanes, gn=False):\n super(hourglass, self).__init__()\n\n self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1, gn=gn)\n\n self.conv3 = nn.Sequential(convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(\n nn.ConvTranspose3d(inplanes * 2, inplanes * 2, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm3d(inplanes * 2) if not gn else nn.GroupNorm(32, inplanes * 2)) # +conv2\n\n self.conv6 = nn.Sequential(\n nn.ConvTranspose3d(inplanes * 2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm3d(inplanes) if not gn else nn.GroupNorm(32, inplanes)) # +x\n\n def forward(self, x, presqu, postsqu):\n\n out = self.conv1(x) # in:1/4 out:1/8\n pre = self.conv2(out) # in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) # in:1/8 out:1/16\n out = self.conv4(out) # in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out) + pre, inplace=True)\n\n out = self.conv6(post) # in:1/8 out:1/4\n\n return out, pre, post\n\nclass hourglass2d(nn.Module):\n def __init__(self, inplanes, gn=False):\n super(hourglass2d, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(inplanes, inplanes * 2, 
kernel_size=3, stride=2, pad=1, dilation=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1, dilation=1, gn=gn)\n\n self.conv3 = nn.Sequential(convbn(inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1, dilation=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1, dilation=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(\n nn.ConvTranspose2d(inplanes * 2, inplanes * 2, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm2d(inplanes * 2) if not gn else nn.GroupNorm(32, inplanes * 2)) # +conv2\n\n self.conv6 = nn.Sequential(\n nn.ConvTranspose2d(inplanes * 2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm2d(inplanes) if not gn else nn.GroupNorm(32, inplanes)) # +x\n\n def forward(self, x, presqu, postsqu):\n\n out = self.conv1(x) # in:1/4 out:1/8\n pre = self.conv2(out) # in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) # in:1/8 out:1/16\n out = self.conv4(out) # in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out) + pre, inplace=True)\n\n out = self.conv6(post) # in:1/8 out:1/4\n\n return out, pre, post\n\nclass feature_extraction(nn.Module):\n def __init__(self, cfg):\n super(feature_extraction, self).__init__()\n\n self.cfg = cfg\n self.RPN3D_ENABLE = self.cfg.RPN3D_ENABLE\n self.cat_img_feature = getattr(self.cfg, 'cat_img_feature', False)\n self.rpn_onemore_conv = getattr(self.cfg, 'RPN_ONEMORE_CONV', False)\n self.rpn_onemore_dim = getattr(self.cfg, 'RPN_ONEMORE_DIM', 256)\n self.img_feature_relu = getattr(self.cfg, 'img_feature_relu', True)\n self.branch = getattr(self.cfg, 'branch', True)\n\n self.backbone = getattr(self.cfg, 'backbone', 'reslike-det-small')\n if self.backbone == 'reslike-det':\n first_dim = 64\n dims = [64, 128, 192, 256]\n nr_convs = [3, 6, 12, 4]\n branch_dim = 32\n lastconv_dim = [256, 32]\n elif self.backbone == 'reslike-det-small':\n first_dim = 64\n dims = [32, 64, 128, 192]\n nr_convs = [3, 6, 12, 4]\n branch_dim = 32\n lastconv_dim = [256, 32]\n else:\n raise ValueError('Invalid backbone {}.'.format(self.backbone))\n\n self.inplanes = first_dim\n\n self.firstconv = nn.Sequential(convbn(3, first_dim, 3, 2, 1, 1, gn=cfg.GN if first_dim >= 32 else False),\n nn.ReLU(inplace=True),\n convbn(first_dim, first_dim, 3, 1, 1, 1, gn=cfg.GN if first_dim >= 32 else False),\n nn.ReLU(inplace=True),\n convbn(first_dim, first_dim, 3, 1, 1, 1, gn=cfg.GN if first_dim >= 32 else False),\n nn.ReLU(inplace=True))\n\n self.layer1 = self._make_layer(BasicBlock, dims[0], nr_convs[0], 1,1,1, gn=cfg.GN if dims[0] >= 32 else False)\n self.layer2 = self._make_layer(BasicBlock, dims[1], nr_convs[1], 2,1,1, gn=cfg.GN) \n self.layer3 = self._make_layer(BasicBlock, dims[2], nr_convs[2], 1,1,1, gn=cfg.GN)\n self.layer4 = self._make_layer(BasicBlock, dims[3], nr_convs[3], 1,1,2, gn=cfg.GN)\n\n if self.branch:\n self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64,64)),\n convbn(dims[3], branch_dim, 1, 1, 0, 1, gn=cfg.GN, groups=min(32, branch_dim)),\n nn.ReLU(inplace=True))\n\n self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32,32)),\n convbn(dims[3], branch_dim, 1, 1, 0, 1, gn=cfg.GN, groups=min(32, branch_dim)),\n 
nn.ReLU(inplace=True))\n\n self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16,16)),\n convbn(dims[3], branch_dim, 1, 1, 0, 1, gn=cfg.GN, groups=min(32, branch_dim)),\n nn.ReLU(inplace=True))\n\n self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8,8)),\n convbn(dims[3], branch_dim, 1, 1, 0, 1, gn=cfg.GN, groups=min(32, branch_dim)),\n nn.ReLU(inplace=True))\n\n if self.branch:\n concat_dim = branch_dim * 4 + dims[1] + dims[3] + dims[2]\n else:\n concat_dim = dims[1] + dims[3] + dims[2]\n\n self.PlaneSweepVolume = getattr(cfg, 'PlaneSweepVolume', True)\n if self.PlaneSweepVolume:\n self.lastconv = nn.Sequential(convbn(concat_dim, lastconv_dim[0], 3, 1, 1, 1, gn=cfg.GN),\n nn.ReLU(inplace=True),\n nn.Conv2d(lastconv_dim[0], lastconv_dim[1], kernel_size=1, padding=0, stride = 1, bias=False))\n\n if self.cfg.RPN3D_ENABLE and self.cat_img_feature:\n if self.rpn_onemore_conv:\n rpnconvs = [convbn(concat_dim, self.rpn_onemore_dim, 3, 1, 1, 1, gn=cfg.GN),\n nn.ReLU(inplace=True),\n convbn(self.rpn_onemore_dim, self.cfg.RPN_CONVDIM, 3, 1, 1, 1, gn=cfg.GN, groups=(32 if self.cfg.RPN_CONVDIM % 32 == 0 else 16))]\n else:\n rpnconvs = [convbn(concat_dim, self.cfg.RPN_CONVDIM, 3, 1, 1, 1, gn=cfg.GN, groups=(32 if self.cfg.RPN_CONVDIM % 32 == 0 else 16))]\n if self.img_feature_relu:\n rpnconvs.append( nn.ReLU(inplace=True) )\n self.rpnconv = nn.Sequential( *rpnconvs )\n\n def _make_layer(self, block, planes, blocks, stride, pad, dilation, gn=False):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion) if not gn else nn.GroupNorm(32, planes * block.expansion))\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, pad, dilation, gn=gn))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes,1,None,pad,dilation, gn=gn))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n output = self.firstconv(x) ; #print('conv1', output.shape) # (1, 32, 192, 624)\n output = self.layer1(output) ; #print('conv2', output.shape) # (1, 32, 192, 624)\n output_raw = self.layer2(output) ; #print('conv3', output_raw.shape) # (1, 64, 96, 312)\n output_mid = self.layer3(output_raw) ; #print('conv4', output.shape) # (1, 128, 96, 312)\n output_skip = self.layer4(output_mid) ; #print('conv5', output_skip.shape) # (1, 128, 96, 312)\n\n if self.branch:\n output_branch1 = self.branch1(output_skip) ; #print('b1', output_branch1.shape) # (1, 32, 1, 4) # avgpool 64\n output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear', align_corners=self.cfg.align_corners) # (1, 32, 96, 312)\n\n output_branch2 = self.branch2(output_skip) ; #print('b2', output_branch2.shape)# (1, 32, 3, 9)\n output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear', align_corners=self.cfg.align_corners)\n\n output_branch3 = self.branch3(output_skip) ; #print('b3', output_branch3.shape)# (1, 32, 6, 19)\n output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear', align_corners=self.cfg.align_corners)\n\n output_branch4 = self.branch4(output_skip) ; #print('b4', output_branch4.shape)# (1, 32, 12, 39)\n output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear', 
align_corners=self.cfg.align_corners)\n\n if self.branch:\n concat_feature = torch.cat((output_raw, output_mid, output_skip, output_branch4, output_branch3, output_branch2, output_branch1), 1) ; #print('cat', concat_feature.shape)\n else:\n concat_feature = torch.cat((output_raw, output_mid, output_skip), 1)\n \n if self.RPN3D_ENABLE and self.cat_img_feature:\n rpn_feature = self.rpnconv(concat_feature)\n else:\n rpn_feature = None\n\n if self.PlaneSweepVolume:\n output_feature = self.lastconv(concat_feature) ; #print('last', output_feature.shape)\n else:\n output_feature = None\n\n return output_feature, rpn_feature\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ConvTranspose2d", "torch.cat", "torch.nn.ConvTranspose3d", "torch.nn.Conv2d", "torch.sum", "torch.nn.Conv3d", "torch.nn.functional.relu", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.GroupNorm", "torch.nn.ReLU", "torch.nn.BatchNorm3d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aegissystems/yolact
[ "29ee13f41a83b62a8946a86cb01e49d8fce0676c" ]
[ "utils/functions.py" ]
[ "import torch\nimport torch.nn as nn\nimport os\nimport math\nfrom collections import deque\nfrom pathlib import Path\nfrom YOLACT.layers.interpolate import InterpolateModule\n\nclass MovingAverage():\n \"\"\" Keeps an average window of the specified number of items. \"\"\"\n\n def __init__(self, max_window_size=1000):\n self.max_window_size = max_window_size\n self.reset()\n\n def add(self, elem):\n \"\"\" Adds an element to the window, removing the earliest element if necessary. \"\"\"\n if not math.isfinite(elem):\n print('Warning: Moving average ignored a value of %f' % elem)\n return\n \n self.window.append(elem)\n self.sum += elem\n\n if len(self.window) > self.max_window_size:\n self.sum -= self.window.popleft()\n \n def append(self, elem):\n \"\"\" Same as add just more pythonic. \"\"\"\n self.add(elem)\n\n def reset(self):\n \"\"\" Resets the MovingAverage to its initial state. \"\"\"\n self.window = deque()\n self.sum = 0\n\n def get_avg(self):\n \"\"\" Returns the average of the elements in the window. \"\"\"\n return self.sum / max(len(self.window), 1)\n\n def __str__(self):\n return str(self.get_avg())\n \n def __repr__(self):\n return repr(self.get_avg())\n \n def __len__(self):\n return len(self.window)\n\n\nclass ProgressBar():\n \"\"\" A simple progress bar that just outputs a string. \"\"\"\n\n def __init__(self, length, max_val):\n self.max_val = max_val\n self.length = length\n self.cur_val = 0\n \n self.cur_num_bars = -1\n self._update_str()\n\n def set_val(self, new_val):\n self.cur_val = new_val\n\n if self.cur_val > self.max_val:\n self.cur_val = self.max_val\n if self.cur_val < 0:\n self.cur_val = 0\n\n self._update_str()\n \n def is_finished(self):\n return self.cur_val == self.max_val\n\n def _update_str(self):\n num_bars = int(self.length * (self.cur_val / self.max_val))\n\n if num_bars != self.cur_num_bars:\n self.cur_num_bars = num_bars\n self.string = '█' * num_bars + '░' * (self.length - num_bars)\n \n def __repr__(self):\n return self.string\n \n def __str__(self):\n return self.string\n\n\ndef init_console():\n \"\"\"\n Initialize the console to be able to use ANSI escape characters on Windows.\n \"\"\"\n if os.name == 'nt':\n from colorama import init\n init()\n\n\nclass SavePath:\n \"\"\"\n Why is this a class?\n Why do I have a class for creating and parsing save paths?\n What am I doing with my life?\n \"\"\"\n\n def __init__(self, model_name:str, epoch:int, iteration:int):\n self.model_name = model_name\n self.epoch = epoch\n self.iteration = iteration\n\n def get_path(self, root:str=''):\n file_name = self.model_name + '_' + str(self.epoch) + '_' + str(self.iteration) + '.pth'\n return os.path.join(root, file_name)\n\n @staticmethod\n def from_str(path:str):\n file_name = os.path.basename(path)\n \n if file_name.endswith('.pth'):\n file_name = file_name[:-4]\n \n params = file_name.split('_')\n\n if file_name.endswith('interrupt'):\n params = params[:-1]\n \n model_name = '_'.join(params[:-2])\n epoch = params[-2]\n iteration = params[-1]\n \n return SavePath(model_name, int(epoch), int(iteration))\n\n @staticmethod\n def remove_interrupt(save_folder):\n for p in Path(save_folder).glob('*_interrupt.pth'):\n p.unlink()\n \n @staticmethod\n def get_interrupt(save_folder):\n for p in Path(save_folder).glob('*_interrupt.pth'): \n return str(p)\n return None\n \n @staticmethod\n def get_latest(save_folder, config):\n \"\"\" Note: config should be config.name. 
\"\"\"\n max_iter = -1\n max_name = None\n\n for p in Path(save_folder).glob(config + '_*'):\n path_name = str(p)\n\n try:\n save = SavePath.from_str(path_name)\n except:\n continue \n \n if save.model_name == config and save.iteration > max_iter:\n max_iter = save.iteration\n max_name = path_name\n\n return max_name\n\ndef make_net(in_channels, conf, include_last_relu=True):\n \"\"\"\n A helper function to take a config setting and turn it into a network.\n Used by protonet and extrahead. Returns (network, out_channels)\n \"\"\"\n def make_layer(layer_cfg):\n nonlocal in_channels\n \n # Possible patterns:\n # ( 256, 3, {}) -> conv\n # ( 256,-2, {}) -> deconv\n # (None,-2, {}) -> bilinear interpolate\n # ('cat',[],{}) -> concat the subnetworks in the list\n #\n # You know it would have probably been simpler just to adopt a 'c' 'd' 'u' naming scheme.\n # Whatever, it's too late now.\n if isinstance(layer_cfg[0], str):\n layer_name = layer_cfg[0]\n\n if layer_name == 'cat':\n nets = [make_net(in_channels, x) for x in layer_cfg[1]]\n layer = Concat([net[0] for net in nets], layer_cfg[2])\n num_channels = sum([net[1] for net in nets])\n else:\n num_channels = layer_cfg[0]\n kernel_size = layer_cfg[1]\n\n if kernel_size > 0:\n layer = nn.Conv2d(in_channels, num_channels, kernel_size, **layer_cfg[2])\n else:\n if num_channels is None:\n layer = InterpolateModule(scale_factor=-kernel_size, mode='bilinear', align_corners=False, **layer_cfg[2])\n else:\n layer = nn.ConvTranspose2d(in_channels, num_channels, -kernel_size, **layer_cfg[2])\n \n in_channels = num_channels if num_channels is not None else in_channels\n\n # Don't return a ReLU layer if we're doing an upsample. This probably doesn't affect anything\n # output-wise, but there's no need to go through a ReLU here.\n # Commented out for backwards compatibility with previous models\n # if num_channels is None:\n # return [layer]\n # else:\n return [layer, nn.ReLU(inplace=True)]\n\n # Use sum to concat together all the component layer lists\n net = sum([make_layer(x) for x in conf], [])\n if not include_last_relu:\n net = net[:-1]\n\n return nn.Sequential(*(net)), in_channels" ]
[ [ "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.ConvTranspose2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bio-ontology-research-group/deeppheno
[ "8a58c6efcdd6861c2e5c380bb81690618e58b629" ]
[ "results.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport click as ck\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport sys\nfrom collections import deque\nimport time\nimport logging\nfrom sklearn.metrics import roc_curve, auc, matthews_corrcoef\nfrom scipy.spatial import distance\nfrom scipy import sparse\nimport math\nfrom utils import FUNC_DICT, Ontology, NAMESPACES\nfrom matplotlib import pyplot as plt\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\n\[email protected]()\[email protected](\n '--method', '-m', default='',\n help='model method')\[email protected](\n '--ont', '-o', default='organ',\n help='model method')\ndef main(method, ont):\n # res = {}\n # for fold in range(1,6):\n # with open(f'fold{fold}_data-cafa/predictions{method}.pkl.{ont}.res') as f:\n # lines = f.read().splitlines()\n # items = lines[-1].split(', ')\n # for item in items:\n # it = item.split(': ')\n # if it[0] not in res:\n # res[it[0]] = []\n # res[it[0]].append(float(it[1]))\n # with open(f'fold{fold}_data-cafa/predictions{method}.pkl.auc.{ont}.res') as f:\n # lines = f.read().splitlines()\n # auc = float(lines[-1])\n # if 'mauc' not in res:\n # res['mauc'] = []\n # res['mauc'].append(auc)\n \n # avg = {}\n # avg_err = {}\n # for key in res:\n # res[key] = np.array(res[key])\n # avg[key] = np.mean(res[key])\n # avg_err[key] = np.mean(np.abs(res[key] - avg[key]))\n \n # res_flat = {}\n # for fold in range(1,6):\n # with open(f'fold{fold}_data-cafa/predictions{method}.pkl_flat.pkl.{ont}.res') as f:\n # lines = f.read().splitlines()\n # items = lines[-1].split(', ')\n # for item in items:\n # it = item.split(': ')\n # if it[0] not in res_flat:\n # res_flat[it[0]] = []\n # res_flat[it[0]].append(float(it[1]))\n # with open(f'fold{fold}_data-cafa/predictions{method}.pkl_flat.pkl.auc.{ont}.res') as f:\n # lines = f.read().splitlines()\n # auc = float(lines[-1])\n # if 'mauc' not in res_flat:\n # res_flat['mauc'] = []\n # res_flat['mauc'].append(auc)\n \n # avg_flat = {}\n # avg_flat_err = {}\n # for key in res_flat:\n # res_flat[key] = np.array(res_flat[key])\n # avg_flat[key] = np.mean(res_flat[key])\n # avg_flat_err[key] = np.mean(np.abs(res_flat[key] - avg_flat[key]))\n\n # auc = avg_flat['mauc']\n # fmax = avg_flat['Fmax']\n # smin = avg_flat['Smin']\n # aupr = avg_flat['AUPR']\n # auce = avg_flat_err['mauc']\n # fmaxe = avg_flat_err['Fmax']\n # smine = avg_flat_err['Smin']\n # aupre = avg_flat_err['AUPR']\n # print(f'DeepPhenoFlat & {fmax:0.3f} $\\pm$ {fmaxe:0.3f} & {smin:0.3f} $\\pm$ {smine:0.3f} & {aupr:0.3f} $\\pm$ {aupre:0.3f} & {auc:0.3f} $\\pm$ {auce:0.3f} \\\\\\\\')\n # print('\\\\hline')\n\n # auc = avg['mauc']\n # fmax = avg['Fmax']\n # smin = avg['Smin']\n # aupr = avg['AUPR']\n # auce = avg_err['mauc']\n # fmaxe = avg_err['Fmax']\n # smine = avg_err['Smin']\n # aupre = avg_err['AUPR']\n # print(f'DeepPheno & {fmax:0.3f} $\\pm$ {fmaxe:0.3f} & {smin:0.3f} $\\pm$ {smine:0.3f} & {aupr:0.3f} $\\pm$ {aupre:0.3f} & {auc:0.3f} $\\pm$ {auce:0.3f} \\\\\\\\')\n\n # res_gd = {}\n # gd = {}\n # gd_err = {}\n # for fold in range(1,6):\n # with open(f'fold{fold}_data/sim_gene_disease{method}.txt.res') as f:\n # lines = f.read().splitlines()\n # res = lines[-1].split(' ')\n # for i, item in enumerate(res):\n # if i not in res_gd:\n # res_gd[i] = []\n # res_gd[i].append(float(item))\n # for key in res_gd:\n # res_gd[key] = np.array(res_gd[key])\n # gd[key] = np.mean(res_gd[key])\n # gd_err[key] = 
np.mean(np.abs(res_gd[key] - gd[key]))\n \n # print(f'{gd[0]:0.2f} {gd[1]:0.2f} {gd[2]:0.2f} {gd[3]:0.2f}')\n\n res_phenos = {}\n phenos = {}\n ph = {}\n ph_err = {}\n for fold in range(1,6):\n with open(f'fold{fold}_data/phenotype_results.tsv') as f:\n for line in f:\n it = line.strip().split('\\t')\n if it[0] not in res_phenos:\n res_phenos[it[0]] = []\n phenos[it[0]] = it\n res_phenos[it[0]].append(float(it[2]))\n for key in res_phenos:\n res_phenos[key] = np.array(res_phenos[key])\n ph[key] = np.mean(res_phenos[key])\n ph_err[key] = np.mean(np.abs(res_phenos[key] - ph[key]))\n \n res = []\n for key, it in phenos.items():\n res.append((it[0], it[1], ph[key], ph_err[key], it[3], it[4]))\n res = sorted(res, key=lambda x: x[2], reverse=True)\n with open('data/phenotype_results.tsv', 'w') as f:\n f.write('HP\\tLabel\\tFmax\\n')\n for it in res:\n f.write(f'{it[0]} & {it[1]} & {it[2]:.3f} $\\pm$ {it[3]:.3f} \\\\\\\\ \\n')\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.mean", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
souravsingh/chainercv
[ "8f76510472bc95018c183e72f37bc6c34a89969c", "8f76510472bc95018c183e72f37bc6c34a89969c", "8f76510472bc95018c183e72f37bc6c34a89969c", "8f76510472bc95018c183e72f37bc6c34a89969c", "8f76510472bc95018c183e72f37bc6c34a89969c", "8f76510472bc95018c183e72f37bc6c34a89969c", "8f76510472bc95018c183e72f37bc6c34a89969c" ]
[ "tests/utils_tests/testing_tests/assertions_tests/test_assert_is_bbox_dataset.py", "tests/visualizations_tests/test_vis_bbox.py", "chainercv/visualizations/vis_image.py", "chainercv/experimental/links/model/fcis/utils/proposal_target_creator.py", "tests/transforms_tests/bbox_tests/test_flip_bbox.py", "chainercv/links/model/ssd/multibox_coder.py", "chainercv/transforms/image/random_sized_crop.py" ]
[ "import numpy as np\nimport unittest\n\nfrom chainer.dataset import DatasetMixin\nfrom chainer import testing\n\nfrom chainercv.utils import assert_is_bbox_dataset\nfrom chainercv.utils import generate_random_bbox\n\n\nclass BboxDataset(DatasetMixin):\n\n def __init__(self, options=(), empty_bbox=False):\n self.options = options\n self.empty_bbox = empty_bbox\n\n def __len__(self):\n return 10\n\n def get_example(self, i):\n img = np.random.randint(0, 256, size=(3, 48, 64))\n if self.empty_bbox:\n n_bbox = 0\n else:\n n_bbox = np.random.randint(10, 20)\n bbox = generate_random_bbox(n_bbox, (48, 64), 5, 20)\n label = np.random.randint(0, 20, size=n_bbox).astype(np.int32)\n\n return (img, bbox, label) + self.options\n\n\nclass InvalidSampleSizeDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(\n InvalidSampleSizeDataset, self).get_example(i)[:3]\n return img, bbox\n\n\nclass InvalidImageDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(InvalidImageDataset, self).get_example(i)[:3]\n return img[0], bbox, label\n\n\nclass InvalidBboxDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(InvalidBboxDataset, self).get_example(i)[:3]\n bbox += 1000\n return img, bbox, label\n\n\nclass InvalidLabelDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(InvalidLabelDataset, self).get_example(i)[:3]\n label += 1000\n return img, bbox, label\n\n\nclass MismatchLengthDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(\n MismatchLengthDataset, self).get_example(i)[:3]\n return img, bbox, label[1:]\n\n\[email protected](\n {'dataset': BboxDataset(), 'valid': True},\n {'dataset': BboxDataset(empty_bbox=True), 'valid': True},\n {'dataset': BboxDataset(('option',)), 'valid': True},\n {'dataset': InvalidSampleSizeDataset(), 'valid': False},\n {'dataset': InvalidImageDataset(), 'valid': False},\n {'dataset': InvalidBboxDataset(), 'valid': False},\n {'dataset': InvalidLabelDataset(), 'valid': False},\n {'dataset': MismatchLengthDataset(), 'valid': False},\n)\nclass TestAssertIsBboxDataset(unittest.TestCase):\n\n def test_assert_is_bbox_dataset(self):\n if self.valid:\n assert_is_bbox_dataset(self.dataset, 20)\n else:\n with self.assertRaises(AssertionError):\n assert_is_bbox_dataset(self.dataset, 20)\n\n\ntesting.run_module(__name__, __file__)\n", "import unittest\n\nimport numpy as np\n\nfrom chainer import testing\n\nfrom chainercv.utils import generate_random_bbox\nfrom chainercv.visualizations import vis_bbox\n\ntry:\n import matplotlib # NOQA\n _available = True\nexcept ImportError:\n _available = False\n\n\[email protected](\n *testing.product_dict([\n {\n 'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2')},\n {\n 'n_bbox': 3, 'label': (0, 1, 2), 'score': None,\n 'label_names': ('c0', 'c1', 'c2')},\n {\n 'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),\n 'label_names': None},\n {\n 'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2')},\n {\n 'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),\n 'label_names': None},\n {\n 'n_bbox': 3, 'label': None, 'score': None,\n 'label_names': None},\n {\n 'n_bbox': 3, 'label': (0, 1, 1), 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2')},\n {\n 'n_bbox': 0, 'label': (), 'score': (),\n 'label_names': ('c0', 'c1', 'c2')},\n {\n 'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2'), 'no_img': True},\n {\n 'n_bbox': 
3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2'),\n 'instance_colors': [\n (255, 0, 0), (0, 255, 0), (0, 0, 255), (100, 100, 100)]},\n ], [{'sort_by_score': False}, {'sort_by_score': True}]))\[email protected](_available, 'Matplotlib is not installed')\nclass TestVisBbox(unittest.TestCase):\n\n def setUp(self):\n if hasattr(self, 'no_img'):\n self.img = None\n else:\n self.img = np.random.randint(0, 255, size=(3, 32, 48))\n self.bbox = generate_random_bbox(\n self.n_bbox, (48, 32), 8, 16)\n if self.label is not None:\n self.label = np.array(self.label, dtype=int)\n if self.score is not None:\n self.score = np.array(self.score)\n if not hasattr(self, 'instance_colors'):\n self.instance_colors = None\n\n def test_vis_bbox(self):\n ax = vis_bbox(\n self.img, self.bbox, self.label, self.score,\n label_names=self.label_names,\n instance_colors=self.instance_colors,\n sort_by_score=self.sort_by_score)\n\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n\n\[email protected](*testing.product_dict([\n {\n 'n_bbox': 3, 'label': (0, 1), 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2')},\n {\n 'n_bbox': 3, 'label': (0, 1, 2, 1), 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2')},\n\n {\n 'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5),\n 'label_names': ('c0', 'c1', 'c2')},\n {\n 'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1, 0.75),\n 'label_names': ('c0', 'c1', 'c2')},\n\n {\n 'n_bbox': 3, 'label': (0, 1, 3), 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2')},\n {\n 'n_bbox': 3, 'label': (-1, 1, 2), 'score': (0, 0.5, 1),\n 'label_names': ('c0', 'c1', 'c2')},\n], [{'sort_by_score': False}, {'sort_by_score': True}]))\[email protected](_available, 'Matplotlib is not installed')\nclass TestVisBboxInvalidInputs(unittest.TestCase):\n\n def setUp(self):\n self.img = np.random.randint(0, 255, size=(3, 32, 48))\n self.bbox = np.random.uniform(size=(self.n_bbox, 4))\n if self.label is not None:\n self.label = np.array(self.label, dtype=int)\n if self.score is not None:\n self.score = np.array(self.score)\n if not hasattr(self, 'instance_colors'):\n self.instance_colors = None\n\n def test_vis_bbox_invalid_inputs(self):\n with self.assertRaises(ValueError):\n vis_bbox(\n self.img, self.bbox, self.label, self.score,\n label_names=self.label_names,\n instance_colors=self.instance_colors,\n sort_by_score=self.sort_by_score)\n\n\ntesting.run_module(__name__, __file__)\n", "import numpy as np\n\n\ndef vis_image(img, ax=None):\n \"\"\"Visualize a color image.\n\n Args:\n img (~numpy.ndarray): An array of shape :math:`(3, height, width)`.\n This is in RGB format and the range of its value is\n :math:`[0, 255]`. If this is :obj:`None`, no image is displayed.\n ax (matplotlib.axes.Axis): The visualization is displayed on this\n axis. 
If this is :obj:`None` (default), a new axis is created.\n\n Returns:\n ~matploblib.axes.Axes:\n Returns the Axes object with the plot for further tweaking.\n\n \"\"\"\n from matplotlib import pyplot as plot\n if ax is None:\n fig = plot.figure()\n ax = fig.add_subplot(1, 1, 1)\n if img is not None:\n # CHW -> HWC\n img = img.transpose((1, 2, 0))\n ax.imshow(img.astype(np.uint8))\n return ax\n", "import numpy as np\n\nfrom chainer import cuda\n\nfrom chainercv.links.model.faster_rcnn.utils.bbox2loc import bbox2loc\nfrom chainercv.transforms.image.resize import resize\nfrom chainercv.utils.bbox.bbox_iou import bbox_iou\n\n\nclass ProposalTargetCreator(object):\n \"\"\"Assign ground truth classes, bounding boxes and masks to given RoIs.\n\n The :meth:`__call__` of this class generates training targets\n for each object proposal.\n This is used to train FCIS [#FCIS]_.\n\n .. [#FCIS] Yi Li, Haozhi Qi, Jifeng Dai, Xiangyang Ji, Yichen Wei. \\\n Fully Convolutional Instance-aware Semantic Segmentation. CVPR 2017.\n\n Args:\n n_sample (int): The number of sampled regions.\n pos_ratio (float): Fraction of regions that is labeled as a\n foreground.\n pos_iou_thresh (float): IoU threshold for a RoI to be considered as a\n foreground.\n neg_iou_thresh_hi (float): RoI is considered to be the background\n if IoU is in\n [:obj:`neg_iou_thresh_hi`, :obj:`neg_iou_thresh_hi`).\n neg_iou_thresh_lo (float): See above.\n binary_thresh (float): Threshold for resized mask.\n\n \"\"\"\n\n def __init__(\n self, n_sample=128,\n pos_ratio=0.25, pos_iou_thresh=0.5,\n neg_iou_thresh_hi=0.5, neg_iou_thresh_lo=0.1,\n binary_thresh=0.4):\n\n self.n_sample = n_sample\n self.pos_ratio = pos_ratio\n self.pos_iou_thresh = pos_iou_thresh\n self.neg_iou_thresh_hi = neg_iou_thresh_hi\n self.neg_iou_thresh_lo = neg_iou_thresh_lo\n self.binary_thresh = binary_thresh\n\n def __call__(\n self, roi, mask, label, bbox,\n loc_normalize_mean=(0., 0., 0., 0.),\n loc_normalize_std=(0.2, 0.2, 0.5, 0.5),\n mask_size=(21, 21),\n ):\n \"\"\"Assigns ground truth to sampled proposals.\n\n This function samples total of :obj:`self.n_sample` RoIs\n from the combination of :obj:`roi`, :obj:`mask`, :obj:`label`\n and :obj: `bbox`. The RoIs are assigned with the ground truth class\n labels as well as bounding box offsets and scales to match the ground\n truth bounding boxes. As many as :obj:`pos_ratio * self.n_sample` RoIs\n are sampled as foregrounds.\n\n Offsets and scales of bounding boxes are calculated using\n :func:`chainercv.links.model.faster_rcnn.bbox2loc`.\n Also, types of input arrays and output arrays are same.\n\n Here are notations.\n\n * :math:`S` is the total number of sampled RoIs, which equals \\\n :obj:`self.n_sample`.\n * :math:`L` is number of object classes possibly including the \\\n background.\n * :math:`H` is the image height.\n * :math:`W` is the image width.\n * :math:`RH` is the mask height.\n * :math:`RW` is the mask width.\n\n Args:\n roi (array): Region of Interests (RoIs) from which we sample.\n Its shape is :math:`(R, 4)`\n mask (array): The coordinates of ground truth masks.\n Its shape is :math:`(R', H, W)`.\n label (array): Ground truth bounding box labels. Its shape\n is :math:`(R',)`. 
Its range is :math:`[0, L - 1]`, where\n :math:`L` is the number of foreground classes.\n bbox (array): The coordinates of ground truth bounding boxes.\n Its shape is :math:`(R', 4)`.\n loc_normalize_mean (tuple of four floats): Mean values to normalize\n coordinates of bounding boxes.\n loc_normalize_std (tuple of four floats): Standard deviation of\n the coordinates of bounding boxes.\n mask_size (tuple of int or int): Generated mask size, which is\n equal to :math:`(RH, RW)`.\n\n Returns:\n (array, array, array, array):\n\n * **sample_roi**: Regions of interests that are sampled. \\\n Its shape is :math:`(S, 4)`.\n * **gt_roi_mask**: Masks assigned to sampled RoIs. Its shape is \\\n :math:`(S, RH, RW)`.\n * **gt_roi_label**: Labels assigned to sampled RoIs. Its shape is \\\n :math:`(S,)`. Its range is :math:`[0, L]`. The label with \\\n value 0 is the background.\n * **gt_roi_loc**: Offsets and scales to match \\\n the sampled RoIs to the ground truth bounding boxes. \\\n Its shape is :math:`(S, 4)`.\n\n \"\"\"\n\n xp = cuda.get_array_module(roi)\n roi = cuda.to_cpu(roi)\n mask = cuda.to_cpu(mask)\n label = cuda.to_cpu(label)\n bbox = cuda.to_cpu(bbox)\n\n if not isinstance(mask_size, tuple):\n mask_size = (mask_size, mask_size)\n\n n_bbox, _ = bbox.shape\n\n roi = np.concatenate((roi, bbox), axis=0)\n\n if self.n_sample is None:\n n_sample = roi.shape[0]\n else:\n n_sample = self.n_sample\n\n pos_roi_per_image = np.round(n_sample * self.pos_ratio)\n iou = bbox_iou(roi, bbox)\n gt_assignment = iou.argmax(axis=1)\n max_iou = iou.max(axis=1)\n\n # Offset range of classes from [0, n_fg_class - 1] to [1, n_fg_class].\n # The label with value 0 is the background.\n gt_roi_label = label[gt_assignment] + 1\n\n # Select foreground RoIs as those with >= pos_iou_thresh IoU.\n pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]\n pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))\n if pos_index.size > 0:\n pos_index = np.random.choice(\n pos_index, size=pos_roi_per_this_image, replace=False)\n\n # Select background RoIs as those within\n # [neg_iou_thresh_lo, neg_iou_thresh_hi).\n neg_index = np.where((max_iou < self.neg_iou_thresh_hi) &\n (max_iou >= self.neg_iou_thresh_lo))[0]\n neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image\n neg_roi_per_this_image = int(min(neg_roi_per_this_image,\n neg_index.size))\n if neg_index.size > 0:\n neg_index = np.random.choice(\n neg_index, size=neg_roi_per_this_image, replace=False)\n\n # The indices that we're selecting (both foreground and background).\n keep_index = np.append(pos_index, neg_index)\n gt_roi_label = gt_roi_label[keep_index]\n gt_roi_label[pos_roi_per_this_image:] = 0 # negative labels --> 0\n sample_roi = roi[keep_index]\n\n # locs\n # Compute offsets and scales to match sampled RoIs to the GTs.\n loc_normalize_mean = np.array(loc_normalize_mean, np.float32)\n loc_normalize_std = np.array(loc_normalize_std, np.float32)\n gt_roi_loc = bbox2loc(sample_roi, bbox[gt_assignment[keep_index]])\n gt_roi_loc = gt_roi_loc - loc_normalize_mean\n gt_roi_loc = gt_roi_loc / loc_normalize_std\n\n # masks\n gt_roi_mask = -1 * np.ones(\n (len(keep_index), mask_size[0], mask_size[1]),\n dtype=np.int32)\n\n for i, pos_ind in enumerate(pos_index):\n bb = np.round(sample_roi[i]).astype(np.int)\n gt_msk = mask[gt_assignment[pos_ind]]\n gt_roi_msk = gt_msk[bb[0]:bb[2], bb[1]:bb[3]]\n gt_roi_msk = resize(\n gt_roi_msk.astype(np.float32)[None], mask_size)[0]\n gt_roi_msk = (gt_roi_msk >= self.binary_thresh).astype(np.int)\n 
gt_roi_mask[i] = gt_roi_msk\n\n if xp != np:\n sample_roi = cuda.to_gpu(sample_roi)\n gt_roi_mask = cuda.to_gpu(gt_roi_mask)\n gt_roi_label = cuda.to_gpu(gt_roi_label)\n gt_roi_loc = cuda.to_gpu(gt_roi_loc)\n\n return sample_roi, gt_roi_mask, gt_roi_label, gt_roi_loc\n", "import unittest\n\nimport numpy as np\n\nfrom chainer import testing\nfrom chainercv.transforms import flip_bbox\nfrom chainercv.utils.testing.generate_random_bbox import generate_random_bbox\n\n\nclass TestFlipBbox(unittest.TestCase):\n\n def test_flip_bbox(self):\n size = (32, 24)\n bbox = generate_random_bbox(10, size, 0, min(size))\n\n out = flip_bbox(bbox, size=size, y_flip=True)\n bbox_expected = bbox.copy()\n bbox_expected[:, 0] = size[0] - bbox[:, 2]\n bbox_expected[:, 2] = size[0] - bbox[:, 0]\n np.testing.assert_equal(out, bbox_expected)\n\n out = flip_bbox(bbox, size=size, x_flip=True)\n bbox_expected = bbox.copy()\n bbox_expected[:, 1] = size[1] - bbox[:, 3]\n bbox_expected[:, 3] = size[1] - bbox[:, 1]\n np.testing.assert_equal(out, bbox_expected)\n\n\ntesting.run_module(__name__, __file__)\n", "from __future__ import division\n\nimport itertools\nimport numpy as np\n\nimport chainer\n\nfrom chainercv import utils\n\n\nclass MultiboxCoder(object):\n \"\"\"A helper class to encode/decode bounding boxes.\n\n This class encodes :obj:`(bbox, label)` to :obj:`(mb_loc, mb_label)`\n and decodes :obj:`(mb_loc, mb_conf)` to :obj:`(bbox, label, score)`.\n These encoding/decoding are used in Single Shot Multibox Detector [#]_.\n\n * :obj:`mb_loc`: An array representing offsets and scales \\\n from the default bounding boxes. \\\n Its shape is :math:`(K, 4)`, where :math:`K` is the number of \\\n the default bounding boxes. \\\n The second axis is composed by \\\n :math:`(\\Delta y, \\Delta x, \\Delta h, \\Delta w)`. \\\n These values are computed by the following formulas.\n\n * :math:`\\Delta y = (b_y - m_y) / (m_h * v_0)`\n * :math:`\\Delta x = (b_x - m_x) / (m_w * v_0)`\n * :math:`\\Delta h = log(b_h / m_h) / v_1`\n * :math:`\\Delta w = log(b_w / m_w) / v_1`\n\n :math:`(m_y, m_x)` and :math:`(m_h, m_w)` are \\\n center coodinates and size of a default bounding box. \\\n :math:`(b_y, b_x)` and :math:`(b_h, b_w)` are \\\n center coodinates and size of \\\n a given bounding boxes that is assined to the default bounding box. \\\n :math:`(v_0, v_1)` are coefficients that can be set \\\n by argument :obj:`variance`.\n * :obj:`mb_label`: An array representing classes of \\\n ground truth bounding boxes. Its shape is :math:`(K,)`.\n * :obj:`mb_conf`: An array representing classes of \\\n predicted bounding boxes. Its shape is :math:`(K, n\\_fg\\_class + 1)`.\n\n .. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,\n Scott Reed, Cheng-Yang Fu, Alexander C. Berg.\n SSD: Single Shot MultiBox Detector. 
ECCV 2016.\n\n Args:\n grids (iterable of ints): An iterable of integers.\n Each integer indicates the size of a feature map.\n aspect_ratios (iterable of tuples of ints):\n An iterable of tuples of integers\n used to compute the default bounding boxes.\n Each tuple indicates the aspect ratios of\n the default bounding boxes at each feature maps.\n The length of this iterable should be :obj:`len(grids)`.\n steps (iterable of floats): The step size for each feature map.\n The length of this iterable should be :obj:`len(grids)`.\n sizes (iterable of floats): The base size of default bounding boxes\n for each feature map.\n The length of this iterable should be :obj:`len(grids) + 1`.\n variance (tuple of floats): Two coefficients for encoding/decoding\n the locations of bounding boxes. The first value is used to\n encode/decode coordinates of the centers.\n The second value is used to encode/decode the sizes of\n bounding boxes.\n \"\"\"\n\n def __init__(self, grids, aspect_ratios, steps, sizes, variance):\n if not len(aspect_ratios) == len(grids):\n raise ValueError('The length of aspect_ratios is wrong.')\n if not len(steps) == len(grids):\n raise ValueError('The length of steps is wrong.')\n if not len(sizes) == len(grids) + 1:\n raise ValueError('The length of sizes is wrong.')\n\n default_bbox = []\n\n for k, grid in enumerate(grids):\n for v, u in itertools.product(range(grid), repeat=2):\n cy = (v + 0.5) * steps[k]\n cx = (u + 0.5) * steps[k]\n\n s = sizes[k]\n default_bbox.append((cy, cx, s, s))\n\n s = np.sqrt(sizes[k] * sizes[k + 1])\n default_bbox.append((cy, cx, s, s))\n\n s = sizes[k]\n for ar in aspect_ratios[k]:\n default_bbox.append(\n (cy, cx, s / np.sqrt(ar), s * np.sqrt(ar)))\n default_bbox.append(\n (cy, cx, s * np.sqrt(ar), s / np.sqrt(ar)))\n\n # (center_y, center_x, height, width)\n self._default_bbox = np.stack(default_bbox)\n self._variance = variance\n\n @property\n def xp(self):\n return chainer.backends.cuda.get_array_module(self._default_bbox)\n\n def to_cpu(self):\n self._default_bbox = chainer.backends.cuda.to_cpu(self._default_bbox)\n\n def to_gpu(self, device=None):\n self._default_bbox = chainer.backends.cuda.to_gpu(\n self._default_bbox, device=device)\n\n def encode(self, bbox, label, iou_thresh=0.5):\n \"\"\"Encodes coordinates and classes of bounding boxes.\n\n This method encodes :obj:`bbox` and :obj:`label` to :obj:`mb_loc`\n and :obj:`mb_label`, which are used to compute multibox loss.\n\n Args:\n bbox (array): A float array of shape :math:`(R, 4)`,\n where :math:`R` is the number of bounding boxes in an image.\n Each bounding box is organized by\n :math:`(y_{min}, x_{min}, y_{max}, x_{max})`\n in the second axis.\n label (array) : An integer array of shape :math:`(R,)`.\n Each value indicates the class of the bounding box.\n iou_thresh (float): The threshold value to determine\n a default bounding box is assigned to a ground truth\n or not. 
The default value is :obj:`0.5`.\n\n Returns:\n tuple of two arrays:\n This method returns a tuple of two arrays,\n :obj:`(mb_loc, mb_label)`.\n\n * **mb_loc**: A float array of shape :math:`(K, 4)`, \\\n where :math:`K` is the number of default bounding boxes.\n * **mb_label**: An integer array of shape :math:`(K,)`.\n\n \"\"\"\n xp = self.xp\n\n if len(bbox) == 0:\n return (\n xp.zeros(self._default_bbox.shape, dtype=np.float32),\n xp.zeros(self._default_bbox.shape[0], dtype=np.int32))\n\n iou = utils.bbox_iou(\n xp.hstack((\n self._default_bbox[:, :2] - self._default_bbox[:, 2:] / 2,\n self._default_bbox[:, :2] + self._default_bbox[:, 2:] / 2)),\n bbox)\n\n index = xp.empty(len(self._default_bbox), dtype=int)\n # -1 is for background\n index[:] = -1\n\n masked_iou = iou.copy()\n while True:\n i, j = xp.unravel_index(masked_iou.argmax(), masked_iou.shape)\n if masked_iou[i, j] <= 1e-6:\n break\n index[i] = j\n masked_iou[i, :] = 0\n masked_iou[:, j] = 0\n\n mask = xp.logical_and(index < 0, iou.max(axis=1) >= iou_thresh)\n index[mask] = iou[mask].argmax(axis=1)\n\n mb_bbox = bbox[index].copy()\n # (y_min, x_min, y_max, x_max) -> (y_min, x_min, height, width)\n mb_bbox[:, 2:] -= mb_bbox[:, :2]\n # (y_min, x_min, height, width) -> (center_y, center_x, height, width)\n mb_bbox[:, :2] += mb_bbox[:, 2:] / 2\n\n mb_loc = xp.empty_like(mb_bbox)\n mb_loc[:, :2] = (mb_bbox[:, :2] - self._default_bbox[:, :2]) / \\\n (self._variance[0] * self._default_bbox[:, 2:])\n mb_loc[:, 2:] = xp.log(mb_bbox[:, 2:] / self._default_bbox[:, 2:]) / \\\n self._variance[1]\n\n # [0, n_fg_class - 1] -> [1, n_fg_class]\n mb_label = label[index] + 1\n # 0 is for background\n mb_label[index < 0] = 0\n\n return mb_loc.astype(np.float32), mb_label.astype(np.int32)\n\n def decode(self, mb_loc, mb_conf, nms_thresh=0.45, score_thresh=0.6):\n \"\"\"Decodes back to coordinates and classes of bounding boxes.\n\n This method decodes :obj:`mb_loc` and :obj:`mb_conf` returned\n by a SSD network back to :obj:`bbox`, :obj:`label` and :obj:`score`.\n\n Args:\n mb_loc (array): A float array whose shape is\n :math:`(K, 4)`, :math:`K` is the number of\n default bounding boxes.\n mb_conf (array): A float array whose shape is\n :math:`(K, n\\_fg\\_class + 1)`.\n nms_thresh (float): The threshold value\n for :func:`~chainercv.utils.non_maximum_suppression`.\n The default value is :obj:`0.45`.\n score_thresh (float): The threshold value for confidence score.\n If a bounding box whose confidence score is lower than\n this value, the bounding box will be suppressed.\n The default value is :obj:`0.6`.\n\n Returns:\n tuple of three arrays:\n This method returns a tuple of three arrays,\n :obj:`(bbox, label, score)`.\n\n * **bbox**: A float array of shape :math:`(R, 4)`, \\\n where :math:`R` is the number of bounding boxes in a image. \\\n Each bounding box is organized by \\\n :math:`(y_{min}, x_{min}, y_{max}, x_{max})` \\\n in the second axis.\n * **label** : An integer array of shape :math:`(R,)`. \\\n Each value indicates the class of the bounding box.\n * **score** : A float array of shape :math:`(R,)`. 
\\\n Each value indicates how confident the prediction is.\n\n \"\"\"\n xp = self.xp\n\n # (center_y, center_x, height, width)\n mb_bbox = self._default_bbox.copy()\n mb_bbox[:, :2] += mb_loc[:, :2] * self._variance[0] \\\n * self._default_bbox[:, 2:]\n mb_bbox[:, 2:] *= xp.exp(mb_loc[:, 2:] * self._variance[1])\n\n # (center_y, center_x, height, width) -> (y_min, x_min, height, width)\n mb_bbox[:, :2] -= mb_bbox[:, 2:] / 2\n # (center_y, center_x, height, width) -> (y_min, x_min, y_max, x_max)\n mb_bbox[:, 2:] += mb_bbox[:, :2]\n\n # softmax\n mb_score = xp.exp(mb_conf)\n mb_score /= mb_score.sum(axis=1, keepdims=True)\n\n bbox = []\n label = []\n score = []\n for l in range(mb_conf.shape[1] - 1):\n bbox_l = mb_bbox\n # the l-th class corresponds for the (l + 1)-th column.\n score_l = mb_score[:, l + 1]\n\n mask = score_l >= score_thresh\n bbox_l = bbox_l[mask]\n score_l = score_l[mask]\n\n if nms_thresh is not None:\n indices = utils.non_maximum_suppression(\n bbox_l, nms_thresh, score_l)\n bbox_l = bbox_l[indices]\n score_l = score_l[indices]\n\n bbox.append(bbox_l)\n label.append(xp.array((l,) * len(bbox_l)))\n score.append(score_l)\n\n bbox = xp.vstack(bbox).astype(np.float32)\n label = xp.hstack(label).astype(np.int32)\n score = xp.hstack(score).astype(np.float32)\n\n return bbox, label, score\n", "from __future__ import division\n\nimport math\nimport numpy as np\nimport random\n\n\ndef random_sized_crop(img,\n scale_ratio_range=(0.08, 1),\n aspect_ratio_range=(3 / 4, 4 / 3),\n return_param=False, copy=False):\n \"\"\"Crop an image to random size and aspect ratio.\n\n The size :math:`(H_{crop}, W_{crop})` and the left top coordinate\n :math:`(y_{start}, x_{start})` of the crop are calculated as follows:\n\n + :math:`H_{crop} = \\\\lfloor{\\\\sqrt{s \\\\times H \\\\times W \\\n \\\\times a}}\\\\rfloor`\n + :math:`W_{crop} = \\\\lfloor{\\\\sqrt{s \\\\times H \\\\times W \\\n \\\\div a}}\\\\rfloor`\n + :math:`y_{start} \\\\sim Uniform\\\\{0, H - H_{crop}\\\\}`\n + :math:`x_{start} \\\\sim Uniform\\\\{0, W - W_{crop}\\\\}`\n + :math:`s \\\\sim Uniform(s_1, s_2)`\n + :math:`b \\\\sim Uniform(a_1, a_2)` and \\\n :math:`a = b` or :math:`a = \\\\frac{1}{b}` in 50/50 probability.\n\n Here, :math:`s_1, s_2` are the two floats in\n :obj:`scale_ratio_range` and :math:`a_1, a_2` are the two floats\n in :obj:`aspect_ratio_range`.\n Also, :math:`H` and :math:`W` are the height and the width of the image.\n Note that :math:`s \\\\approx \\\\frac{H_{crop} \\\\times W_{crop}}{H \\\\times W}`\n and :math:`a \\\\approx \\\\frac{H_{crop}}{W_{crop}}`.\n The approximations come from flooring floats to integers.\n\n .. note::\n\n When it fails to sample a valid scale and aspect ratio for ten\n times, it picks values in a non-uniform way.\n If this happens, the selected scale ratio can be smaller\n than :obj:`scale_ratio_range[0]`.\n\n Args:\n img (~numpy.ndarray): An image array. This is in CHW format.\n scale_ratio_range (tuple of two floats): Determines\n the distribution from which a scale ratio is sampled.\n The default values are selected so that the area of the crop is\n 8~100% of the original image. 
This is the default\n setting used to train ResNets in Torch style.\n aspect_ratio_range (tuple of two floats): Determines\n the distribution from which an aspect ratio is sampled.\n The default values are\n :math:`\\\\frac{3}{4}` and :math:`\\\\frac{4}{3}`, which\n are also the default setting to train ResNets in Torch style.\n return_param (bool): Returns parameters if :obj:`True`.\n\n Returns:\n ~numpy.ndarray or (~numpy.ndarray, dict):\n\n If :obj:`return_param = False`,\n returns only the cropped image.\n\n If :obj:`return_param = True`,\n returns a tuple of cropped image and :obj:`param`.\n :obj:`param` is a dictionary of intermediate parameters whose\n contents are listed below with key, value-type and the description\n of the value.\n\n * **y_slice** (*slice*): A slice used to crop the input image.\\\n The relation below holds together with :obj:`x_slice`.\n * **x_slice** (*slice*): Similar to :obj:`y_slice`.\n\n .. code::\n\n out_img = img[:, y_slice, x_slice]\n\n * **scale_ratio** (float): :math:`s` in the description (see above).\n * **aspect_ratio** (float): :math:`a` in the description.\n\n \"\"\"\n _, H, W = img.shape\n scale_ratio, aspect_ratio =\\\n _sample_parameters(\n (H, W), scale_ratio_range, aspect_ratio_range)\n\n H_crop = int(math.floor(np.sqrt(scale_ratio * H * W * aspect_ratio)))\n W_crop = int(math.floor(np.sqrt(scale_ratio * H * W / aspect_ratio)))\n y_start = random.randint(0, H - H_crop)\n x_start = random.randint(0, W - W_crop)\n y_slice = slice(y_start, y_start + H_crop)\n x_slice = slice(x_start, x_start + W_crop)\n\n img = img[:, y_slice, x_slice]\n\n if copy:\n img = img.copy()\n if return_param:\n params = {'y_slice': y_slice, 'x_slice': x_slice,\n 'scale_ratio': scale_ratio, 'aspect_ratio': aspect_ratio}\n return img, params\n else:\n return img\n\n\ndef _sample_parameters(size, scale_ratio_range, aspect_ratio_range):\n H, W = size\n for _ in range(10):\n aspect_ratio = random.uniform(\n aspect_ratio_range[0], aspect_ratio_range[1])\n if random.uniform(0, 1) < 0.5:\n aspect_ratio = 1 / aspect_ratio\n # This is determined so that relationships \"H - H_crop >= 0\" and\n # \"W - W_crop >= 0\" are always satisfied.\n scale_ratio_max = min((scale_ratio_range[1],\n H / (W * aspect_ratio),\n (aspect_ratio * W) / H))\n\n scale_ratio = random.uniform(\n scale_ratio_range[0], scale_ratio_range[1])\n if scale_ratio_range[0] <= scale_ratio <= scale_ratio_max:\n return scale_ratio, aspect_ratio\n\n # This scale_ratio is outside the given range when\n # scale_ratio_max < scale_ratio_range[0].\n scale_ratio = random.uniform(\n min((scale_ratio_range[0], scale_ratio_max)), scale_ratio_max)\n return scale_ratio, aspect_ratio\n" ]
[ [ "numpy.random.randint" ], [ "numpy.random.uniform", "numpy.array", "numpy.random.randint" ], [ "matplotlib.pyplot.figure" ], [ "numpy.random.choice", "numpy.concatenate", "numpy.round", "numpy.append", "numpy.array", "numpy.where" ], [ "numpy.testing.assert_equal" ], [ "numpy.sqrt", "numpy.stack" ], [ "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
berlinchen7/EXOSIMS
[ "0c46945ae0b915f97157fb17de3eb344f5945e74" ]
[ "EXOSIMS/SurveySimulation/tieredScheduler_DD_SLSQP.py" ]
[ "from EXOSIMS.SurveySimulation.tieredScheduler_SLSQP import tieredScheduler_SLSQP\nimport EXOSIMS, os\nimport astropy.units as u\nimport astropy.constants as const\nimport numpy as np\nimport itertools\nfrom scipy import interpolate\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport time\nimport copy\nfrom EXOSIMS.util.deltaMag import deltaMag\n\nclass tieredScheduler_DD_SLSQP(tieredScheduler_SLSQP):\n \"\"\"tieredScheduler_DD_SLSQP - tieredScheduler Dual Detection SLSQP\n \n This class implements a version of the tieredScheduler that performs dual-band\n detections and uses the SLSQP scheduler as a base for inheritance.\n \"\"\"\n\n def __init__(self, **specs):\n \n tieredScheduler_SLSQP.__init__(self, **specs)\n \n\n def run_sim(self):\n \"\"\"Performs the survey simulation \n \n Returns:\n mission_end (string):\n Message printed at the end of a survey simulation.\n \n \"\"\"\n \n OS = self.OpticalSystem\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n Comp = self.Completeness\n \n # TODO: start using this self.currentSep\n # set occulter separation if haveOcculter\n self.currentSep = Obs.occulterSep\n \n # Choose observing modes selected for detection (default marked with a flag),\n det_modes = list(filter(lambda mode: 'imag' in mode['inst']['name'], OS.observingModes))\n # and for characterization (default is first spectro/IFS mode)\n spectroModes = list(filter(lambda mode: 'spec' in mode['inst']['name'], OS.observingModes))\n if np.any(spectroModes):\n char_mode = spectroModes[0]\n # if no spectro mode, default char mode is first observing mode\n else:\n char_mode = OS.observingModes[0]\n \n # Begin Survey, and loop until mission is finished\n self.logger.info('OB{}: survey beginning.'.format(TK.OBnumber+1))\n self.vprint('OB{}: survey beginning.'.format(TK.OBnumber+1))\n t0 = time.time()\n sInd = None\n occ_sInd = None\n cnt = 0\n\n while not TK.mission_is_over(OS, Obs, det_modes[0]):\n \n # Acquire the NEXT TARGET star index and create DRM\n prev_occ_sInd = occ_sInd\n old_sInd = sInd #used to save sInd if returned sInd is None\n waitTime = None\n DRM, sInd, occ_sInd, t_det, sd, occ_sInds, det_mode = self.next_target(sInd, occ_sInd, det_modes, char_mode)\n \n if det_mode is not None:\n true_t_det = t_det*det_mode['timeMultiplier'] + Obs.settlingTime + det_mode['syst']['ohTime']\n else:\n true_t_det = t_det\n\n if sInd != occ_sInd and sInd is not None:\n assert t_det != 0, \"Integration time can't be 0.\"\n\n if sInd is not None and (TK.currentTimeAbs.copy() + true_t_det) >= self.occ_arrives and occ_sInd != self.last_chard:\n sInd = occ_sInd\n if sInd == occ_sInd:\n self.ready_to_update = True\n\n time2arrive = self.occ_arrives - TK.currentTimeAbs.copy()\n \n if sInd is not None:\n cnt += 1\n\n # clean up revisit list when one occurs to prevent repeats\n if np.any(self.starRevisit) and np.any(np.where(self.starRevisit[:,0] == float(sInd))):\n s_revs = np.where(self.starRevisit[:,0] == float(sInd))[0]\n dt_max = 1.*u.week\n t_revs = np.where(self.starRevisit[:,1]*u.day - TK.currentTimeNorm.copy() < dt_max)[0]\n self.starRevisit = np.delete(self.starRevisit, np.intersect1d(s_revs,t_revs),0)\n\n # get the index of the selected target for the extended list\n if TK.currentTimeNorm.copy() > TK.missionLife and self.starExtended.shape[0] == 0:\n for i in range(len(self.DRM)):\n if np.any([x == 1 for x in self.DRM[i]['plan_detected']]):\n self.starExtended = np.hstack((self.starExtended, self.DRM[i]['star_ind']))\n 
self.starExtended = np.unique(self.starExtended)\n \n # Beginning of observation, start to populate DRM\n DRM['OB_nb'] = TK.OBnumber+1\n DRM['ObsNum'] = cnt\n DRM['star_ind'] = sInd\n pInds = np.where(SU.plan2star == sInd)[0]\n DRM['plan_inds'] = pInds.astype(int).tolist()\n\n if sInd == occ_sInd:\n # wait until expected arrival time is observed\n if time2arrive > 0*u.d:\n TK.advanceToAbsTime(self.occ_arrives)\n if time2arrive > 1*u.d:\n self.GAtime = self.GAtime + time2arrive.to('day')\n\n TK.obsStart = TK.currentTimeNorm.copy().to('day')\n\n self.logger.info(' Observation #%s, target #%s/%s with %s planet(s), mission time: %s'\\\n %(cnt, sInd+1, TL.nStars, len(pInds), TK.obsStart.round(2)))\n self.vprint(' Observation #%s, target #%s/%s with %s planet(s), mission time: %s'\\\n %(cnt, sInd+1, TL.nStars, len(pInds), TK.obsStart.round(2)))\n\n DRM['arrival_time'] = TK.currentTimeNorm.copy().to('day')\n \n if sInd != occ_sInd:\n self.starVisits[sInd] += 1\n # PERFORM DETECTION and populate revisit list attribute.\n # First store fEZ, dMag, WA\n if np.any(pInds):\n DRM['det_fEZ'] = SU.fEZ[pInds].to('1/arcsec2').value.tolist()\n DRM['det_dMag'] = SU.dMag[pInds].tolist()\n DRM['det_WA'] = SU.WA[pInds].to('mas').value.tolist()\n detected, det_fZ, det_systemParams, det_SNR, FA = self.observation_detection(sInd, t_det, det_mode)\n\n if np.any(detected):\n self.sInd_detcounts[sInd] += 1\n self.sInd_dettimes[sInd] = (self.sInd_dettimes.get(sInd) or []) + [TK.currentTimeNorm.copy().to('day')]\n self.vprint(' Det. results are: %s'%(detected))\n\n # update GAtime\n self.GAtime = self.GAtime + t_det.to('day')*self.GA_simult_det_fraction\n\n # populate the DRM with detection results\n DRM['det_time'] = t_det.to('day')\n DRM['det_status'] = detected\n DRM['det_SNR'] = det_SNR\n DRM['det_fZ'] = det_fZ.to('1/arcsec2')\n DRM['det_params'] = det_systemParams\n DRM['FA_det_status'] = int(FA)\n\n det_comp = Comp.comp_per_intTime(t_det, TL, sInd, det_fZ, self.ZodiacalLight.fEZ0, self.WAint[sInd], det_mode)[0]\n DRM['det_comp'] = det_comp\n DRM['det_mode'] = dict(det_mode)\n del DRM['det_mode']['inst'], DRM['det_mode']['syst']\n \n elif sInd == occ_sInd:\n self.last_chard = occ_sInd\n self.occ_starVisits[occ_sInd] += 1\n # PERFORM CHARACTERIZATION and populate spectra list attribute.\n occ_pInds = np.where(SU.plan2star == occ_sInd)[0]\n sInd = occ_sInd\n\n DRM['slew_time'] = self.occ_slewTime.to('day').value\n DRM['slew_angle'] = self.occ_sd.to('deg').value\n slew_mass_used = self.occ_slewTime*Obs.defburnPortion*Obs.flowRate\n DRM['slew_dV'] = (self.occ_slewTime*self.ao*Obs.defburnPortion).to('m/s').value\n DRM['slew_mass_used'] = slew_mass_used.to('kg')\n Obs.scMass = Obs.scMass - slew_mass_used\n DRM['scMass'] = Obs.scMass.to('kg')\n\n self.logger.info(' Starshade and telescope aligned at target star')\n self.vprint(' Starshade and telescope aligned at target star')\n\n # PERFORM CHARACTERIZATION and populate spectra list attribute\n characterized, char_fZ, char_systemParams, char_SNR, char_intTime = \\\n self.observation_characterization(sInd, char_mode)\n if np.any(characterized):\n self.vprint(' Char. 
results are: %s'%(characterized))\n else:\n # make sure we don't accidnetally double characterize\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + .01*u.d)\n assert char_intTime != 0, \"Integration time can't be 0.\"\n if np.any(occ_pInds):\n DRM['char_fEZ'] = SU.fEZ[occ_pInds].to('1/arcsec2').value.tolist()\n DRM['char_dMag'] = SU.dMag[occ_pInds].tolist()\n DRM['char_WA'] = SU.WA[occ_pInds].to('mas').value.tolist()\n DRM['char_mode'] = dict(char_mode)\n del DRM['char_mode']['inst'], DRM['char_mode']['syst']\n\n # update the occulter wet mass\n if OS.haveOcculter and char_intTime is not None:\n DRM = self.update_occulter_mass(DRM, sInd, char_intTime, 'char')\n char_comp = Comp.comp_per_intTime(char_intTime, TL, occ_sInd, char_fZ, self.ZodiacalLight.fEZ0, self.WAint[occ_sInd], char_mode)[0]\n DRM['char_comp'] = char_comp\n FA = False\n # populate the DRM with characterization results\n DRM['char_time'] = char_intTime.to('day') if char_intTime else 0.*u.day\n #DRM['char_counts'] = self.sInd_charcounts[sInd]\n DRM['char_status'] = characterized[:-1] if FA else characterized\n DRM['char_SNR'] = char_SNR[:-1] if FA else char_SNR\n DRM['char_fZ'] = char_fZ.to('1/arcsec2')\n DRM['char_params'] = char_systemParams\n # populate the DRM with FA results\n DRM['FA_det_status'] = int(FA)\n DRM['FA_char_status'] = characterized[-1] if FA else 0\n DRM['FA_char_SNR'] = char_SNR[-1] if FA else 0.\n DRM['FA_char_fEZ'] = self.lastDetected[sInd,1][-1]/u.arcsec**2 if FA else 0./u.arcsec**2\n DRM['FA_char_dMag'] = self.lastDetected[sInd,2][-1] if FA else 0.\n DRM['FA_char_WA'] = self.lastDetected[sInd,3][-1]*u.arcsec if FA else 0.*u.arcsec\n\n # add star back into the revisit list\n if np.any(characterized):\n char = np.where(characterized)[0]\n pInds = np.where(SU.plan2star == sInd)[0]\n smin = np.min(SU.s[pInds[char]])\n pInd_smin = pInds[np.argmin(SU.s[pInds[char]])]\n\n Ms = TL.MsTrue[sInd]\n sp = smin\n Mp = SU.Mp[pInd_smin]\n mu = const.G*(Mp + Ms)\n T = 2.*np.pi*np.sqrt(sp**3/mu)\n t_rev = TK.currentTimeNorm.copy() + T/2.\n\n self.goal_GAtime = self.GA_percentage * TK.currentTimeNorm.copy().to('day')\n goal_GAdiff = self.goal_GAtime - self.GAtime\n\n # allocate extra time to GA if we are falling behind\n if goal_GAdiff > 1*u.d and TK.currentTimeAbs.copy() < self.occ_arrives:\n GA_diff = min(self.occ_arrives - TK.currentTimeAbs.copy(), goal_GAdiff)\n self.vprint('Allocating time %s to general astrophysics'%(GA_diff))\n self.GAtime = self.GAtime + GA_diff\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + GA_diff)\n # allocate time if there is no target for the starshade\n elif goal_GAdiff > 1*u.d and (self.occ_arrives - TK.currentTimeAbs.copy()) < -5*u.d and not np.any(occ_sInds):\n self.vprint('No Available Occulter Targets: Allocating time %s to general astrophysics'%(goal_GAdiff))\n self.GAtime = self.GAtime + goal_GAdiff\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + goal_GAdiff)\n\n DRM['exoplanetObsTime'] = TK.exoplanetObsTime.copy()\n\n # Append result values to self.DRM\n self.DRM.append(DRM)\n\n # Calculate observation end time\n TK.obsEnd = TK.currentTimeNorm.copy().to('day')\n\n # With prototype TimeKeeping, if no OB duration was specified, advance\n # to the next OB with timestep equivalent to time spent on one target\n if np.isinf(TK.OBduration) and (TK.missionPortion < 1):\n self.arbitrary_time_advancement(TK.currentTimeNorm.to('day').copy() - DRM['arrival_time'])\n \n # With occulter, if spacecraft fuel is depleted, exit loop\n if Obs.scMass < Obs.dryMass:\n self.vprint('Total fuel mass 
exceeded at %s' %TK.obsEnd.round(2))\n break\n\n else:#sInd == None\n sInd = old_sInd#Retain the last observed star\n if(TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]): # currentTime is at end of OB\n #Conditional Advance To Start of Next OB\n if not TK.mission_is_over(OS, Obs,det_mode):#as long as the mission is not over\n TK.advancetToStartOfNextOB()#Advance To Start of Next OB\n elif(waitTime is not None):\n #CASE 1: Advance specific wait time\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n self.vprint('waitTime is not None')\n else:\n startTimes = TK.currentTimeAbs.copy() + np.zeros(TL.nStars)*u.d # Start Times of Observations\n observableTimes = Obs.calculate_observableTimes(TL,np.arange(TL.nStars),startTimes,self.koMaps,self.koTimes,self.mode)[0]\n #CASE 2 If There are no observable targets for the rest of the mission\n if((observableTimes[(TK.missionFinishAbs.copy().value*u.d > observableTimes.value*u.d)*(observableTimes.value*u.d >= TK.currentTimeAbs.copy().value*u.d)].shape[0]) == 0):#Are there any stars coming out of keepout before end of mission\n self.vprint('No Observable Targets for Remainder of mission at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n #Manually advancing time to mission end\n TK.currentTimeNorm = TK.missionLife\n TK.currentTimeAbs = TK.missionFinishAbs\n else:#CASE 3 nominal wait time if at least 1 target is still in list and observable\n #TODO: ADD ADVANCE TO WHEN FZMIN OCURS\n inds1 = np.arange(TL.nStars)[observableTimes.value*u.d > TK.currentTimeAbs.copy().value*u.d]\n inds2 = np.intersect1d(self.intTimeFilterInds, inds1) #apply intTime filter\n inds3 = self.revisitFilter(inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)) #apply revisit Filter #NOTE this means stars you added to the revisit list \n self.vprint(\"Filtering %d stars from advanceToAbsTime\"%(TL.nStars - len(inds3)))\n oTnowToEnd = observableTimes[inds3]\n if not oTnowToEnd.value.shape[0] == 0: #there is at least one observableTime between now and the end of the mission\n tAbs = np.min(oTnowToEnd)#advance to that observable time\n else:\n tAbs = TK.missionStart + TK.missionLife#advance to end of mission\n tmpcurrentTimeNorm = TK.currentTimeNorm.copy()\n success = TK.advanceToAbsTime(tAbs)#Advance Time to this time OR start of next OB following this time\n self.vprint('No Observable Targets a currentTimeNorm= %.2f Advanced To currentTimeNorm= %.2f'%(tmpcurrentTimeNorm.to('day').value, TK.currentTimeNorm.to('day').value))\n \n\n else:\n dtsim = (time.time()-t0)*u.s\n mission_end = \"Mission complete: no more time available.\\n\"\\\n + \"Simulation duration: %s.\\n\" %dtsim.astype('int')\\\n + \"Results stored in SurveySimulation.DRM (Design Reference Mission).\"\n\n self.logger.info(mission_end)\n self.vprint(mission_end)\n\n return mission_end\n\n def next_target(self, old_sInd, old_occ_sInd, det_modes, char_mode):\n \"\"\"Finds index of next target star and calculates its integration time.\n \n This method chooses the next target star index based on which\n stars are available, their integration time, and maximum completeness.\n Returns None if no target could be found.\n \n Args:\n old_sInd (integer):\n Index of the previous target star for the telescope\n old_occ_sInd (integer):\n Index of the previous target star for the occulter\n det_modes (dict array):\n Selected observing mode for detection\n char_mode (dict):\n Selected observing mode for characterization\n \n Returns:\n DRM (dicts):\n Contains the results of survey simulation\n sInd 
(integer):\n Index of next target star. Defaults to None.\n occ_sInd (integer):\n Index of next occulter target star. Defaults to None.\n t_det (astropy Quantity):\n Selected star integration time for detection in units of day. \n Defaults to None.\n \n \"\"\"\n \n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n Comp = self.Completeness\n TL = self.TargetList\n Obs = self.Observatory\n TK = self.TimeKeeping\n SU = self.SimulatedUniverse\n \n # Create DRM\n DRM = {}\n \n # selecting appropriate koMap\n occ_koMap = self.koMaps[char_mode['syst']['name']]\n koMap = self.koMaps[det_modes[0]['syst']['name']]\n\n # In case of an occulter, initialize slew time factor\n # (add transit time and reduce starshade mass)\n assert OS.haveOcculter == True\n self.ao = Obs.thrust/Obs.scMass\n\n # Star indices that correspond with the given HIPs numbers for the occulter\n # XXX ToDo: print out HIPs that don't show up in TL\n HIP_sInds = np.where(np.in1d(TL.Name, self.occHIPs))[0]\n if TL.earths_only:\n HIP_sInds = np.union1d(HIP_sInds, self.promoted_stars).astype(int)\n sInd = None\n\n # Now, start to look for available targets\n while not TK.mission_is_over(OS, Obs, det_modes[0]):\n # allocate settling time + overhead time\n tmpCurrentTimeAbs = TK.currentTimeAbs.copy()\n tmpCurrentTimeNorm = TK.currentTimeNorm.copy()\n occ_tmpCurrentTimeAbs = TK.currentTimeAbs.copy()\n occ_tmpCurrentTimeNorm = TK.currentTimeNorm.copy()\n\n # 0 initialize arrays\n slewTimes = np.zeros(TL.nStars)*u.d\n fZs = np.zeros(TL.nStars)/u.arcsec**2\n dV = np.zeros(TL.nStars)*u.m/u.s\n intTimes = np.zeros(TL.nStars)*u.d\n occ_intTimes = np.zeros(TL.nStars)*u.d\n tovisit = np.zeros(TL.nStars, dtype=bool)\n occ_tovisit = np.zeros(TL.nStars, dtype=bool)\n sInds = np.arange(TL.nStars)\n\n # 1 Find spacecraft orbital START positions and filter out unavailable \n # targets. 
If occulter, each target has its own START position.\n sd = Obs.star_angularSep(TL, old_occ_sInd, sInds, tmpCurrentTimeAbs)\n obsTimes = Obs.calculate_observableTimes(TL, sInds, tmpCurrentTimeAbs, self.koMaps, self.koTimes, char_mode)\n slewTimes = Obs.calculate_slewTimes(TL, old_occ_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs)\n\n # 2.1 filter out totTimes > integration cutoff\n if len(sInds) > 0:\n occ_sInds = np.intersect1d(self.occ_intTimeFilterInds, sInds)\n if len(sInds) > 0:\n sInds = np.intersect1d(self.intTimeFilterInds, sInds)\n \n # Starttimes based off of slewtime\n occ_startTimes = occ_tmpCurrentTimeAbs.copy() + slewTimes\n occ_startTimesNorm = occ_tmpCurrentTimeNorm.copy() + slewTimes\n\n startTimes = tmpCurrentTimeAbs.copy() + np.zeros(TL.nStars)*u.d\n startTimesNorm = tmpCurrentTimeNorm.copy()\n\n # 2.5 Filter stars not observable at startTimes\n try:\n tmpIndsbool = list()\n for i in np.arange(len(occ_sInds)):\n koTimeInd = np.where(np.round(occ_startTimes[occ_sInds[i]].value) - self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(occ_koMap[occ_sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds_occ_ko = occ_sInds[tmpIndsbool]\n occ_sInds = sInds_occ_ko[np.where(np.in1d(sInds_occ_ko, HIP_sInds))[0]]\n del tmpIndsbool\n except:#If there are no target stars to observe \n sInds_occ_ko = np.asarray([],dtype=int)\n occ_sInds = np.asarray([],dtype=int)\n\n try:\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(np.round(startTimes[sInds[i]].value) - self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(koMap[sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except:#If there are no target stars to observe \n sInds = np.asarray([],dtype=int)\n\n # 2.9 Occulter target promotion step\n occ_sInds = self.promote_coro_targets(occ_sInds, sInds_occ_ko)\n\n # 3 Filter out all previously (more-)visited targets, unless in \n # revisit list, with time within some dt of start (+- 1 week)\n if len(sInds.tolist()) > 0:\n sInds = self.revisitFilter(sInds, TK.currentTimeNorm.copy())\n\n # revisit list, with time after start\n if np.any(occ_sInds):\n occ_tovisit[occ_sInds] = (self.occ_starVisits[occ_sInds] == self.occ_starVisits[occ_sInds].min())\n if self.occ_starRevisit.size != 0:\n dt_max = 1.*u.week\n dt_rev = TK.currentTimeNorm.copy() - self.occ_starRevisit[:,1]*u.day\n ind_rev = [int(x) for x in self.occ_starRevisit[dt_rev > 0, 0] if x in occ_sInds]\n occ_tovisit[ind_rev] = True\n occ_sInds = np.where(occ_tovisit)[0]\n\n # 4 calculate integration times for ALL preselected targets, \n # and filter out totTimes > integration cutoff\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, det_modes[0])\n maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife, OS.intCutoff)#Maximum intTime allowed\n\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, char_mode)\n occ_maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife, OS.intCutoff)#Maximum intTime allowed\n\n if len(occ_sInds) > 0:\n if self.int_inflection:\n fEZ = ZL.fEZ0\n WA = self.WAint\n occ_intTimes[occ_sInds] = self.calc_int_inflection(occ_sInds, fEZ, occ_startTimes, WA[occ_sInds], char_mode, ischar=True)\n totTimes = occ_intTimes*char_mode['timeMultiplier']\n 
occ_endTimes = occ_startTimes + totTimes\n else:\n # characterization_start = occ_startTimes\n occ_intTimes[occ_sInds] = self.calc_targ_intTime(occ_sInds, occ_startTimes[occ_sInds], char_mode) * (1 + self.charMargin)\n\n # Adjust integration time for stars with known earths around them\n for occ_star in occ_sInds:\n if occ_star in self.promoted_stars:\n occ_earths = np.intersect1d(np.where(SU.plan2star == occ_star)[0], self.known_earths).astype(int)\n if np.any(occ_earths):\n fZ = ZL.fZ(Obs, TL, occ_star, occ_startTimes[occ_star], char_mode)\n fEZ = SU.fEZ[occ_earths].to('1/arcsec2').value/u.arcsec**2\n dMag = SU.dMag[occ_earths]\n WA = SU.WA[occ_earths]\n earthlike_inttimes = OS.calc_intTime(TL, occ_star, fZ, fEZ, dMag, WA, char_mode) * (1 + self.charMargin)\n earthlike_inttime = earthlike_inttimes[(earthlike_inttimes < occ_maxIntTime)]\n if len(earthlike_inttime) > 0:\n occ_intTimes[occ_star] = np.max(earthlike_inttime)\n else:\n occ_intTimes[occ_star] = np.max(earthlike_inttimes)\n occ_endTimes = occ_startTimes + (occ_intTimes * char_mode['timeMultiplier']) + Obs.settlingTime + char_mode['syst']['ohTime']\n\n occ_sInds = occ_sInds[(occ_intTimes[occ_sInds] <= occ_maxIntTime)] # Filters targets exceeding maximum intTime\n occ_sInds = occ_sInds[(occ_intTimes[occ_sInds] > 0.0*u.d)] # Filters with an inttime of 0\n \n if occ_maxIntTime.value <= 0:\n occ_sInds = np.asarray([],dtype=int)\n\n if len(sInds.tolist()) > 0:\n intTimes[sInds] = self.calc_targ_intTime(sInds, startTimes[sInds], det_modes[0])\n sInds = sInds[np.where((intTimes[sInds] <= maxIntTime) & (intTimes[sInds] > 0.0*u.d))] # Filters targets exceeding end of OB\n endTimes = startTimes + intTimes\n \n if maxIntTime.value <= 0:\n sInds = np.asarray([],dtype=int)\n \n # 5.2 find spacecraft orbital END positions (for each candidate target), \n # and filter out unavailable targets\n if len(occ_sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try: # endTimes may exist past koTimes so we have an exception to hand this case\n tmpIndsbool = list()\n for i in np.arange(len(occ_sInds)):\n koTimeInd = np.where(np.round(occ_endTimes[occ_sInds[i]].value)-self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(occ_koMap[occ_sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n occ_sInds = occ_sInds[tmpIndsbool]\n del tmpIndsbool\n except:\n occ_sInds = np.asarray([],dtype=int)\n\n if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try: # endTimes may exist past koTimes so we have an exception to hand this case\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(np.round(endTimes[sInds[i]].value)-self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(koMap[sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except:\n sInds = np.asarray([],dtype=int)\n\n # 5.3 Filter off current occulter target star from detection list\n if old_occ_sInd is not None:\n sInds = sInds[np.where(sInds != old_occ_sInd)[0]]\n occ_sInds = occ_sInds[np.where(occ_sInds != old_occ_sInd)[0]]\n\n # 6.1 Filter off any stars visited by the occulter 3 or more times\n if np.any(occ_sInds):\n occ_sInds = occ_sInds[np.where(self.occ_starVisits[occ_sInds] < self.occ_max_visits)[0]]\n\n # 6.2 Filter off coronograph stars with > 3 visits and no detections\n no_dets = np.logical_and((self.starVisits[sInds] > self.n_det_remove), (self.sInd_detcounts[sInds] == 0))\n sInds = 
sInds[np.where(np.invert(no_dets))[0]]\n\n max_dets = np.where(self.sInd_detcounts[sInds] < self.max_successful_dets)[0]\n sInds = sInds[max_dets]\n\n # 7 Filter off cornograph stars with too-long inttimes\n available_time = None\n if self.occ_arrives > TK.currentTimeAbs:\n available_time = self.occ_arrives - TK.currentTimeAbs.copy()\n if np.any(sInds[intTimes[sInds] < available_time]):\n sInds = sInds[intTimes[sInds] < available_time]\n\n # 8 remove occ targets on ignore_stars list\n occ_sInds = np.setdiff1d(occ_sInds, np.intersect1d(occ_sInds, self.ignore_stars))\n\n t_det = 0*u.d\n det_mode = copy.deepcopy(det_modes[0])\n occ_sInd = old_occ_sInd\n\n # 8 Choose best target from remaining\n # if the starshade has arrived at its destination, or it is the first observation\n if np.any(occ_sInds):\n if old_occ_sInd is None or ((TK.currentTimeAbs.copy() + t_det) >= self.occ_arrives and self.ready_to_update):\n occ_sInd = self.choose_next_occulter_target(old_occ_sInd, occ_sInds, occ_intTimes)\n if old_occ_sInd is None:\n self.occ_arrives = TK.currentTimeAbs.copy()\n else:\n self.occ_arrives = occ_startTimes[occ_sInd]\n self.occ_slewTime = slewTimes[occ_sInd]\n self.occ_sd = sd[occ_sInd]\n # if not np.any(sInds):\n # sInd = occ_sInd\n self.ready_to_update = False\n # self.occ_starVisits[occ_sInd] += 1\n elif not np.any(sInds):\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + 1*u.d)\n continue\n\n if occ_sInd is not None:\n sInds = sInds[np.where(sInds != occ_sInd)[0]]\n\n if np.any(sInds):\n\n # choose sInd of next target\n sInd = self.choose_next_telescope_target(old_sInd, sInds, intTimes[sInds])\n\n # Perform dual band detections if necessary\n if self.WAint[sInd] > det_modes[1]['IWA'] and self.WAint[sInd] < det_modes[1]['OWA']:\n det_mode['BW'] = det_mode['BW'] + det_modes[1]['BW']\n det_mode['inst']['sread'] = det_mode['inst']['sread'] + det_modes[1]['inst']['sread']\n det_mode['inst']['idark'] = det_mode['inst']['idark'] + det_modes[1]['inst']['idark']\n det_mode['inst']['CIC'] = det_mode['inst']['CIC'] + det_modes[1]['inst']['CIC']\n det_mode['syst']['optics'] = np.mean((det_mode['syst']['optics'], det_modes[1]['syst']['optics']))\n det_mode['instName'] = 'combined'\n\n t_det = self.calc_targ_intTime(np.array([sInd]), np.array([startTimes[sInd]]), det_mode)[0]\n\n if t_det > maxIntTime and maxIntTime > 0*u.d:\n t_det = maxIntTime\n if available_time is not None and available_time > 0*u.d:\n if t_det > available_time:\n t_det = available_time.copy().value * u.d\n else:\n sInd = None\n\n # if no observable target, call the TimeKeeping.wait() method\n if not np.any(sInds) and not np.any(occ_sInds):\n self.vprint('No Observable Targets at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n return DRM, None, None, None, None, None, None\n break\n\n else:\n self.logger.info('Mission complete: no more time available')\n self.vprint('Mission complete: no more time available')\n return DRM, None, None, None, None, None, None\n\n if TK.mission_is_over(OS, Obs, det_mode):\n self.logger.info('Mission complete: no more time available')\n self.vprint('Mission complete: no more time available')\n return DRM, None, None, None, None, None, None\n\n occ_earths = np.intersect1d(np.where(SU.plan2star == occ_sInd)[0], self.known_earths).astype(int)\n\n return DRM, sInd, occ_sInd, t_det, sd, occ_sInds, det_mode" ]
[ [ "numpy.sqrt", "numpy.asarray", "numpy.in1d", "numpy.round", "numpy.max", "numpy.mean", "numpy.any", "numpy.argmin", "numpy.where", "numpy.hstack", "numpy.unique", "numpy.arange", "numpy.intersect1d", "numpy.zeros", "numpy.invert", "numpy.min", "numpy.union1d", "numpy.array", "numpy.logical_and", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
turnmanh/scikit-opt
[ "8d9fc314081cc25b767e22a15db4f7ec9d20203b" ]
[ "sko/PSO.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/20\n# @Author : github.com/guofei9987\n\nimport numpy as np\nfrom sko.tools import func_transformer\nfrom .base import SkoBase\nfrom tqdm import trange\n\n\nclass PSO(SkoBase):\n \"\"\"\n Do PSO (Particle swarm optimization) algorithm.\n\n This algorithm was adapted from the earlier works of J. Kennedy and\n R.C. Eberhart in Particle Swarm Optimization [IJCNN1995]_.\n\n The position update can be defined as:\n\n .. math::\n\n x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)\n\n Where the position at the current step :math:`t` is updated using\n the computed velocity at :math:`t+1`. Furthermore, the velocity update\n is defined as:\n\n .. math::\n\n v_{ij}(t + 1) = w * v_{ij}(t) + c_{p}r_{1j}(t)[y_{ij}(t) − x_{ij}(t)]\n + c_{g}r_{2j}(t)[\\hat{y}_{j}(t) − x_{ij}(t)]\n\n Here, :math:`cp` and :math:`cg` are the cognitive and social parameters\n respectively. They control the particle's behavior given two choices: (1) to\n follow its *personal best* or (2) follow the swarm's *global best* position.\n Overall, this dictates if the swarm is explorative or exploitative in nature.\n In addition, a parameter :math:`w` controls the inertia of the swarm's\n movement.\n\n .. [IJCNN1995] J. Kennedy and R.C. Eberhart, \"Particle Swarm Optimization,\"\n Proceedings of the IEEE International Joint Conference on Neural\n Networks, 1995, pp. 1942-1948.\n\n Parameters\n --------------------\n func : function\n The func you want to do optimal\n dim : int\n Number of dimension, which is number of parameters of func.\n pop : int\n Size of population, which is the number of Particles. We use 'pop' to keep accordance with GA\n max_iter : int\n Max of iter iterations\n lb : array_like\n The lower bound of every variables of func\n ub : array_like\n The upper bound of every variables of func\n constraint_eq : tuple\n equal constraint. 
Note: not available yet.\n constraint_ueq : tuple\n unequal constraint\n Attributes\n ----------------------\n pbest_x : array_like, shape is (pop,dim)\n best location of every particle in history\n pbest_y : array_like, shape is (pop,1)\n best image of every particle in history\n gbest_x : array_like, shape is (1,dim)\n general best location for all particles in history\n gbest_y : float\n general best image for all particles in history\n gbest_y_hist : list\n gbest_y of every iteration\n\n\n Examples\n -----------------------------\n see https://scikit-opt.github.io/scikit-opt/#/en/README?id=_3-psoparticle-swarm-optimization\n \"\"\"\n\n def __init__(self, func, n_dim=None, pop=40, max_iter=150, lb=-1e5, ub=1e5, w=0.8, c1=0.5, c2=0.5,\n constraint_eq=tuple(), constraint_ueq=tuple(), verbose=False\n , dim=None):\n\n n_dim = n_dim or dim # support the earlier version\n\n self.func = func_transformer(func)\n self.w = w # inertia\n self.cp, self.cg = c1, c2 # parameters to control personal best, global best respectively\n self.pop = pop # number of particles\n self.n_dim = n_dim # dimension of particles, which is the number of variables of func\n self.max_iter = max_iter # max iter\n self.verbose = verbose # print the result of each iter or not\n\n self.lb, self.ub = np.array(lb) * np.ones(self.n_dim), np.array(ub) * np.ones(self.n_dim)\n assert self.n_dim == len(self.lb) == len(self.ub), 'dim == len(lb) == len(ub) is not True'\n assert np.all(self.ub > self.lb), 'upper-bound must be greater than lower-bound'\n\n self.has_constraint = bool(constraint_ueq)\n self.constraint_ueq = constraint_ueq\n self.is_feasible = np.array([True] * pop)\n\n self.X = np.random.uniform(low=self.lb, high=self.ub, size=(self.pop, self.n_dim))\n v_high = self.ub - self.lb\n self.V = np.random.uniform(low=-v_high, high=v_high, size=(self.pop, self.n_dim)) # speed of particles\n self.Y = self.cal_y() # y = f(x) for all particles\n self.pbest_x = self.X.copy() # personal best location of every particle in history\n self.pbest_y = np.array([[np.inf]] * pop) # best image of every particle in history\n self.gbest_x = self.pbest_x.mean(axis=0).reshape(1, -1) # global best location for all particles\n self.gbest_y = np.inf # global best y for all particles\n self.gbest_y_hist = [] # gbest_y of every iteration\n self.update_gbest()\n\n # record verbose values\n self.record_mode = False\n self.record_value = {'X': [], 'V': [], 'Y': []}\n self.best_x, self.best_y = self.gbest_x, self.gbest_y # history reasons, will be deprecated\n\n def check_constraint(self, x):\n # gather all unequal constraint functions\n for constraint_func in self.constraint_ueq:\n if constraint_func(x) > 0:\n return False\n return True\n\n def update_V(self):\n r1 = np.random.rand(self.pop, self.n_dim)\n r2 = np.random.rand(self.pop, self.n_dim)\n self.V = self.w * self.V + \\\n self.cp * r1 * (self.pbest_x - self.X) + \\\n self.cg * r2 * (self.gbest_x - self.X)\n\n def update_X(self):\n self.X = self.X + self.V\n self.X = np.clip(self.X, self.lb, self.ub)\n\n def cal_y(self):\n # calculate y for every x in X\n self.Y = self.func(self.X).reshape(-1, 1)\n return self.Y\n\n def update_pbest(self):\n '''\n personal best\n :return:\n '''\n self.need_update = self.pbest_y > self.Y\n for idx, x in enumerate(self.X):\n if self.need_update[idx]:\n self.need_update[idx] = self.check_constraint(x)\n\n self.pbest_x = np.where(self.need_update, self.X, self.pbest_x)\n self.pbest_y = np.where(self.need_update, self.Y, self.pbest_y)\n\n def update_gbest(self):\n 
'''\n global best\n :return:\n '''\n idx_min = self.pbest_y.argmin()\n if self.gbest_y > self.pbest_y[idx_min]:\n self.gbest_x = self.X[idx_min, :].copy()\n self.gbest_y = self.pbest_y[idx_min]\n\n def recorder(self):\n if not self.record_mode:\n return\n self.record_value['X'].append(self.X)\n self.record_value['V'].append(self.V)\n self.record_value['Y'].append(self.Y)\n\n def run(self, max_iter=None, precision=1e-7, N=20):\n '''\n precision: None or float\n If precision is None, it will run the number of max_iter steps\n If precision is a float, the loop will stop if continuous N difference between pbest less than precision\n N: int\n '''\n self.max_iter = max_iter or self.max_iter\n c = 0\n for iter_num in trange(self.max_iter, desc=\"step \"):\n self.update_V()\n self.recorder()\n self.update_X()\n self.cal_y()\n self.update_pbest()\n self.update_gbest()\n if precision is not None:\n tor_iter = np.amax(self.pbest_y) - np.amin(self.pbest_y)\n if tor_iter < precision:\n c = c + 1\n if c > N:\n break\n else:\n c = 0\n if self.verbose:\n print('Iter: {}, Best fit: {} at {}'.format(iter_num, self.gbest_y, self.gbest_x))\n\n self.gbest_y_hist.append(self.gbest_y)\n self.best_x, self.best_y = self.gbest_x, self.gbest_y\n return self.best_x, self.best_y\n\n fit = run\n" ]
[ [ "numpy.amax", "numpy.clip", "numpy.amin", "numpy.ones", "numpy.all", "numpy.random.rand", "numpy.random.uniform", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yuanz271/PyDSTool
[ "886c143cdd192aea204285f3a1cb4968c763c646", "886c143cdd192aea204285f3a1cb4968c763c646", "886c143cdd192aea204285f3a1cb4968c763c646", "886c143cdd192aea204285f3a1cb4968c763c646", "886c143cdd192aea204285f3a1cb4968c763c646" ]
[ "PyDSTool/Toolbox/optimizers/line_search/backtracking_search.py", "PyDSTool/ModelContext.py", "tests/generator/test_events.py", "PyDSTool/Generator/Radau_ODEsystem.py", "tests/test_saveload.py" ]
[ "\n# Matthieu Brucher\n# Last Change : 2007-08-26 19:43\n\nimport numpy\n\nclass BacktrackingSearch(object):\n \"\"\"\n The backtracking algorithm for enforcing Armijo rule\n \"\"\"\n def __init__(self, rho = 0.1, alpha_step = 1., alpha_factor = 0.5, **kwargs):\n \"\"\"\n Can have :\n - a coefficient for the Armijo rule (rho = 0.1)\n - an alpha factor to modulate the step (alpha_step = 1.)\n - an alpha factor < 1 that will decrease the step size until the rule is valid (alpha_factor = 0.5)\n \"\"\"\n self.rho = rho\n self.stepSize = alpha_step\n self.stepFactor = alpha_factor\n\n def __call__(self, origin, function, state, **kwargs):\n \"\"\"\n Tries to find an acceptable candidate\n \"\"\"\n direction = state['direction']\n if 'initial_alpha_step' in state:\n alpha = state['initial_alpha_step']\n else:\n alpha = self.stepSize\n\n f1temp = function(origin)\n gradient = state['gradient']\n while(True):\n ftemp = function(origin + alpha * direction)\n #Armijo rule\n if ftemp <= f1temp + self.rho * alpha * numpy.dot(gradient, direction):\n state['alpha_step'] = alpha\n return origin + alpha * direction\n alpha = alpha * self.stepFactor\n", "\"\"\"Features, conditions, model contexts and interfaces\n for hybrid dynamical systems and model estimation tasks.\n\n Robert Clewley, September 2007.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport copy\nimport sys, traceback\nimport numpy as npy\n\n# PyDSTool imports\nfrom . import Events, ModelSpec, Symbolic, Trajectory\nfrom . import utils, common\nfrom .parseUtils import symbolMapClass\nfrom .errors import *\n\n# --------------------\n# public exports\n\n_classes = ['ql_feature_node', 'ql_feature_leaf',\n 'qt_feature_node', 'qt_feature_leaf',\n 'binary_feature', 'always_feature',\n 'feature', 'condition', 'context',\n 'GeneratorInterface', 'ModelInterface',\n 'extModelInterface', 'intModelInterface'\n ]\n\n_functions = []\n\n_constants = ['LARGE_PENALTY']\n\n__all__ = _classes + _functions + _constants\n\n# ----------------------------------------------------------------------------\n\nLARGE_PENALTY = 2000.\n\nclass feature(object):\n \"\"\"End users of concrete sub-classes provide (required) evaluate method and\n (optional) prepare, finish methods.\"\"\"\n def __init__(self, name, description='', pars=None, ref_traj=None):\n self.name = name\n self.description = description\n if pars is None:\n self.pars = common.args()\n elif isinstance(pars, dict):\n self.pars = common.args(**pars)\n elif isinstance(pars, common.args):\n self.pars = pars\n else:\n raise PyDSTool_TypeError(\"Invalid type for pars argument\")\n if 'verbose_level' not in self.pars:\n self.pars.verbose_level = 0\n if 'debug' not in self.pars:\n self.pars.debug = False\n # penalty used if an error occurs during residual calculation\n if 'penalty' not in self.pars:\n self.pars.penalty = LARGE_PENALTY\n self.ref_traj = ref_traj\n self.results = common.args()\n self.super_pars = common.args()\n self.super_results = common.args()\n # perform any sub-class specific initializations,\n # such as providing a metric (and its output array length)\n try:\n self._local_init()\n except AttributeError:\n pass\n self.subfeatures = []\n\n def __hash__(self):\n return hash((self.name,common.className(self)))\n\n def __eq__(self, other):\n try:\n res = self.name == other.name\n except AttributeError:\n return False\n if hasattr(self, 'subfeatures'):\n if hasattr(other, 'subfeatures'):\n res = res and self.subfeatures == other.subfeatures\n else:\n return False\n elif 
hasattr(other, 'subfeatures'):\n return False\n return res\n\n def __ne__(self, other):\n return not self == other\n\n def _find_idx(self):\n \"\"\"Internal function for finding index in trajectory meshpoints\n at which containment first failed. Defaults to returning None and\n must be overridden by a class that has access to a mesh.\"\"\"\n return None\n\n def __call__(self, target):\n try:\n self.prepare(target)\n satisfied = self.evaluate(target)\n except KeyboardInterrupt:\n raise\n except:\n display_error = self.pars.verbose_level > 0 or self.pars.debug\n if display_error:\n exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n print(\"******************************************\")\n print(\"Problem evaluating feature:\" + self.name)\n print(\" %s %s\" % (exceptionType, exceptionValue))\n for line in traceback.format_exc().splitlines()[-12:-1]:\n print(\" \" + line)\n print(\" originally on line:%d\" % traceback.tb_lineno(exceptionTraceback))\n if self.pars.debug: #and self.pars.verbose_level > 1:\n raise\n else:\n print(\"(Proceeding as 'unsatisfied')\\n\")\n satisfied = False\n if hasattr(self, 'metric'):\n self.metric.results = self.pars.penalty * \\\n npy.ones((self.metric_len,), float)\n for sf in self.subfeatures:\n if hasattr(sf, 'metric'):\n sf.metric.results = self.pars.penalty * \\\n npy.ones((sf.metric_len,), float)\n if satisfied:\n self.finish(target)\n self.results.satisfied = satisfied\n return satisfied\n\n def set_ref_traj(self, ref_traj):\n \"\"\"May or may not be used by the feature. If not used,\n it will be ignored.\"\"\"\n raise NotImplementedError(\"Override in concrete sub-class\")\n\n def evaluate(self, target):\n raise NotImplementedError(\"Override in concrete sub-class\")\n\n def prepare(self, target):\n \"\"\"Operations to prepare for testing (optional).\n Override in concrete sub-class if desired\"\"\"\n pass\n\n def finish(self, target):\n \"\"\"Operations to complete only if evaluate was True (optional).\n Override in concrete sub-class if desired\"\"\"\n pass\n\n def info(self):\n utils.info(self, self.name)\n\n def postprocess_ref_traj(self):\n \"\"\"User-definable by overriding in a sub-class\"\"\"\n pass\n\n def validate(self):\n \"\"\"NOT YET IMPLEMENTED. 
Would test for reachability of a feature?\n This is not easy and may be unnecessary!\"\"\"\n return True\n\n def reset_metric(self):\n try:\n self.metric.results = None\n except AttributeError:\n # no metric for this feature\n pass\n\n\nclass feature_leaf(feature):\n \"\"\"Abstract super-class for feature leaf nodes.\n \"\"\"\n def set_ref_traj(self, ref_traj):\n self.ref_traj = ref_traj\n self.postprocess_ref_traj()\n\n def _residual_info(self, feats, sizes):\n \"\"\"Update feats and sizes lists in place with metric info, if any.\n \"\"\"\n try:\n sizes.append(self.metric_len)\n # touch self.metric to ensure it exists!\n self.metric\n except AttributeError:\n # no metric present\n return\n else:\n feats.append(self)\n\n def __str__(self):\n return \"Feature %s\"%self.name\n\n __repr__ = __str__\n\n\nclass feature_node(feature):\n \"\"\"Abstract super-class for feature regular nodes (supporting sub-features).\n \"\"\"\n def __init__(self, name, description='', pars=None,\n ref_traj=None, subfeatures=None):\n \"\"\"Sub-features is an ordered sequence of QL or QT feature instances\n which are (by default) evaluated in this order on a trajectory segment\n unless evaluation method is overridden.\n\n For more sophisticated use of sub-features, they should be provided as\n a dictionary mapping feature names to the feature instance.\n \"\"\"\n feature.__init__(self, name, description, pars, ref_traj)\n if subfeatures is None:\n self.subfeatures = ()\n self._namemap = {}\n elif isinstance(subfeatures, (list, tuple)):\n for sf in subfeatures:\n assert isinstance(sf, feature), \\\n \"Only define quantitative or qualitative features\"\n self.subfeatures = subfeatures\n self._namemap = dict(zip([sf.name for sf in subfeatures],\n subfeatures))\n elif isinstance(subfeatures, dict):\n for sfname, sf in subfeatures.items():\n assert isinstance(sf, feature), \\\n \"Only define quantitative or qualitative features\"\n self.subfeatures = subfeatures\n self._namemap = subfeatures\n else:\n raise TypeError(\"List or dictionary of sub-features expected\")\n\n\n def _residual_info(self, feats, sizes):\n \"\"\"Update feats and sizes lists in place with metric info, if any.\n \"\"\"\n try:\n sizes.append(self.metric_len)\n # touch self.metric to ensure it exists!\n self.metric\n except AttributeError:\n # no metric present\n pass\n else:\n feats.append(self)\n # continue gathering from sub-features\n for sf in self._namemap.values():\n sf._residual_info(feats, sizes)\n\n def __str__(self):\n s = \"Feature %s \"%self.name\n if len(list(self._namemap.keys())) > 0:\n s += \"- \" + str(list(self._namemap.keys()))\n return s\n\n __repr__ = __str__\n\n def __getitem__(self, featname):\n \"\"\"Return named sub-feature\"\"\"\n return self._namemap[featname]\n\n def propagate_verbosity(self, sf):\n # subfeatures inherit one lower level of verbosity\n if 'verbose_level' in self.pars:\n v = max([0,self.pars.verbose_level - 1])\n if isinstance(sf, common._seq_types):\n for sf_i in sf:\n sf_i.pars.verbose_level = v\n else:\n sf.pars.verbose_level = v\n if 'debug' in self.pars:\n if isinstance(sf, common._seq_types):\n for sf_i in sf:\n sf_i.pars.debug = self.pars.debug\n else:\n sf.pars.debug = self.pars.debug\n\n def evaluate(self, target):\n \"\"\"Default method: evaluate sub-features in order (assumes they\n are stored as a list).\n\n Can override with more sophisticated method (e.g. 
for use with a\n dictionary of sub-features).\n \"\"\"\n # initial value\n satisfied = True\n # this loop only works if subfeatures is a list\n # (must retain correct order for this list so don't use _namemap.values())\n for sf in self.subfeatures:\n try:\n self.propagate_verbosity(sf)\n except KeyboardInterrupt:\n raise\n except:\n if not isinstance(self.subfeatures, common._seq_types):\n raise TypeError(\"You must override the evaluate method for \"\n \"dictionary-based sub-features\")\n else:\n raise\n sf.super_pars.update(self.pars)\n sf.super_results.update(self.results)\n sf.reset_metric()\n if self.pars.verbose_level > 1:\n print(\"feature_node.evaluate: sf=%r\" % sf)\n error_raised = False\n try:\n new_result = sf(target)\n except KeyboardInterrupt:\n raise\n except:\n # catch errors in prepare or finish (evaluate was trapped\n # in __call__)\n new_result = False\n error_raised = True\n if sf.pars.debug: # and sf.pars.verbose_level > 1:\n raise\n # have to compute new separately to ensure sf computes its results\n # for potential use by a residual function\n satisfied = satisfied and new_result\n if error_raised:\n print(\" ... error raised\")\n if hasattr(self, 'metric'):\n # kludgy penalty function in lieu of something smarter\n if sf.metric.results is None:\n sf.metric.results = self.pars.penalty * \\\n npy.ones((sf.metric_len,),float)\n else:\n self.results.update(sf.results)\n return satisfied\n\n def set_ref_traj(self, ref_traj):\n \"\"\"May or may not be used by the feature. If not used, it will be\n ignored.\"\"\"\n self.ref_traj = ref_traj\n self.postprocess_ref_traj()\n if isinstance(self.subfeatures, dict):\n sfs = list(self.subfeatures.values())\n else:\n sfs = self.subfeatures\n for sf in sfs:\n self.propagate_verbosity(sf)\n sf.super_pars.update(self.pars)\n sf.super_results.update(self.results)\n sf.set_ref_traj(ref_traj)\n self.pars.update(sf.pars)\n\n\nclass ql_feature_leaf(feature_leaf):\n \"\"\"Qualitative feature (leaf node).\n Add description to note assumptions used for defining feature.\n\n input: a trajectory segment\n output: a vector of boolean-valued event detections (non-terminal events or even\n non-linked python events) or other function tests (e.g. existence of a fixed point)\n stored in a list.\n \"\"\"\n\n\nclass qt_feature_leaf(feature_leaf):\n \"\"\"Quantitative feature (leaf node).\n Add description to note assumptions used for defining feature.\n\n input: a trajectory segment\n output: a vector of boolean-valued tolerance tests on the discrepancies between ideal and\n actual features defined by a list of function tests\n e.g. a test returns (residual of ideal-actual) < tolerance\n \"\"\"\n\n\nclass ql_feature_node(feature_node):\n \"\"\"Qualitative feature (regular node).\n Add description to note assumptions used for defining feature.\n\n input: a trajectory segment\n output: a vector of boolean-valued event detections (non-terminal events or even\n non-linked python events) or other function tests (e.g. existence of a fixed point)\n stored in a list.\n \"\"\"\n\n\nclass qt_feature_node(feature_node):\n \"\"\"Quantitative feature (regular node).\n Add description to note assumptions used for defining feature.\n\n input: a trajectory segment\n output: a vector of boolean-valued tolerance tests on the discrepancies between ideal and\n actual features defined by a list of function tests\n e.g. 
a test returns (residual of ideal-actual) < tolerance\n \"\"\"\n\n# -----------------------------------------------------------------------------\n\n\nclass condition(object):\n \"\"\"Model context condition, made up of a boolean composition of wanted and\n unwanted features.\n This is specified by a dictionary of feature objects mapping to True\n (wanted feature) or False (unwanted feature).\n \"\"\"\n\n def __init__(self, feature_composition_dict):\n # fcd maps feature objects to True (wanted feature) or\n # False (unwanted feature)\n self.namemap = {}\n try:\n for f, c in feature_composition_dict.items():\n assert isinstance(c, bool), \\\n \"Feature composition dictionary requires boolean values\"\n assert isinstance(f, (ql_feature_leaf, qt_feature_leaf,\n ql_feature_node, qt_feature_node)), \\\n \"Only define quantitative or qualitative features\"\n self.namemap[f.name] = f\n except AttributeError:\n raise TypeError(\"Dictionary of features to Booleans expected\")\n self.fcd = feature_composition_dict\n self.results = common.args()\n\n __hash__ = None\n\n def __eq__(self, other):\n try:\n return self.namemap == other.namemap\n except AttributeError:\n return False\n\n def __ne__(self, other):\n try:\n return self.namemap != other.namemap\n except AttributeError:\n return True\n\n def keys(self):\n return self.namemap.keys()\n\n def values(self):\n return self.namemap.values()\n\n def items(self):\n return self.namemap.items()\n\n def __getitem__(self, name):\n return self.namemap[name]\n\n def __contains__(self, name):\n return name in self.namemap\n\n def set_ref_traj(self, ref_traj):\n \"\"\"Set reference trajectory for the features (if used, otherwise will\n be ignored or overridden in feature _local_init methods).\n \"\"\"\n for f,c in self.fcd.items():\n f.set_ref_traj(ref_traj)\n\n def evaluate(self, target):\n \"\"\"Apply conditions to trajectory segments\n and returns True if all are satisfied.\"\"\"\n satisfied = True\n for f,c in self.fcd.items():\n # have to call new separately to ensure f calcs its residual\n new = f(target) == c\n satisfied = satisfied and new\n self.results[f.name] = f.results\n return satisfied\n\n __call__ = evaluate\n\n def __str__(self):\n s = \"Condition \"\n if len(list(self.namemap.keys())) > 0:\n s += \"- \" + str(list(self.namemap.keys()))\n return s\n\n __repr__ = __str__\n\n def _find_idx(self):\n min_ix = npy.Inf\n for f in self.fcd.keys():\n f_ix = f._find_idx()\n if f_ix is not None and f_ix < min_ix:\n min_ix = f_ix\n if npy.isfinite(min_ix):\n return min_ix\n else:\n return None\n\n def _residual_info(self):\n \"\"\"Update metric information used for residual / objective function,\n from all sub-features.\"\"\"\n #feats and sizes updated in place\n feats = []\n sizes = []\n for f in self.fcd.keys():\n f._residual_info(feats, sizes)\n return {'features': dict(zip(feats,sizes)),\n 'total_size': sum(sizes)}\n\n def collate_results(self, result_name, merge_lists=False,\n feature_names=None):\n res = []\n if feature_names is None:\n feature_list = list(self.fcd.keys())\n else:\n feature_list = [self.namemap[f] for f in feature_names]\n for f in feature_list:\n try:\n resval = getattr(f.results, result_name)\n except AttributeError:\n # no such result name\n continue\n else:\n if merge_lists and isinstance(resval, list):\n res.extend(resval)\n else:\n res.append(resval)\n return res\n\n\nclass context(object):\n \"\"\"A collection of related model interfaces that apply to a model.\n interface_pairs are a list of ModelInterface instance (test) 
and class (ref) pairs,\n the latter to be instantiated on a model.\n\n Set the debug_mode attribute at any time, or as the optional argument at initializiation,\n to ensure that any exceptions that arise from interacting model interfaces and their\n features are fully passed back to the caller of the context containing them.\n \"\"\"\n def __init__(self, interface_pairs, debug_mode=False):\n self.interfaces = dict(interface_pairs)\n self.debug_mode = debug_mode\n # Determine which qt features have metrics to use to make a\n # residual function. Keep multiple views of this data for efficient\n # access in different ways.\n metric_features = {}\n res_feature_list = []\n tot_size = 0\n for test_mi, ref_mi_class in self.interfaces.items():\n # list of suitable features for each test_mi\n metric_features[test_mi] = test_mi.conditions._residual_info()\n tot_size += metric_features[test_mi]['total_size']\n res_feature_list.extend([(test_mi, f) for f in \\\n metric_features[test_mi]['features'].keys()])\n self.metric_features = metric_features\n self.res_feature_list = res_feature_list\n self.res_len = tot_size\n # instances cleared on each evaluation of a model\n self.ref_interface_instances = []\n # default weights are all 1, and set up weights dictionary\n self.reset_weights()\n\n def reset_weights(self, old_weights=None):\n \"\"\"Reset weights to unity, unless old_weights array\n is given, in which case reset to that.\n \"\"\"\n if old_weights is None:\n self.weights = npy.ones(self.res_len, 'float')\n else:\n self.weights = old_weights\n self.weight_index_mapping = {}\n self.feat_weights = {}\n ix = 0\n for test_mi, feat_dict in self.metric_features.items():\n self.weight_index_mapping[test_mi] = {}\n for f, size in feat_dict['features'].items():\n self.weight_index_mapping[test_mi][f] = (ix, ix+size)\n # weights are constant for a given feature\n self.feat_weights[(test_mi, f)] = self.weights[ix]\n ix += size\n\n def set_single_feat_weights(self, feat, weight=1):\n \"\"\"Set weights for a single feature given as an (interface, feature)\n pair, setting all others to zero.\"\"\"\n wdict = {}\n for test_mi, feat_dict in self.metric_features.items():\n if test_mi != feat[0]:\n continue\n w = {}.fromkeys(feat_dict['features'].keys(), 0)\n if feat[1] in w:\n w[feat[1]] = weight\n wdict[test_mi] = w\n self.set_weights(wdict)\n\n def set_weights(self, weight_dict):\n \"\"\"Update weights with a dictionary keyed by test_mi, whose values are\n either:\n (1) dicts of feature -> scalar weight.\n (2) a scalar which will apply to all features of that model interface\n Features and model interfaces must correspond to those declared for the\n context.\n \"\"\"\n for test_mi, fs in weight_dict.items():\n try:\n flist = list(self.metric_features[test_mi]['features'].keys())\n except KeyError:\n raise AssertionError(\"Invalid test model interface\")\n if isinstance(fs, common._num_types):\n feat_dict = {}.fromkeys(flist, fs)\n elif isinstance(fs, dict):\n assert npy.alltrue([isinstance(w, common._num_types) for \\\n w in fs.values()]), \"Invalid scalar weight\"\n assert npy.alltrue([f in flist for f in fs.keys()]), \\\n \"Invalid features given for this test model interface\"\n feat_dict = fs\n for f, w in feat_dict.items():\n self.feat_weights[(test_mi, f)] = w\n # update weight value\n start_ix, end_ix = self.weight_index_mapping[test_mi][f]\n self.weights[start_ix:end_ix] = w\n\n def show_res_info(self, resvec):\n \"\"\"Show detail of feature -> residual mapping for a given residual\n vector.\"\"\"\n i = 
0\n for test_mi, feat_dict in self.metric_features.items():\n print(\"Test model interface:\", test_mi)\n for f in feat_dict['features']:\n if self.feat_weights[(test_mi, f)] == 0:\n continue\n ix0, ix1 = self.weight_index_mapping[test_mi][f]\n len_w = ix1-ix0\n f_str = \" \"+f.name\n # ' unweighted:' is 13 chars long\n extra_space_w = \" \"*max([0, 13-len(f_str)])\n extra_space_unw = \" \"*max([0, len(f_str)-13])\n print(f_str + extra_space_w + \"%r\" % resvec[i:i+len_w])\n try:\n print(\" unweighted:\" + extra_space_unw +\n \"%r\" % (resvec[i:i+len_w]/self.weights[ix0:ix1]))\n except ZeroDivisionError:\n print(\" (unweighted values unavailable)\")\n i += len_w\n\n def _map_to_features(self, x):\n \"\"\"Utility to map 1D array x onto the model interface's\n features with non-zero weights, returning a dictionary.\n\n x is assumed to have correct length.\n \"\"\"\n out = {}\n i = 0\n for test_mi, feat_dict in self.metric_features.items():\n for f in feat_dict['features']:\n if self.feat_weights[(test_mi, f)] == 0:\n continue\n ix0, ix1 = self.weight_index_mapping[test_mi][f]\n len_w = ix1-ix0\n try:\n out[test_mi][f] = x[i:i+len_w]\n except KeyError:\n out[test_mi] = {f: x[i:i+len_w]}\n i += len_w\n return out\n\n def evaluate(self, model):\n \"\"\"Evaluate whole context on a model instance, returning a single\n Boolean.\n \"\"\"\n result = True\n # typically, test_mi is an external interface (e.g., for data)\n # and ref_mi is an internal interface (e.g., for a model)\n self.ref_interface_instances = []\n for test_mi, ref_mi_class in common.sortedDictItems(self.interfaces, byvalue=False):\n # evaluate test_mi on model, via the associated ref_mi\n ref_mi = ref_mi_class(model)\n self.ref_interface_instances.append(ref_mi)\n try:\n new_result = test_mi(ref_mi)\n except KeyboardInterrupt:\n raise\n except:\n if self.debug_mode:\n raise\n else:\n print(\"******************************************\")\n print(\"Problem evaluating interface %s on %s\" % (test_mi,ref_mi))\n print(\" %s %s\" % (sys.exc_info()[0], sys.exc_info()[1]))\n new_result = False\n # must create new_res first, to ensure all interfaces are\n # evaluated (to create their results for possible post-processing)\n result = result and new_result\n return result\n\n def residual(self, model, include_raw=False):\n \"\"\"Evaluate whole context on a model instance, returning an array\n of residual error between quantitative features in the model trajectory\n and their target values.\n\n Residual array will be weighted if one was set. 
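For example (illustrative values only): with weights [1., 0., 2.] and raw residual [r0, r1, r2], the returned array is [r0, 2*r2]. 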
Any weights set to zero\n will cause those features to *not appear* in the residual.\n\n Provide include_raw=True argument to also return the raw, unweighted residual.\n (Mainly for internal use.)\n \"\"\"\n # discard the boolean, just compute the residuals through the calls to\n # metric, and access them through the feature list\n self.evaluate(model)\n raw_residual = npy.concatenate(tuple([mf[1].metric.results for \\\n mf in self.res_feature_list]))\n residual = process_raw_residual(raw_residual, self.weights)\n if include_raw:\n return residual, raw_residual\n else:\n return residual\n\n\ndef process_raw_residual(raw_residual, weights):\n ixs = npy.nonzero(weights)\n residual = (weights*raw_residual)[ixs]\n nan_ixs = npy.where(npy.asarray(npy.isnan(residual),int))\n for ix in nan_ixs:\n residual[ix] = 100.\n return residual\n\n\n# -----------------------------------------------------------------\n\nclass always_feature(ql_feature_leaf):\n \"\"\"Use this for a single vector field model that uses discrete\n event mappings.\"\"\"\n def evaluate(self, target):\n return True\n\nclass binary_feature(ql_feature_leaf):\n \"\"\"Use this as a binary switch feature, toggled\n by a given variable name 'varname' that is supplied\n in the pars dict at initialization.\"\"\"\n def evaluate(self, target):\n try:\n pts = target.test_traj.sample(coords=[self.pars.varname])\n except AttributeError:\n raise AttributeError(\"No variable name given for switch\")\n except KeyboardInterrupt:\n raise\n except:\n print(\"Failed to find trajectory values for given variable name: %s\"%self.pars.varname)\n raise\n self.results.output = pts\n return all(self.results.output==1)\n\n def _find_idx(self):\n if self.results.satisfied:\n # Trajectory satisfied contraint!\n return None\n res = self.results.output\n if res[0] == 1:\n adjusted_res = list((res - 1) != 0)\n else:\n if 1 not in res:\n # never goes to excited state so no index to return\n raise RuntimeError\n adjusted_res = list(res != 0)\n # find first index at which value is non-zero\n # should never raise ValueError because this method is\n # only run if there was a sign change found\n return adjusted_res.index(True)\n\n\n# ----------------------------------------------------------------------------\n## ModelInterface-related classes\n\n# Private class\nclass dsInterface(object):\n \"\"\"Generic and abstract interface class for dynamical systems.\"\"\"\n # _getkeys shown for reference\n # _getkeys = ['indepvariable', 'algparams', 'funcspec',\n # 'diagnostics', 'variables',\n # 'pars', 'inputs',\n # 'eventstruct', 'globalt0', 'Rhs', 'Jacobian',\n # 'JacobianP', 'MassMatrix', 'AuxVars']\n _setkeys = ['globalt0', 'tdata', 'pars', 'algparams', 'inputs']\n # the query key list is copied from Model.Model\n _querykeys = ['pars', 'parameters', 'events', 'submodels',\n 'ics', 'initialconditions', 'vars', 'variables',\n 'auxvariables', 'auxvars', 'vardomains', 'abseps']\n\n def get_test_traj(self):\n raise NotImplementedError(\"Only call this on a concrete sub-class\")\n\n def query(self, querykey=''):\n return self.model.query(querykey)\n\n @property\n def name(self):\n try:\n return self.__name__\n except AttributeError:\n return self.__class__.__name__\n\n def __lt__(self, other):\n return self.name < other.name\n\n def __gt__(self, other):\n return self.name > other.name\n\n def __eq__(self, other):\n return self.name == other.name\n\n def __ne__(self, other):\n return self.name != other.name\n\n def __le__(self, other):\n return self.name <= other.name\n\n def 
__ge__(self, other):\n return self.name >= other.name\n\n def __hash__(self):\n return hash(self.name)\n\n\nclass GeneratorInterface(dsInterface):\n \"\"\"Wrapper for Generator (for non-hybrid models) that shares similar API\n with ModelInterface for use in HybridModel objects.\"\"\"\n\n def __init__(self, model, FScompatibleNames=None,\n FScompatibleNamesInv=None):\n \"\"\"model argument must be a Generator only\"\"\"\n self.model = model\n if FScompatibleNames is None:\n if self.model._FScompatibleNames is None:\n self.model._FScompatibleNames = symbolMapClass()\n else:\n self.model._FScompatibleNames = FScompatibleNames\n if FScompatibleNamesInv is None:\n if self.model._FScompatibleNamesInv is None:\n self.model._FScompatibleNamesInv = symbolMapClass()\n else:\n self.model._FScompatibleNamesInv = FScompatibleNamesInv\n self.eventstruct = Events.EventStruct()\n #self.diagnostics = common.Diagnostics()\n\n def get(self, key, ics=None, t0=0):\n # self.model is a Generator\n return self.model.get(key)\n\n def set(self, key, value, ics=None, t0=0):\n if key in self._setkeys:\n self.model.set(**{key:value})\n else:\n raise KeyError(\"Invalid or unsupported 'set' key: %s\"%key)\n\n def Rhs(self, t, xdict, pdict):\n \"\"\"Direct access to a generator's Rhs function.\"\"\"\n return self.model.Rhs(t, xdict, pdict)\n\n def Jacobian(self, t, xdict, pdict, idict=None):\n \"\"\"Direct access to a generator's Jacobian function (if defined).\"\"\"\n return self.model.Jacobian(t, xdict, pdict)\n\n def JacobianP(self, t, xdict, pdict):\n \"\"\"Direct access to a generator's JacobianP function (if defined).\"\"\"\n return self.model.JacobianP(t, xdict, pdict)\n\n def MassMatrix(self, t, xdict, pdict):\n \"\"\"Direct access to a generator's MassMatrix function (if defined).\"\"\"\n return self.model.MassMatrix(t, xdict, pdict)\n\n def AuxVars(self, t, xdict, pdict):\n \"\"\"Direct access to a generator's auxiliary variables\n definition (if defined).\"\"\"\n return self.model.AuxVars(t, xdict, pdict)\n\n\nclass ModelInterface(dsInterface):\n \"\"\"Model constraints expressed as a uni-directional interface to another\n formal system model:\n - Made up of conditions imposed on the other system's test trajectory.\n - Defines evaluation criteria for any view (e.g. 
from experimental data and\n test conditions).\n This is an abstract superclass for the 'internal' and 'external'\n sub-classes.\n \"\"\"\n _trajname = 'test_iface_traj'\n\n def __init__(self):\n # Cache (3-tuple) for the ics, t0 and initiator last specified\n self._initiator_cache = None\n self.eventstruct = Events.EventStruct()\n #self.diagnostics = common.Diagnostics()\n\n def _get_initiator_cache(self, ics=None, t0=0):\n \"\"\"\n Return initating object for given initial conditions and start time.\n For internal use only.\n \"initiator\" is the ModelInterface or GeneratorInterface object that\n will be chosen to begin an integration at the specified ICs and t0.\n \"\"\"\n if self._initiator_cache is None:\n if ics is None:\n raise ValueError(\"Must pass initial conditions\")\n else:\n initiator = self.model._findTrajInitiator(None, 0,\n t0, dict(ics))[0]\n self._initiator_cache = (ics, t0, initiator)\n else:\n if npy.alltrue(self._initiator_cache[0] == ics) and \\\n self._initiator_cache[1] == t0:\n ics, t0, initiator = self._initiator_cache\n elif ics is None:\n raise ValueError(\"Must pass initial conditions\")\n else:\n # initial conditions or t0 don't match -- don't use cache\n initiator = self.model._findTrajInitiator(None, 0,\n t0, dict(ics))[0]\n self._initiator_cache = (ics, t0, initiator)\n return (ics, t0, initiator)\n\n def set(self, key, value, ics=None, t0=0):\n # set own conditions then propagate them down to sub-models\n self.model.set(**{key:value})\n ics, t0, initiator = self._get_initiator_cache(ics, t0)\n# print \"ModelInterface.set %s = %s for %s (type %s)\"%(str(key), str(value), initiator.model.name, type(initiator))\n if key in self._setkeys:\n initiator.set(key, value, ics, t0)\n initiator.model.set(**{key:value})\n else:\n raise KeyError(\"Invalid or unsupported 'set' key: %s\"%key)\n\n def get(self, key, ics=None, t0=0):\n ics, t0, initiator = self._get_initiator_cache(ics, t0)\n# if key in self._getkeys:\n try:\n return initiator.get(key, ics, t0)\n except AttributeError:\n raise ValueError(\"Invalid or unsupported 'get' key: %s\"%key)\n\n def Rhs(self, t, xdict, pdict):\n \"\"\"Direct access to a generator's Rhs function.\"\"\"\n ics_ignore, t_ignore, ds = self._get_initiator_cache(xdict, t)\n try:\n return self.model.Rhs(ds._supermodel.name, t, xdict, pdict)\n except AttributeError:\n # ds is not a MI with attribute _supermodel\n return self.model.Rhs(t, xdict, pdict)\n\n def Jacobian(self, t, xdict, pdict, idict=None):\n \"\"\"Direct access to a generator's Jacobian function (if defined).\"\"\"\n ics_ignore, t_ignore, ds = self._get_initiator_cache(xdict, t)\n try:\n return self.model.Jacobian(ds._supermodel.name, t, xdict, pdict)\n except AttributeError:\n # ds is not a MI with attribute _supermodel\n return self.model.Jacobian(t, xdict, pdict)\n\n def JacobianP(self, t, xdict, pdict):\n \"\"\"Direct access to a generator's JacobianP function (if defined).\"\"\"\n ics_ignore, t_ignore, ds = self._get_initiator_cache(xdict, t)\n try:\n return self.model.JacobianP(ds._supermodel.name, t, xdict, pdict)\n except AttributeError:\n # ds is not a MI with attribute _supermodel\n return self.model.JacobianP(t, xdict, pdict)\n\n def MassMatrix(self, t, xdict, pdict):\n \"\"\"Direct access to a generator's MassMatrix function (if defined).\"\"\"\n ics_ignore, t_ignore, ds = self._get_initiator_cache(xdict, t)\n try:\n return self.model.MassMatrix(ds._supermodel.name, t, xdict, pdict)\n except AttributeError:\n # ds is not a MI with attribute _supermodel\n return 
self.model.MassMatrix(t, xdict, pdict)\n\n def AuxVars(self, t, xdict, pdict):\n \"\"\"Direct access to a generator's auxiliary variables\n definition (if defined).\"\"\"\n ics_ignore, t_ignore, ds = self._get_initiator_cache(xdict, t)\n try:\n return ds.model.AuxVars(ds._supermodel.name, t, xdict, pdict)\n except AttributeError:\n # ds is not a MI with attribute _supermodel\n return ds.model.AuxVars(t, xdict, pdict)\n\n def setup_conditions(self, conditions, traj):\n # in case the conditions use this model trajectory as a reference\n # then provide them with it\n if conditions is None:\n self.conditions = None\n else:\n # really need to copy conditions?\n self.conditions = conditions\n try:\n self.conditions.set_ref_traj(traj)\n except AttributeError:\n raise\n\n def evaluate(self, target, force=False):\n \"\"\"Evaluate interface consistency against target interface's trajectory\n on specified conditions.\n\n Optional force argument forces model to recompute its test trajectory,\n e.g. because of a known change in model parameters, ics, etc.\n \"\"\"\n assert isinstance(target, ModelInterface), \\\n \"Target argument must be another interface object\"\n if len(self.compatibleInterfaces) > 0 and \\\n target.__class__.__name__ not in self.compatibleInterfaces \\\n and not npy.sometrue([common.compareBaseClass(target, ctype) \\\n for ctype in self.compatibleInterfaces]):\n raise ValueError(\"Target interface not of compatible type\")\n try:\n self.conditions\n except AttributeError:\n self.setup_conditions(None, self.get_test_traj())\n force = force or target.test_traj is None\n if force:\n # discard returned traj here (still accessible via target.test_traj)\n target.get_test_traj(force=force)\n self.prepare_conditions(target)\n try:\n result = self.conditions(target)\n except KeyError:\n raise KeyError(\"Condition evaluation failed\")\n return result\n\n __call__ = evaluate\n\n def postprocess_test_traj(self, test_traj):\n \"\"\"Called by another interface via get_test_traj.\n Default post-processing of test trajectory is the identity\n function, i.e. no processing.\n\n Override this method to return a processed version of the\n trajectory or perform other post-computation clean-up, e.g.\n prepare auxiliary feature/condition-related information based\n on end state of trajectory so that HybridModel can use it to\n decide on next hybrid state to switch to.\n \"\"\"\n return test_traj\n\n def prepare_conditions(self, target):\n \"\"\"Called automatically by evaluate. 
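The default implementation is a no-op; a typical override might inspect target.test_traj (computed just beforehand) to cache quantities needed by the conditions. 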
Override with user-defined access\n to the target interface or processing of trajectory after return of the\n target's test trajectory.\n \"\"\"\n pass\n\n\nclass intModelInterface(ModelInterface):\n \"\"\"Interface providing internal evaluation criteria between models.\n Optional conditions (object) argument used to specify these criteria.\n \"\"\"\n def __init__(self, model, conditions=None, compatibleInterfaces=None,\n test_traj=None):\n \"\"\"Set model that generates test trajectories from which the dictionary\n of conditions can be imposed on a connected model.\n\n If no conditions are specified then the model is trivially wrapped in\n an \"empty\" interface.\n\n Optionally, a dummy test traj can be supplied in case of a dummy interface\n for a trivial condition test that does not need to evaluate a trajectory\n to determine the result.\n \"\"\"\n ModelInterface.__init__(self)\n # Avoid circular import with Model module\n #assert isinstance(model, Model.Model), \"Invalid Model object passed\"\n self.model = model #copy.deepcopy(model) # ???\n #print \"TEMP: (intModelInterface.__init__) -- should model be copied?\"\n self.test_traj = test_traj # may be initially a temporary value, None\n\n def ensure_has_test_traj(self):\n \"\"\"Cause recomputation of test trajectory if not already present in\n model, returning boolean for whether recomputation was performed.\n \"\"\"\n info = self.model.current_defining_args()\n # include any interface-specific changes that would be made\n new_args = self.initialize_model()\n if new_args is not None:\n info.update(new_args)\n if self.model.has_exact_traj(self._trajname, info):\n # this verifies that the traj that would be computed\n # already exists\n return False\n else:\n try:\n self.compute_traj(need_init=False, new_args=new_args)\n except KeyboardInterrupt:\n raise\n except:\n print(\"Model interface compute_traj method for model \" + \\\n \"'%s' failed\" % self.model.name)\n print(\"%s %s\" % (sys.exc_info()[0], sys.exc_info()[1]))\n return False\n else:\n return True\n\n def has_test_traj(self):\n return self.test_traj is not None\n\n def compute_traj(self, need_init=True, new_args=None):\n if need_init:\n new_args = self.initialize_model()\n if new_args is not None and len(new_args) > 0:\n old_info = self.model.current_defining_args()\n self.model.set(**new_args)\n else:\n old_info = None\n self.model.compute(trajname=self._trajname, force=True)\n if old_info is not None:\n # restore \"standard\" state\n self.model.set(**dict(old_info))\n\n def get_test_traj(self, force=False):\n \"\"\"Called by another interface.\n Return model's test trajectory, using any post-processing\n specified by user-defined process_test_traj method.\n\n Use force option if model is known to have changed and trajectory\n needs refreshing.\n \"\"\"\n if force and not isinstance(self.test_traj, Trajectory):\n self.compute_traj()\n recomputed = True\n else:\n recomputed = self.ensure_has_test_traj()\n if recomputed or self.test_traj is None:\n self.test_traj = \\\n self.postprocess_test_traj(self.model[self._trajname])\n return self.test_traj\n\n def initialize_model(self):\n \"\"\"Return any unique model-specific settings here, as a dictionary with\n keys that can include initial conditions, parameters, tdata, algorithmic\n parameters. Use the same keys that are suitable for a call to the\n Model.set method, i.e. 'pars', 'ics', 'tdata', and 'algparams'.\n\n Override in a sub-class to use. 
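A minimal sketch of an override (the parameter name 'k' and the values are hypothetical):\n\n def initialize_model(self):\n return {'pars': {'k': 0.5}, 'tdata': [0, 10]}\n\n 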
This method will be called\n before any trajectory computation of the model.\n \"\"\"\n pass\n\n\nclass extModelInterface(ModelInterface):\n \"\"\"Interface from a trajectory of numerical data and test conditions\n providing external evaluation criteria for a model.\n Optional conditions (object) argument used to specify these criteria.\n \"\"\"\n def __init__(self, traj=None, conditions=None, compatibleInterfaces=None):\n ModelInterface.__init__(self)\n self.setup_conditions(conditions, traj)\n self.set_test_traj(traj)\n if compatibleInterfaces is None:\n self.compatibleInterfaces = []\n else:\n self.compatibleInterfaces = compatibleInterfaces\n\n def set_test_traj(self, traj):\n \"\"\"Do any user-defined preprocessing to the given trajectory, including\n converting it to a different type of trajectory.\n \"\"\"\n self.test_traj = self.postprocess_test_traj(traj)\n # propagate ref traj to conditions if they use it\n self.conditions.set_ref_traj(self.test_traj)\n\n def ensure_has_test_traj(self):\n \"\"\"Never needs to recompute trajectory as it is fixed, so always\n returns False.\n \"\"\"\n assert self.has_test_traj(), \"Test trajectory missing\"\n return False\n\n def has_test_traj(self):\n return isinstance(self.test_traj, Trajectory.Trajectory)\n\n def get_test_traj(self, force=False):\n \"\"\"Called by another interface.\n Optional force argument is ignored for this class, as the\n trajectory is fixed.\"\"\"\n return self.test_traj\n\n\n\n# old code for internal interface\n ## currently both 'ics' and 'initialconditions' keys are valid, but want only one\n ## present here and we'll make sure it's stored as 'ics' here\n #assert len(common.intersect([\"initialconditions\", \"ics\"], conditions.keys())) == 1, \\\n #\"Conditions must include one list of initial conditions\"\n #if 'ics' not in conditions:\n #self.conditions['ics'] = conditions['initialconditions']\n #del self.conditions['initialconditions']\n\n\n\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, print_function\n\nimport pytest\n\nfrom numpy import linspace, sin\nfrom numpy.testing import assert_almost_equal\nfrom PyDSTool import (\n args,\n Events\n)\nfrom PyDSTool.Generator import (\n Dopri_ODEsystem,\n Euler_ODEsystem,\n InterpolateTable,\n Radau_ODEsystem,\n Vode_ODEsystem,\n)\n\nfrom .helpers import clean_files\n\n\[email protected]\ndef dsargs():\n timeData = linspace(0, 10, 20)\n sindata = sin(20 * timeData)\n xData = {'in': sindata}\n my_input = InterpolateTable({\n 'tdata': timeData,\n 'ics': xData,\n 'name': 'interp1d',\n 'method': 'linear',\n 'checklevel': 1,\n 'abseps': 1e-5\n }).compute('interp')\n\n fvarspecs = {\n \"w\": \"k*w + sin(t) + myauxfn1(t)*myauxfn2(w)\",\n 'aux_wdouble': 'w*2 + globalindepvar(t)',\n 'aux_other': 'myauxfn1(2*t) + initcond(w)'\n }\n fnspecs = {\n 'myauxfn1': (['t'], '2.5*cos(3*t)'),\n 'myauxfn2': (['w'], 'w/2')\n }\n # targetlang is optional if the default python target is desired\n DSargs = args(fnspecs=fnspecs, name='event_test')\n DSargs.varspecs = fvarspecs\n DSargs.tdomain = [0.1, 2.1]\n DSargs.pars = {'k': 2, 'a': -0.5}\n DSargs.vars = 'w'\n DSargs.ics = {'w': 3}\n DSargs.inputs = {'in': my_input.variables['in']}\n DSargs.algparams = {'init_step': 0.01}\n DSargs.checklevel = 2\n ev_args_nonterm = {\n 'name': 'monitor',\n 'eventtol': 1e-4,\n 'eventdelay': 1e-5,\n 'starttime': 0,\n 'active': True,\n 'term': False,\n 'precise': True\n }\n thresh_ev_nonterm = Events.makeZeroCrossEvent(\n 'in',\n 0,\n ev_args_nonterm,\n 
inputnames=['in'],\n targetlang='c'\n )\n\n ev_args_term = {\n 'name': 'threshold',\n 'eventtol': 1e-4,\n 'eventdelay': 1e-5,\n 'starttime': 0,\n 'active': True,\n 'term': True,\n 'precise': True\n }\n\n thresh_ev_term = Events.makeZeroCrossEvent(\n 'w-20',\n 1,\n ev_args_term,\n ['w'],\n targetlang='c'\n )\n DSargs.events = [thresh_ev_nonterm, thresh_ev_term]\n\n return DSargs\n\n\ndef test_dopri_event(dsargs):\n \"\"\"\n Test Dopri_ODEsystem with events involving external inputs.\n\n Robert Clewley, September 2006.\n \"\"\"\n\n _run_checks(Dopri_ODEsystem(dsargs))\n\n\ndef test_radau_event(dsargs):\n \"\"\"\n Test Radau_ODEsystem with events involving external inputs.\n\n Robert Clewley, September 2006.\n \"\"\"\n\n _run_checks(Radau_ODEsystem(dsargs))\n\n\ndef test_vode_event(dsargs):\n \"\"\"\n Test Vode_ODEsystem with events involving external inputs.\n \"\"\"\n\n _run_checks(Vode_ODEsystem(dsargs))\n\n\ndef test_euler_event(dsargs):\n \"\"\"\n Test Euler_ODEsystem with events involving external inputs.\n \"\"\"\n\n dsargs.algparams['init_step'] = 0.0001\n _run_checks(Euler_ODEsystem(dsargs))\n\n\ndef _run_checks(ode):\n\n traj = ode.compute('traj')\n\n assert ode.diagnostics.hasWarnings()\n assert ode.diagnostics.findWarnings(10) != []\n assert ode.diagnostics.findWarnings(20) != []\n\n assert_almost_equal(traj.indepdomain[1], 1.14417, 3)\n assert_almost_equal(traj.getEventTimes()['monitor'][0], 0.80267, 4)\n\n\ndef teardown_module():\n clean_files(['event_test'])\n", "# Radau ODE system\r\nfrom __future__ import division, absolute_import, print_function\r\n\r\nimport imp\r\n\r\nfrom .allimports import *\r\nfrom PyDSTool.Generator import ODEsystem as ODEsystem\r\nfrom .baseclasses import theGenSpecHelper, genDB, _pollInputs\r\nfrom .mixins import CompiledMixin, full_path\r\nfrom PyDSTool.utils import *\r\nfrom PyDSTool.common import *\r\n# for future cleanup of * imports\r\nfrom PyDSTool import utils\r\nfrom PyDSTool import common\r\nfrom PyDSTool.ModelSpec import QuantSpec\r\nfrom PyDSTool.integrator import integrator\r\nimport numpy as npy\r\n\r\n# Other imports\r\nfrom numpy import Inf, NaN, isfinite, int, int32, float, float64, \\\r\n sometrue, alltrue, any, all, concatenate, transpose, array, zeros\r\nimport operator\r\nfrom copy import copy, deepcopy\r\n\r\n\r\nclass radau(integrator):\r\n \"\"\"Radau 5 specialization of the basic integrator class.\"\"\"\r\n\r\n def __init__(self, modname, rhs='default_name', phaseDim=0, paramDim=0,\r\n nAux=0, nEvents=0, nExtInputs=0, hasJac=0, hasJacP=0,\r\n hasMass=0, extraSpace=0, defaultBound=1e8):\r\n integrator.__init__(self, rhs=rhs, phaseDim=phaseDim, paramDim=paramDim,\r\n nAux=nAux, nEvents=nEvents, nExtInputs=nExtInputs, hasJac=hasJac,\r\n hasJacP=hasJacP, hasMass=hasMass, extraSpace=extraSpace,\r\n defaultBound=defaultBound)\r\n self.modname = modname\r\n try:\r\n self._integMod = imp.load_module(\r\n modname, *imp.find_module(modname, [\"radau5_temp\"]))\r\n except:\r\n print(\"Error in importing compiled vector field and integrator.\")\r\n print(\"Did you compile the RHS C code?\")\r\n raise\r\n # check module's directory\r\n assert 'Integrate' in dir(self._integMod), \\\r\n \"radau library does not contain Integrate()\"\r\n\r\n self.safety = []\r\n self.jacRecompute = []\r\n self.newtonStop = []\r\n self.stepChangeLB = []\r\n self.stepChangeUB = []\r\n self.stepSizeLB = []\r\n self.stepSizeUB = []\r\n self.hessenberg = []\r\n self.maxNewton = []\r\n self.newtonStart = []\r\n self.index1dim = []\r\n self.index2dim = []\r\n 
self.index3dim = []\r\n self.stepSizeStrategy = []\r\n self.DAEstructureM1 = []\r\n self.DAEstructureM2 = []\r\n\r\n retval = self._integMod.InitBasic(self.phaseDim, self.paramDim, self.nAux,\r\n self.nEvents, self.nExtInputs, self.hasJac,\r\n self.hasJacP, self.hasMass, self.extraSpace)\r\n\r\n if retval[0] != 1:\r\n raise PyDSTool_InitError('Call to InitBasic failed! (radau)')\r\n\r\n self.initBasic = True\r\n\r\n\r\n def Run(self, hinit=0, hmax=1.0, checkAux=0, calcSpecTimes=0, verbose=0,\r\n safety=0.9, jacRecompute=0.001, newtonStop=-1, stepChangeLB=1,\r\n stepChangeUB=1.2, stepSizeLB=0.2, stepSizeUB=8.0, hessenberg=0,\r\n maxNewton=7, newtonStart=0, index1dim=-1, index2dim=0, index3dim=0,\r\n stepSizeStrategy=1, DAEstructureM1=0, DAEstructureM2=0, useJac=0, useMass=0):\r\n if not self.initBasic:\r\n raise PyDSTool_InitError('initBasic is False (radau)')\r\n if not self.initEvents:\r\n raise PyDSTool_InitError('initEvents is False (radau)')\r\n if not self.initIntegrate:\r\n raise PyDSTool_InitError('initInteg is False (radau)')\r\n if not self.setParams:\r\n raise PyDSTool_InitError('setParams is False (radau)')\r\n if self.nExtInputs > 0 and not self.initExtInputs:\r\n raise PyDSTool_InitError('initExtInputs is False (radau)')\r\n\r\n self.setRadauParams(hinit=hinit, hmax=hmax, checkAux=checkAux,\r\n calcSpecTimes=calcSpecTimes,\r\n verbose=verbose, safety=safety,\r\n jacRecompute=jacRecompute, newtonStop=newtonStop,\r\n stepChangeLB=stepChangeLB, stepChangeUB=stepChangeUB,\r\n stepSizeLB=stepSizeLB, stepSizeUB=stepSizeUB,\r\n hessenberg=hessenberg,maxNewton=maxNewton,\r\n newtonStart=newtonStart, index1dim=index1dim,\r\n index2dim=index2dim, index3dim=index3dim,\r\n stepSizeStrategy=stepSizeStrategy,\r\n DAEstructureM1=DAEstructureM1,\r\n DAEstructureM2=DAEstructureM2,\r\n useJac=useJac,useMass=useMass)\r\n\r\n # For a run, we want to ensure indices are set to 0\r\n self.Reset()\r\n T, P, A, Stats, H, Err, EvtT, EvtP = self._integMod.Integrate(self.ic,\r\n self.t0,\r\n self.hinit,\r\n self.hmax,\r\n self.safety,\r\n self.jacRecompute,\r\n self.newtonStop,\r\n self.stepChangeLB,\r\n self.stepChangeUB,\r\n self.stepSizeLB,\r\n self.stepSizeUB,\r\n self.hessenberg,\r\n self.maxNewton,\r\n self.newtonStart,\r\n self.index1dim,\r\n self.index2dim,\r\n self.index3dim,\r\n self.stepSizeStrategy,\r\n self.DAEstructureM1,\r\n self.DAEstructureM2,\r\n self.useJac,\r\n self.useMass,\r\n self.verbose,\r\n self.checkAux,\r\n self.calcSpecTimes)\r\n self.points = P\r\n self.times = T\r\n self.auxPoints = A\r\n self.eventTimes = EvtT\r\n self.eventPoints = EvtP\r\n self.errors = Err\r\n self.stats = Stats\r\n self.step = H\r\n\r\n try:\r\n self.lastTime = self.times[-1]\r\n self.lastPoint = [self.points[i][-1] for i in range(self.phaseDim)]\r\n self.lastStep = self.step\r\n except IndexError:\r\n self.lastTime = self.t0\r\n self.lastPoint = self.ic\r\n self.lastStep = self.hinit\r\n self.numRuns += 1\r\n self.canContinue = True\r\n\r\n return T, P, A, Stats, H, Err, EvtT, EvtP\r\n\r\n\r\n def Continue(self, tend, params=[], calcSpecTimes=0, verbose=0,\r\n extInputChanged=False, extInputVals=[], extInputTimes=[],\r\n bounds=[]):\r\n if not self.initBasic:\r\n raise PyDSTool_InitError('initBasic is False (radau)')\r\n if not self.initEvents:\r\n raise PyDSTool_InitError('initEvents is False (radau)')\r\n if not self.initIntegrate:\r\n raise PyDSTool_InitError('initInteg is False (radau)')\r\n if not self.setParams:\r\n raise PyDSTool_InitError('setParams is False (radau)')\r\n if self.nExtInputs > 
0 and not self.initExtInputs:\r\n raise PyDSTool_InitError('initExtInputs is False (radau)')\r\n\r\n if not self.canContinue:\r\n raise PyDSTool_ContError('Unable to continue trajectory -- '\r\n 'have you run the integrator and reset events, etc?')\r\n\r\n self.setContParams(tend=tend, params=copy(params),\r\n calcSpecTimes=calcSpecTimes, verbose=verbose, extInputChanged=extInputChanged,\r\n extInputVals=copy(extInputVals), extInputTimes=copy(extInputTimes),\r\n bounds=copy(bounds))\r\n\r\n # For a continue, we do not set indices to 0\r\n T, P, A, Stats, H, Err, EvtT, EvtP = \\\r\n self._integMod.Integrate(self.lastPoint,\r\n self.lastTime,\r\n self.lastStep, self.hmax,\r\n self.safety,\r\n self.jacRecompute,\r\n self.newtonStop,\r\n self.stepChangeLB,\r\n self.stepChangeUB,\r\n self.stepSizeLB,\r\n self.stepSizeUB,\r\n self.hessenberg,\r\n self.maxNewton,\r\n self.newtonStart,\r\n self.index1dim,\r\n self.index2dim,\r\n self.index3dim,\r\n self.stepSizeStrategy,\r\n self.DAEstructureM1,\r\n self.DAEstructureM2,\r\n self.useJac,\r\n self.useMass,\r\n self.verbose,\r\n self.checkAux,\r\n self.calcSpecTimes)\r\n\r\n self.points = P\r\n self.times = T\r\n self.auxPoints = A\r\n self.eventTimes = EvtT\r\n self.eventPoints = EvtP\r\n self.errors = Err\r\n self.stats = Stats\r\n self.step = H\r\n\r\n try:\r\n self.lastTime = self.times[-1]\r\n self.lastPoint = [self.points[i][-1] for i in range(self.phaseDim)]\r\n self.lastStep = self.step\r\n except IndexError:\r\n self.lastTime = self.t0\r\n self.lastPoint = self.ic\r\n self.lastStep = self.hinit\r\n self.numRuns += 1\r\n self.numContinues += 1\r\n self.canContinue = True\r\n\r\n return T, P, A, Stats, H, Err, EvtT, EvtP\r\n\r\n\r\n def setRadauParams(self, hinit, hmax, checkAux, calcSpecTimes,\r\n verbose, safety, jacRecompute, newtonStop,\r\n stepChangeLB, stepChangeUB, stepSizeLB, stepSizeUB,\r\n hessenberg, maxNewton, newtonStart, index1dim,\r\n index2dim, index3dim, stepSizeStrategy,\r\n DAEstructureM1, DAEstructureM2, useJac, useMass):\r\n useJac = int(useJac)\r\n useMass = int(useMass)\r\n checkAux = int(checkAux)\r\n calcSpecTimes = int(calcSpecTimes)\r\n hessenberg = int(hessenberg)\r\n\r\n if not isinstance(hinit, _num_types):\r\n raise TypeError(\"hinit must be int, float\")\r\n\r\n if not isinstance(hmax, _num_types):\r\n raise TypeError(\"hmax must be int, float\")\r\n\r\n if abs(hinit) > abs(hmax):\r\n raise ValueError(\"Abs value of hinit (%g) must be less than hmax (%g)\"%(hinit,hmax))\r\n\r\n if not isinstance(checkAux, _int_types):\r\n raise TypeError(\"checkAux must be int\")\r\n if checkAux not in (0,1):\r\n raise TypeError(\"checkAux must be 0 or 1\")\r\n if checkAux == 1 and self.nAux <= 0:\r\n raise ValueError(\"checkAux cannot be 1 if nAux is 0\")\r\n\r\n if not isinstance(verbose, _int_types):\r\n raise TypeError(\"verbose must be int\")\r\n if verbose not in (0,1):\r\n if verbose >= 2:\r\n # interpret all greater values as 1\r\n verbose = 1\r\n else:\r\n raise TypeError(\"verbose must be 0 or 1\")\r\n\r\n if not isinstance(calcSpecTimes, _int_types):\r\n raise TypeError(\"calcSpecTimes must be int\")\r\n if calcSpecTimes not in (0,1):\r\n raise TypeError(\"calcSpecTimes must be 0 or 1\")\r\n if calcSpecTimes == 1 and len(self.specTimes) <= 0:\r\n raise ValueError(\"calcSpecTimes cannot be 1 if specTimes is empty\")\r\n\r\n if safety < 0:\r\n raise ValueError(\"safety must be non-negative\")\r\n if jacRecompute <= 0.0:\r\n raise ValueError(\"jacRecompute must be positive\")\r\n if newtonStop < 0:\r\n newtonStop = 0\r\n 
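# Sanity checks on the step-size controller: the step-change factors must\r\n # satisfy 0 < stepChangeLB <= stepChangeUB (Radau's fac1/fac2) and the step-size\r\n # scale bounds must satisfy 0 < stepSizeLB < stepSizeUB.\r\n 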
if stepChangeLB <= 0:\r\n raise ValueError(\"stepChangeLB must be positive\")\r\n if stepChangeUB <= 0:\r\n raise ValueError(\"stepChangeUB must be positive\")\r\n if stepSizeLB <= 0:\r\n raise ValueError(\"stepSizeLB must be positive\")\r\n if stepSizeUB <= 0:\r\n raise ValueError(\"stepSizeUB must be positive\")\r\n\r\n if stepChangeLB > stepChangeUB: # was >= but this allows fac1=fac2=1\r\n raise ValueError(\"stepChangeLB must be less than stepChangeUB\")\r\n if stepSizeLB >= stepSizeUB:\r\n raise ValueError(\"stepSizeLB must be less than stepSizeUB\")\r\n\r\n if hessenberg not in (0,1):\r\n raise ValueError(\"hessenberg must be 0 or 1\")\r\n if hessenberg == 1 and useMass != 0:\r\n raise ValueError(\"hessenberg form cannot be used for implicit systems (mass matrix)\")\r\n if not isinstance(maxNewton, _int_types):\r\n raise TypeError(\"maxNewton must be int\")\r\n if maxNewton <= 0:\r\n raise ValueError(\"maxNewton must be positive\")\r\n\r\n if newtonStart not in (0,1):\r\n raise ValueError(\"newtonStart must be 0 or 1\")\r\n\r\n if index1dim <= 0:\r\n index1dim = self.phaseDim\r\n if index2dim != 0:\r\n raise ValueError(\"Currently index2dim must be 0\")\r\n if index3dim != 0:\r\n raise ValueError(\"Currently index3dim must be 0\")\r\n\r\n if stepSizeStrategy not in (1,2):\r\n raise ValueError(\"stepSizeStrategy must be 1 or 2\")\r\n\r\n if DAEstructureM1 != 0:\r\n raise ValueError(\"Currently DAEstructureM1 must be 0\")\r\n if DAEstructureM2 != 0:\r\n raise ValueError(\"Currently DAEstructureM2 must be 0\")\r\n\r\n if useJac not in (0,1):\r\n raise ValueError(\"useJac must be 0 or 1\")\r\n if useMass not in (0,1):\r\n raise ValueError(\"useMass must be 0 or 1\")\r\n\r\n if useJac == 1 and self.hasJac != 1:\r\n raise ValueError(\"useJac must be 0 if hasJac is not 1\")\r\n if useMass == 1 and self.hasMass != 1:\r\n raise ValueError(\"useMass must be 0 if hasMass is not 1\")\r\n\r\n self.hinit = hinit\r\n self.hmax = hmax\r\n self.safety = safety\r\n self.jacRecompute = jacRecompute\r\n self.newtonStop = newtonStop\r\n self.stepChangeLB = stepChangeLB\r\n self.stepChangeUB = stepChangeUB\r\n self.stepSizeLB = stepSizeLB\r\n self.stepSizeUB = stepSizeUB\r\n self.hessenberg = hessenberg\r\n self.maxNewton = maxNewton\r\n self.newtonStart = newtonStart\r\n self.index1dim = index1dim\r\n self.index2dim = index2dim\r\n self.index3dim = index3dim\r\n self.stepSizeStrategy = stepSizeStrategy\r\n self.DAEstructureM1 = DAEstructureM1\r\n self.DAEstructureM2 = DAEstructureM2\r\n self.useJac = useJac\r\n self.useMass = useMass\r\n self.verbose = verbose\r\n self.checkAux = checkAux\r\n self.calcSpecTimes = calcSpecTimes\r\n\r\n\r\nclass Radau_ODEsystem(ODEsystem, CompiledMixin):\r\n \"\"\"Wrapper for Radau integrator (with support for differential-algebraic equations).\r\n\r\n Uses C target language only for functional specifications\"\"\"\r\n _paraminfo = {'rtol': 'Relative error tolerance.',\r\n 'atol': 'Absolute error tolerance.',\r\n 'safety': 'Safety factor in the step size prediction, default 0.9.',\r\n 'max_step': 'Maximal step size, default tend-tstart.',\r\n 'init_step': 'Initial step size, default is a guess computed by the function init_step.',\r\n 'fac1': 'Parameter for step size selection; the new step size is chosen subject to the restriction fac1 <= new_step/old_step <= fac2. Default value is 1.0.',\r\n 'fac2': 'Parameter for step size selection; the new step size is chosen subject to the restriction fac1 <= new_step/old_step <= fac2. 
Default value is 1.2.',\r\n 'stepLB': '',\r\n 'stepUB': '',\r\n 'refine': 'Refine output by adding points interpolated using the RK4 polynomial (0, 1 or 2).',\r\n 'step_strategy': \"\"\"Switch for step size strategy;\r\nIf step_strategy=1 mod. predictive controller (Gustafsson).\r\nIf step_strategy=2 classical step size control.\r\nThe default value (for step_strategy=0) is step_strategy=1.\r\nthe choice step_strategy=1 seems to produce safer results;\r\nfor simple problems, the choice step_strategy=2 produces\r\noften slightly faster runs.\"\"\",\r\n 'jac_recompute': \"\"\"Decides whether the Jacobian should be recomputed;\r\nincrease jac_recompute to 0.1 say, when Jacobian evaluations\r\nare costly. for small systems jac_recompute should be smaller\r\n(0.001, say). negative jac_recompute forces the code to\r\ncompute the Jacobian after every accepted step.\r\nDefault 0.001.\"\"\",\r\n 'newton_start': \"\",\r\n 'newton_stop': \"\",\r\n 'max_newton': \"Maximum number of Newton iterations to take in solving the implicit system at each step (default 7)\",\r\n 'DAEstructureM1': \"\",\r\n 'DAEstructureM2': \"\",\r\n 'hessenberg': \"\",\r\n 'index1dim': \"\",\r\n 'index2dim': \"\",\r\n 'index3dim': \"\",\r\n 'use_special': \"Switch for using special times\",\r\n 'specialtimes': \"List of special times to use during integration\",\r\n 'check_aux': \"Switch\",\r\n 'extraspace': \"\"\r\n }\r\n\r\n def __init__(self, kw):\r\n \"\"\"Use the nobuild key to postpone building of the library, e.g. in\r\n order to provide additional build options to makeLibSource and\r\n compileLib methods or to make changes to the C code by hand.\r\n No build options can be specified otherwise.\"\"\"\r\n\r\n # delete because not covered in ODEsystem\r\n nobuild = kw.pop('nobuild', False)\r\n ODEsystem.__init__(self, kw)\r\n self._solver = None\r\n self.diagnostics._errorcodes = {\r\n 0: 'Unrecognized error code returned (see stderr output)',\r\n -1 : 'input is not consistent',\r\n -2 : 'larger nmax is needed',\r\n 2 : 'larger nmax or maxevtpts is probably needed (error raised by solout)',\r\n -3 : 'step size becomes too small',\r\n -4 : 'the matrix is repeatedly singular (interrupted)',\r\n -8 : 'The solution exceeded a magbound (poor choice of initial step)'}\r\n self.diagnostics.outputStatsInfo = {\r\n 'last_step': 'Predicted step size of the last accepted step (useful for a subsequent call to radau).',\r\n 'num_steps': 'Number of used steps.',\r\n 'num_accept': 'Number of accepted steps.',\r\n 'num_reject': 'Number of rejected steps.',\r\n 'num_fcns': 'Number of function evaluations.',\r\n 'num_jacs': 'Number of Jacobian evaluations.',\r\n 'num_dec': 'Number of LU-decompositions',\r\n 'num_subs': 'Number of forward-backward substitutions',\r\n 'errorStatus': 'Error status on completion.'\r\n }\r\n\r\n # currently the final four of these params are for event handling\r\n algparams_def = {'poly_interp': False,\r\n 'init_step': 0,\r\n 'max_step': 0,\r\n 'rtol': [1e-9 for i in range(self.dimension)],\r\n 'atol': [1e-12 for i in range(self.dimension)],\r\n 'fac1': 1.0,\r\n 'fac2': 1.2,\r\n 'stepLB': 0.2,\r\n 'stepUB': 8.0,\r\n 'safety': 0.9,\r\n 'max_pts': 10000,\r\n 'refine': 0,\r\n 'maxbisect': [], # for events\r\n 'maxevtpts': 1000, # for events\r\n 'eventInt': [], # set using setEventInterval only\r\n 'eventDelay': [], # set using setEventDelay only\r\n 'eventTol': [], # set using setEventTol only\r\n 'use_special': 0,\r\n 'specialtimes': [],\r\n 'check_aux': 1,\r\n 'extraspace': 100,\r\n 'verbose': 0,\r\n 
'jac_recompute': 0.001,\r\n 'step_strategy': 1,\r\n 'index1dim': -1,\r\n 'index2dim': 0,\r\n 'index3dim': 0,\r\n 'DAEstructureM1': 0,\r\n 'DAEstructureM2': 0,\r\n 'hessenberg': 0,\r\n 'newton_start': 0,\r\n 'newton_stop': -1,\r\n 'max_newton': 7,\r\n 'hasJac': 0,\r\n 'hasJacP': 0,\r\n 'checkBounds': self.checklevel\r\n }\r\n for k, v in algparams_def.items():\r\n if k not in self.algparams:\r\n self.algparams[k] = v\r\n # verify that no additional keys are present in algparams, after\r\n # defaults are added above\r\n if len(self.algparams) != len(algparams_def):\r\n raise ValueError(\"Invalid keys present in algparams argument: \" \\\r\n + str(remain(self.algparams.keys(),algparams_def.keys())))\r\n # Check for non-constant mass matrix\r\n if self.haveMass():\r\n mspec = self.funcspec.auxfns['massMatrix']\r\n lensig = len(mspec[1])\r\n body_str = mspec[0][lensig:].replace('\\n','')\r\n qbody = QuantSpec('__body__', body_str, treatMultiRefs=False,\r\n ignoreSpecial=['[',']','{','}'])\r\n self._const_massmat = intersect(['Y_','t'], qbody.usedSymbols) == []\r\n else:\r\n self._const_massmat = True\r\n\r\n self._prepareEventSpecs()\r\n self._inputVarList = []\r\n self._inputTimeList = []\r\n\r\n if nobuild:\r\n print(\"Build the library using the makeLib method, or in \")\r\n print(\"stages using the makeLibSource and compileLib methods.\")\r\n else:\r\n self.makeLib()\r\n\r\n @property\r\n def integrator(self):\r\n return {\r\n 'name': ('radau5' if self._const_massmat else 'radau5v', 'Radau'),\r\n 'description': \"Radau5 integrator\" + \\\r\n \"\" if self._const_massmat else \" (version for non-constant mass matrices)\",\r\n 'src': [\"radau5mod.c\"],\r\n 'cflags': [\"-D__RADAU__\"],\r\n 'libs': [\r\n ('radau5', {\r\n 'sources': full_path(['radau5.f' if self._const_massmat else 'radau5v.f']),\r\n 'extra_f77_compile_args': utils.extra_arch_arg(['-w']),\r\n }),\r\n ('lapack_lite', {\r\n 'sources': full_path(['lapackc.f', 'lapack.f', 'dc_lapack.f']),\r\n 'extra_f77_compile_args': utils.extra_arch_arg(['-w']),\r\n })\r\n ],\r\n }\r\n\r\n def _prepareEventSpecs(self):\r\n eventActive = []\r\n eventTerm = []\r\n eventDir = []\r\n eventDelay = []\r\n eventTol = []\r\n maxbisect = []\r\n eventInt = []\r\n # convert event specs (term, active, etc.) 
into integparam specs\r\n self._eventNames = self.eventstruct.sortedEventNames()\r\n for evname in self._eventNames:\r\n ev = self.eventstruct.events[evname]\r\n assert isinstance(ev, LowLevelEvent), (\"Radau can only \"\r\n \"accept low level events\")\r\n # if event 'precise' flags set to False then set their tolerances\r\n # to be > max_step\r\n maxstep = self.algparams['max_step']\r\n for evname in self._eventNames:\r\n ev = self.eventstruct.events[evname]\r\n eventActive.append(int(ev.activeFlag))\r\n eventTerm.append(int(ev.termFlag))\r\n eventDir.append(ev.dircode)\r\n eventInt.append(ev.eventinterval)\r\n eventDelay.append(ev.eventdelay)\r\n if ev.preciseFlag:\r\n eventTol.append(ev.eventtol)\r\n maxbisect.append(ev.bisectlimit)\r\n else:\r\n eventTol.append(maxstep*1.5)\r\n maxbisect.append(1)\r\n self.algparams['eventTol'] = eventTol\r\n self.algparams['eventDelay'] = eventDelay\r\n self.algparams['eventInt'] = eventInt\r\n self.algparams['maxbisect'] = maxbisect\r\n self.algparams['eventActive'] = eventActive\r\n self.algparams['eventTerm'] = eventTerm\r\n self.algparams['eventDir'] = eventDir\r\n\r\n def compute(self, trajname, dirn='f', ics=None):\r\n continue_integ = ODEsystem.prepDirection(self, dirn)\r\n if ics is not None:\r\n self.set(ics=ics)\r\n self.validateICs()\r\n self.diagnostics.clearWarnings()\r\n self.diagnostics.clearErrors()\r\n if isinstance(self.algparams['rtol'], list):\r\n if len(self.algparams['rtol']) != self.dimension:\r\n raise ValueError('rtol list must have same length as phase dimension')\r\n else:\r\n rtol = self.algparams['rtol']\r\n self.algparams['rtol'] = [rtol for i in range(self.dimension)]\r\n if isinstance(self.algparams['atol'], list):\r\n if len(self.algparams['atol']) != self.dimension:\r\n raise ValueError('atol list must have same length as phase dimension')\r\n else:\r\n atol = self.algparams['atol']\r\n self.algparams['atol'] = [atol for i in range(self.dimension)]\r\n anames = self.funcspec.auxvars\r\n # Check i.c.'s are well defined (finite)\r\n self.checkInitialConditions()\r\n self.setEventICs(self.initialconditions, self.globalt0)\r\n # update event params in case changed since last run\r\n self._prepareEventSpecs()\r\n # Main integration\r\n t0 = self.indepvariable.depdomain[0]\r\n t1 = self.indepvariable.depdomain[1]\r\n plist = sortedDictValues(self.pars)\r\n self.algparams['hasJac'] = self.haveJacobian()\r\n self.algparams['hasJacP'] = self.haveJacobian_pars()\r\n self._ensure_solver()\r\n if self._dircode == 1:\r\n tbegin = t0\r\n tend = t1\r\n elif self._dircode == -1:\r\n # radau does reverse time integration simply by switching t0 and t1\r\n tbegin = t1\r\n tend = t0\r\n if len(self.algparams['specialtimes'])>0:\r\n use_special = self.algparams['use_special']\r\n else:\r\n use_special = 0\r\n bounds = [[],[]] # lower, then upper\r\n for v in self.funcspec.vars:\r\n bds = self.xdomain[v]\r\n try:\r\n bounds[0].append(bds[0])\r\n bounds[1].append(bds[1])\r\n except TypeError:\r\n print(\"%r %s %r\" % (v, type(bds), bds))\r\n print(self.xdomain)\r\n raise\r\n for p in self.funcspec.pars:\r\n bds = self.pdomain[p]\r\n try:\r\n bounds[0].append(bds[0])\r\n bounds[1].append(bds[1])\r\n except TypeError:\r\n print(\"%s %r\" % (type(bds), bds))\r\n raise\r\n if continue_integ:\r\n x0 = self._solver.lastPoint\r\n # overwrite t0 from self.indepvariable.domain, but use its t1\r\n tbegin = self._solver.lastTime\r\n if abs(self._solver.lastStep) < abs(self.algparams['init_step']):\r\n self.algparams['init_step'] = 
self._solver.lastStep\r\n if abs(t1-tbegin) < abs(self.algparams['init_step']):\r\n raise ValueError(\"Integration end point too close to initial \"\r\n \"point\")\r\n# if self.inputs and self._extInputsChanged:\r\n# self._extInputsChanged = False\r\n# self._solver.setContParams(tend, plist,\r\n# use_special,\r\n# self.algparams['verbose'],\r\n# True, deepcopy(self._inputVarList),\r\n# deeppcopy(self._inputTimeList))\r\n else:\r\n if self._solver.numRuns > 0:\r\n self._solver.clearAll()\r\n x0 = sortedDictValues(self.initialconditions, self.funcspec.vars)\r\n self._solver.setInteg(maxpts=self.algparams['max_pts'],\r\n rtol=self.algparams['rtol'], atol=self.algparams['atol'])\r\n self._solver.setRunParams(ic=x0, params=plist,\r\n t0=tbegin, tend=tend, gt0=self.globalt0,\r\n refine=self.algparams['refine'],\r\n specTimes=self.algparams['specialtimes'],\r\n bounds=bounds)\r\n if self.inputs:\r\n # self._extInputsChanged if global t0 changed so that can\r\n # adjust times given to the integrator (it is blind to global t0\r\n # when accesses input variable times)\r\n self._ensure_inputs(self._extInputsChanged)\r\n # hinit only set if not continue_integ\r\n if len(anames)>0:\r\n check_aux = self.algparams['check_aux']\r\n else:\r\n check_aux = 0\r\n if self.algparams['max_step'] == 0:\r\n max_step = abs(tend-tbegin)\r\n else:\r\n max_step = self.algparams['max_step']\r\n init_step = self.algparams['init_step']\r\n if self._dircode == 1:\r\n if init_step < 0:\r\n init_step = -init_step\r\n if max_step < 0:\r\n max_step = -max_step\r\n else:\r\n if init_step > 0:\r\n init_step = -init_step\r\n if max_step > 0:\r\n max_step = -max_step\r\n if continue_integ:\r\n # record needed for bounds checking and truncation\r\n old_highest_ix = self._solver.points.shape[1]\r\n alltData, X, A, Stats, H, Err, Evtimes, \\\r\n Evpoints = self._solver.Continue(tend, plist,\r\n use_special, self.algparams['verbose'],\r\n self._extInputsChanged,\r\n deepcopy(self._inputVarList),\r\n deepcopy(self._inputTimeList), bounds)\r\n else:\r\n old_highest_ix = 0\r\n self._solver.setEvents(eventActive=self.algparams['eventActive'],\r\n eventTerm=self.algparams['eventTerm'],\r\n eventDir=self.algparams['eventDir'],\r\n eventDelay=self.algparams['eventDelay'],\r\n eventInt=self.algparams['eventInt'],\r\n eventTol=self.algparams['eventTol'],\r\n maxevtpts=self.algparams['maxevtpts'],\r\n maxbisect=self.algparams['maxbisect'])\r\n alltData, X, A, Stats, H, Err, Evtimes, \\\r\n Evpoints = self._solver.Run(init_step,\r\n max_step,\r\n check_aux,\r\n use_special,\r\n self.algparams['verbose'],\r\n self.algparams['safety'],\r\n self.algparams['jac_recompute'],\r\n self.algparams['newton_stop'],\r\n self.algparams['fac1'],\r\n self.algparams['fac2'],\r\n self.algparams['stepLB'],\r\n self.algparams['stepUB'],\r\n self.algparams['hessenberg'],\r\n self.algparams['max_newton'],\r\n self.algparams['newton_start'],\r\n self.algparams['index1dim'],\r\n self.algparams['index2dim'],\r\n self.algparams['index3dim'],\r\n self.algparams['step_strategy'],\r\n self.algparams['DAEstructureM1'],\r\n self.algparams['DAEstructureM2'],\r\n self.haveJacobian(),\r\n self.haveMass())\r\n self._extInputsChanged = False # reset this now\r\n self.diagnostics.outputStats = {'last_step': H,\r\n 'last_time': self._solver.lastTime,\r\n 'last_point': self._solver.lastPoint,\r\n 'num_fcns': Stats[0],\r\n 'num_jacs': Stats[1],\r\n 'num_steps': Stats[2],\r\n 'num_accept': Stats[3],\r\n 'num_reject': Stats[4],\r\n 'num_dec': Stats[5],\r\n 'num_subs': Stats[6],\r\n 
'errorStatus': Err\r\n }\r\n if self._dircode == -1:\r\n # reverse the array object (no reverse method!)\r\n alltData = alltData[::-1]\r\n X = X[:,::-1]\r\n if anames != []:\r\n A = A[:,::-1]\r\n xnames = self._var_ixmap\r\n # Package up computed trajectory in Variable variables\r\n # Add external inputs warnings to self.diagnostics.warnings, if any\r\n## for f in inputVarList:\r\n## for winfo in f.diagnostics.warnings:\r\n## self.diagnostics.warnings.append((W_NONTERMSTATEBD,\r\n## (winfo[0], f.name, winfo[1],\r\n## f.depdomain)))\r\n eventslist = self.eventstruct.query(['lowlevel', 'active'])\r\n termevents = self.eventstruct.query(['term'], eventslist)\r\n if self._eventNames != []:\r\n # build self.diagnostics.warnings because events happened --\r\n # and keep a record of which times terminal events happened because\r\n # Model.py's event handling procedure assumes multiple events\r\n # happening at one time are listed in one warning\r\n termevtimes = {}\r\n nontermevtimes = {}\r\n try:\r\n for evix in range(len(self._eventNames)):\r\n if Evpoints[evix] is None:\r\n continue\r\n evname = self._eventNames[evix]\r\n numevs = len(Evtimes[evix])\r\n if self.algparams['eventTerm'][evix]:\r\n if numevs > 1:\r\n print(\"Event info: %r %r\" % (Evpoints, Evtimes))\r\n assert numevs <= 1, (\"Internal error: more than one \"\r\n \"terminal event of same type found\")\r\n # For safety, we should assert that this event\r\n # also appears in termevents, but we don't\r\n if Evtimes[evix][0] in termevtimes.keys():\r\n # append event name to this warning\r\n warning_ix = termevtimes[Evtimes[evix][0]]\r\n self.diagnostics.warnings[warning_ix][1][1].append(evname)\r\n else:\r\n # make new termevtime entry for the new warning\r\n termevtimes[Evtimes[evix][0]] = \\\r\n len(self.diagnostics.warnings)\r\n self.diagnostics.warnings.append((W_TERMEVENT,\r\n (Evtimes[evix][0],\r\n [evname])))\r\n else:\r\n for ev in range(numevs):\r\n if Evtimes[evix][ev] in nontermevtimes.keys():\r\n # append event name to this warning\r\n warning_ix = nontermevtimes[Evtimes[evix][ev]]\r\n self.diagnostics.warnings[warning_ix][1][1].append(evname)\r\n else:\r\n # make new nontermevtime entry for the new warning\r\n nontermevtimes[Evtimes[evix][ev]] = \\\r\n len(self.diagnostics.warnings)\r\n self.diagnostics.warnings.append((W_NONTERMEVENT,\r\n (Evtimes[evix][ev],\r\n [evname])))\r\n except IndexError:\r\n print(\"Events returned from integrator are the wrong size.\")\r\n print(\" Did you change the system and not refresh the C \" \\\r\n + \"library using the forcelibrefresh() method?\")\r\n raise\r\n termcount = 0\r\n for (w,i) in self.diagnostics.warnings:\r\n if w == W_TERMEVENT or w == W_TERMSTATEBD:\r\n if termcount > 0:\r\n raise ValueError(\"Internal error: more than one terminal \"\r\n \"event found\")\r\n termcount += 1\r\n # post-process check of variable bounds (if defined and algparams['checkBounds'] True)\r\n if self._dircode > 0:\r\n compare = operator.lt\r\n last_ix = Inf\r\n else:\r\n compare = operator.gt\r\n last_ix = -Inf\r\n highest_ix = X.shape[1]-1\r\n last_t = Inf\r\n if self.algparams['checkBounds'] > 0:\r\n # temp storage for repeatedly used object attributes (for lookup efficiency)\r\n depdomains = dict(zip(range(self.dimension),\r\n [self.variables[xn].depdomain for xn in xnames]))\r\n offender_ix = None\r\n for xi in range(self.dimension):\r\n if not any(depdomains[xi].isfinite()):\r\n # no point in checking when the bounds are +/- infinity\r\n continue\r\n next_last_ix = 
array_bounds_check(X[xi][old_highest_ix:],\r\n depdomains[xi], self._dircode) + old_highest_ix\r\n if compare(next_last_ix, last_ix):\r\n # won't count as truncating unless the following checks\r\n # hold\r\n last_ix = next_last_ix\r\n offender_ix = xi\r\n if not isfinite(last_ix) and last_ix < 0:\r\n # only use +Inf hereon to flag no truncation needed\r\n last_ix = Inf\r\n elif last_ix >= 0 and last_ix < highest_ix:\r\n # truncate data\r\n last_t = alltData[last_ix]\r\n print(\"Warning; domain bound reached (because algparams['checkBounds'] > 0)\")\r\n self.diagnostics.warnings.append((W_TERMSTATEBD,\r\n (last_t, xnames[offender_ix],\r\n X[offender_ix, last_ix],\r\n depdomains[offender_ix].get())))\r\n # Create variables (self.variables contains no actual data)\r\n variables = copyVarDict(self.variables)\r\n # build event pointset information (reset previous trajectory's)\r\n # don't include events after any truncation due to state bound violation\r\n self.trajevents = {}\r\n for evix in range(len(self._eventNames)):\r\n evname = self._eventNames[evix]\r\n if Evpoints[evix] is None:\r\n self.trajevents[evname] = None\r\n else:\r\n try:\r\n ev_a_list = []\r\n for t in Evtimes[evix]:\r\n tix = find(alltData, t)\r\n ev_a_list.append(A[:,tix])\r\n ev_array = concatenate((Evpoints[evix],\r\n transpose(array(ev_a_list, 'd'))))\r\n del ev_a_list, tix\r\n except TypeError:\r\n # A is empty\r\n ev_array = Evpoints[evix]\r\n if last_ix >= 0 and last_ix < highest_ix:\r\n # don't count last_ix = -1 which is the same as highest_ix\r\n last_ev_tix = npy.argmax(Evtimes[evix] >= alltData[last_ix])\r\n if last_ev_tix == 0 and Evtimes[evix][0] >= last_t:\r\n # checks that there was actually a violation\r\n # - so no events to record\r\n self.trajevents[evname] = None\r\n else:\r\n # truncation needed\r\n ev_array = ev_array[:, :last_ev_tix+1]\r\n ev_times = Evtimes[evix][:last_ev_tix+1]\r\n self.trajevents[evname] = Pointset({'coordnames': xnames+anames,\r\n 'indepvarname': 't',\r\n 'coordarray': ev_array,\r\n 'indepvararray': ev_times})\r\n else:\r\n # no truncation needed\r\n self.trajevents[evname] = Pointset({'coordnames': xnames+anames,\r\n 'indepvarname': 't',\r\n 'coordarray': ev_array,\r\n 'indepvararray': Evtimes[evix]})\r\n if last_ix >= 0 and last_ix < highest_ix:\r\n # truncate\r\n X = X[:, :last_ix]\r\n alltData = alltData[:last_ix]\r\n try:\r\n allxDataDict = dict(zip(xnames,X))\r\n except IndexError:\r\n print(\"Integration returned variable values of unexpected dimensions.\")\r\n print(\" Did you change the system and not refresh the C library\" \\\r\n + \" using the forcelibrefresh() method?\")\r\n raise\r\n # storage of all auxiliary variable data\r\n anames = self.funcspec.auxvars\r\n try:\r\n if anames != []:\r\n if last_ix < highest_ix:\r\n A = A[:, :last_ix]\r\n try:\r\n allaDataDict = dict(zip(anames,A))\r\n except TypeError:\r\n print(\"Internal error! 
Type of A: %s\" % type(A))\r\n raise\r\n except IndexError:\r\n print(\"Integration returned auxiliary values of unexpected dimensions.\")\r\n print(\" Did you change the system and not refresh the C library\" \\\r\n + \" using the forcelibrefresh() method?\")\r\n raise\r\n if int(Err) == 1 or (int(Err) == 2 and termcount == 1):\r\n # output OK\r\n if self.algparams['poly_interp']:\r\n rhsfn = self._solver.Rhs\r\n # when Dopri can output the Rhs values alongside variable\r\n # values then this won't be necessary\r\n dxvals = zeros((len(alltData),self.dimension),float)\r\n for tix, tval in enumerate(alltData):\r\n # solver's Rhs function already contains the inputs so no\r\n # need to recompute and provide here.\r\n #i = _pollInputs(sortedDictValues(self.inputs), tval,\r\n # self.checklevel)\r\n # X is the output variable array, but rhsfn demands a list\r\n dxvals[tix] = rhsfn(tval, list(X[:,tix]), plist)[0]\r\n for xi, x in enumerate(xnames):\r\n if len(alltData) > 1:\r\n if self.algparams['poly_interp']:\r\n interp = PiecewisePolynomial(alltData,\r\n array([allxDataDict[x], dxvals[:,xi]]).T, 2)\r\n else:\r\n interp = interp1d(alltData, allxDataDict[x])\r\n variables[x] = Variable(interp, 't', x, x)\r\n else:\r\n raise PyDSTool_ValueError(\"Fewer than 2 data points computed\")\r\n for a in anames:\r\n if len(alltData) > 1:\r\n variables[a] = Variable(interp1d(alltData,allaDataDict[a]),\r\n 't', a, a)\r\n else:\r\n raise PyDSTool_ValueError(\"Fewer than 2 data points computed\")\r\n # final checks\r\n #self.validateSpec()\r\n self.defined = True\r\n return Trajectory(trajname, list(variables.values()),\r\n abseps=self._abseps, globalt0=self.globalt0,\r\n checklevel=self.checklevel,\r\n FScompatibleNames=self._FScompatibleNames,\r\n FScompatibleNamesInv=self._FScompatibleNamesInv,\r\n events=self.trajevents,\r\n modelNames=self.name,\r\n modelEventStructs=self.eventstruct)\r\n else:\r\n try:\r\n diagnost_info = self.diagnostics._errorcodes[int(Err)]\r\n except TypeError:\r\n # errcode messed up from Radau\r\n print(\"Error code: %d\" % Err)\r\n diagnost_info = self.diagnostics._errorcodes[0]\r\n if self._solver.verbose:\r\n info(self.diagnostics.outputStats, \"Output statistics\")\r\n self.defined = False\r\n # Did the solver run out of memory?\r\n if (len(alltData) == self.algparams['max_pts'] or \\\r\n self.diagnostics.outputStats['num_steps'] >= self.algparams['max_pts']) \\\r\n and alltData[-1] < tend:\r\n print(\"max_pts algorithmic parameter too small: current \" + \\\r\n \"value is %i\"%self.algparams['max_pts'])\r\n# avstep = (self.algparams['init_step']+self.diagnostics.outputStats['last_step'])/2.\r\n if self.diagnostics.outputStats['last_time']-tbegin > 0:\r\n ms = str(int(round(self.algparams['max_pts'] / \\\r\n (self.diagnostics.outputStats['last_time'] - \\\r\n tbegin)*(tend-tbegin))))\r\n else:\r\n ms = 'Inf'\r\n print(\"(recommended value for this trajectory segment is \" + \\\r\n \"estimated to be %s (saved in diagnostics.errors attribute))\"%str(ms))\r\n diagnost_info += \" -- recommended value is \" + ms\r\n self.diagnostics.errors.append((E_COMPUTFAIL,\r\n (self._solver.lastTime, diagnost_info)))\r\n raise PyDSTool_ExistError(\"No trajectory created\")\r\n\r\n\r\n def Rhs(self, t, xdict, pdict=None, asarray=True):\r\n \"\"\"asarray is an unused, dummy argument for compatibility with Model.Rhs\"\"\"\r\n # must convert names to FS-compatible as '.' 
sorts before letters\r\n # while '_' sorts after!\r\n x = sortedDictValues(filteredDict(self._FScompatibleNames(xdict),\r\n self.funcspec.vars))\r\n if pdict is None:\r\n pdict = self.pars\r\n # internal self.pars already is FS-compatible\r\n p = sortedDictValues(pdict)\r\n else:\r\n p = sortedDictValues(self._FScompatibleNames(pdict))\r\n i = _pollInputs(sortedDictValues(self.inputs),\r\n t, self.checklevel)\r\n self._ensure_solver({'params': p, 't0': 0, 'tend': 1})\r\n self._ensure_inputs()\r\n return self._solver.Rhs(t, x, p+i)[0]\r\n\r\n\r\n def Jacobian(self, t, xdict, pdict=None, asarray=True):\r\n \"\"\"asarray is an unused, dummy argument for compatibility with\r\n Model.Jacobian\"\"\"\r\n if self.haveJacobian():\r\n x = sortedDictValues(filteredDict(self._FScompatibleNames(xdict),\r\n self.funcspec.vars))\r\n if pdict is None:\r\n pdict = self.pars\r\n # internal self.pars already is FS-compatible\r\n p = sortedDictValues(pdict)\r\n else:\r\n p = sortedDictValues(self._FScompatibleNames(pdict))\r\n i = _pollInputs(sortedDictValues(self.inputs),\r\n t, self.checklevel)\r\n self._ensure_solver({'params': p, 't0': 0, 'tend': 1})\r\n self._ensure_inputs()\r\n return self._solver.Jacobian(t, x, p+i)[0]\r\n else:\r\n raise PyDSTool_ExistError(\"Jacobian not defined\")\r\n\r\n\r\n def JacobianP(self, t, xdict, pdict=None, asarray=True):\r\n \"\"\"asarray is an unused, dummy argument for compatibility with\r\n Model.JacobianP\"\"\"\r\n if self.haveJacobian_pars():\r\n x = sortedDictValues(filteredDict(self._FScompatibleNames(xdict),\r\n self.funcspec.vars))\r\n if pdict is None:\r\n pdict = self.pars\r\n # internal self.pars already is FS-compatible\r\n p = sortedDictValues(pdict)\r\n else:\r\n p = sortedDictValues(self._FScompatibleNames(pdict))\r\n i = _pollInputs(sortedDictValues(self.inputs),\r\n t, self.checklevel)\r\n self._ensure_solver({'params': p, 't0': 0, 'tend': 1})\r\n self._ensure_inputs()\r\n return self._solver.JacobianP(t, x, p+i)[0]\r\n else:\r\n raise PyDSTool_ExistError(\"Jacobian w.r.t. 
parameters not defined\")\r\n\r\n\r\n def AuxVars(self, t, xdict, pdict=None, asarray=True):\r\n \"\"\"asarray is an unused, dummy argument for compatibility with\r\n Model.AuxVars\"\"\"\r\n x = sortedDictValues(filteredDict(self._FScompatibleNames(xdict),\r\n self.funcspec.vars))\r\n if pdict is None:\r\n pdict = self.pars\r\n # internal self.pars already is FS-compatible\r\n p = sortedDictValues(pdict)\r\n else:\r\n p = sortedDictValues(self._FScompatibleNames(pdict))\r\n i = _pollInputs(sortedDictValues(self.inputs),\r\n t, self.checklevel)\r\n self._ensure_solver({'params': p, 't0': 0, 'tend': 1})\r\n self._ensure_inputs()\r\n return self._solver.AuxFunc(t, x, p+i)[0]\r\n\r\n\r\n def MassMatrix(self, t, xdict, pdict=None, asarray=True):\r\n \"\"\"asarray is an unused, dummy argument for compatibility with\r\n Model.MassMatrix\"\"\"\r\n if self.haveMass():\r\n x = sortedDictValues(filteredDict(self._FScompatibleNames(xdict),\r\n self.funcspec.vars))\r\n if pdict is None:\r\n pdict = self.pars\r\n # internal self.pars already is FS-compatible\r\n p = sortedDictValues(pdict)\r\n else:\r\n p = sortedDictValues(self._FScompatibleNames(pdict))\r\n i = _pollInputs(sortedDictValues(self.inputs),\r\n t, self.checklevel)\r\n self._ensure_solver({'params': p, 't0': 0, 'tend': 1})\r\n self._ensure_inputs()\r\n return self._solver.MassMatrix(t, x, p+i)[0]\r\n else:\r\n raise PyDSTool_ExistError(\"Mass matrix not defined\")\r\n\r\n\r\n def _ensure_solver(self, pars=None):\r\n if self._solver is None:\r\n x0 = sortedDictValues(filteredDict(self.initialconditions, self.funcspec.vars))\r\n# _integMod = self._ensureLoaded(self.modname)\r\n self._solver = radau(self.modname,\r\n rhs=self.name, phaseDim=self.dimension,\r\n paramDim=self.numpars,\r\n nAux=len(self.funcspec.auxvars),\r\n nEvents=len(self._eventNames),\r\n nExtInputs=len(self.inputs),\r\n hasJac=self.haveJacobian(),\r\n hasJacP=self.haveJacobian_pars(),\r\n hasMass=self.haveMass(),\r\n extraSpace=self.algparams['extraspace'])\r\n try:\r\n genDB.register(self)\r\n except PyDSTool_KeyError:\r\n errstr = \"Generator \" + self.name + \": this vector field's \" +\\\r\n \"DLL is already in use\"\r\n raise RuntimeError(errstr)\r\n if pars is not None:\r\n # tend value doesn't matter\r\n self._solver.setRunParams(\r\n ic=sortedDictValues(filteredDict(self.initialconditions,\r\n self.funcspec.vars)),\r\n params=pars['params'],\r\n t0=pars['t0'], tend=pars['tend'],\r\n gt0=self.globalt0,\r\n refine=0, specTimes=[])\r\n\r\n def _ensure_inputs(self, force=False):\r\n if not self.inputs:\r\n return\r\n if force:\r\n listOK = False\r\n else:\r\n try:\r\n listOK = self._inputTimest0 == self.globalt0\r\n except AttributeError:\r\n # not yet defined, so proceed\r\n listOK = False\r\n if not listOK:\r\n self._inputVarList = []\r\n self._inputTimeList = []\r\n self._inputTimest0 = self.globalt0\r\n # inputVarList is a list of Variables or Pointsets\r\n for inp in sortedDictValues(self.inputs):\r\n if isinstance(inp, Variable):\r\n pts = inp.getDataPoints()\r\n if pts is None:\r\n raise TypeError(\"Can only pass external input Variable objects if based on\"\r\n \" an underlying mesh\")\r\n else:\r\n tvals = copy(pts[inp.indepvarname])\r\n tvals -= self.globalt0\r\n self._inputVarList.append(pts[inp.coordname].tolist())\r\n self._inputTimeList.append(tvals.tolist())\r\n elif isinstance(inp, Pointset):\r\n tvals = copy(inp.indepvararray)\r\n tvals -= self.globalt0\r\n self._inputVarList.append(inp[inp.coordname].tolist())\r\n 
self._inputTimeList.append(tvals.tolist())\r\n                else:\r\n                    raise TypeError(\"Invalid type of input\")\r\n        if not self._solver.initExtInputs:\r\n            self._solver.setExtInputs(True, deepcopy(self._inputVarList),\r\n                                      deepcopy(self._inputTimeList))\r\n        elif not listOK:\r\n            self._solver.clearExtInputs()\r\n            self._solver.setExtInputs(True, deepcopy(self._inputVarList),\r\n                                      deepcopy(self._inputTimeList))\r\n        self._solver.canContinue=True\r\n\r\n\r\n    def __del__(self):\r\n        genDB.unregister(self)\r\n        ODEsystem.__del__(self)\r\n\r\n\r\n\r\n# Register this Generator with the database\r\n\r\nsymbolMapDict = {'abs': 'fabs', 'sign': 'signum', 'mod': 'fmod'}\r\n# in future, provide appropriate mappings for libraries math,\r\n# random, etc. (for now it's left to FuncSpec)\r\ntheGenSpecHelper.add(Radau_ODEsystem, symbolMapDict, 'c')\r\n", "\"\"\"Test pickling for saving and loading various PyDSTool objects\"\"\"\n\nimport os\nfrom tempfile import mkstemp\nfrom numpy import (\n    array,\n    float64,\n    Inf,\n)\nfrom PyDSTool import (\n    Interval,\n    loadObjects,\n    Point,\n    Pointset,\n    saveObjects,\n    Variable,\n    Trajectory,\n    Events,\n)\nfrom PyDSTool.Generator import (\n    InterpolateTable,\n    Vode_ODEsystem,\n    ExplicitFnGen,\n    ImplicitFnGen,\n)\nimport pytest\n\n@pytest.fixture\ndef fname():\n    _, fname = mkstemp()\n    return fname\n\n\ndef test_saveload_array(fname):\n    \"\"\"Test pickling for saving and loading array\"\"\"\n    a = array([1, Inf])\n    b = [Inf, 0]\n\n    saveObjects([a, b], fname, True)\n    loadedObjs = loadObjects(fname)\n    assert a[0] == loadedObjs[0][0]\n    assert a[1] == loadedObjs[0][1]\n    assert b[0] == loadedObjs[1][0]\n    os.remove(fname)\n\n\ndef test_saveload_interval(fname):\n    \"\"\"Test pickling for saving and loading 'Interval'\"\"\"\n\n    m = Interval('test1', float, (-Inf, 1))\n    s = Interval('a_singleton', float, 0.4)\n    saveObjects([m, s], fname, True)\n    objs_ivals = loadObjects(fname)\n    assert objs_ivals[0].get(1) == 1\n\n    # Try loading partial list from a larger file\n    objs_part = loadObjects(fname, ['a_singleton'])\n    assert objs_part[0] == s\n    os.remove(fname)\n\n\ndef test_saveload_point_and_pointset(fname):\n    \"\"\"Test pickling for saving and loading 'Point' and 'Pointset'\"\"\"\n\n    x = Point(\n        coorddict={\n            'x0': [1.123456789],\n            'x1': [-0.4],\n            'x2': [4000]\n        },\n        coordtype=float64\n    )\n\n    v = Pointset(\n        coorddict={\n            'x0': 0.2,\n            'x1': -1.2\n        },\n        indepvardict={'t': 0.01},\n        coordtype=float,\n        indepvartype=float\n    )\n\n    saveObjects([x, v], fname, True)\n    objs_pts = loadObjects(fname)\n    assert objs_pts[0] == x\n    assert objs_pts[1] == v\n    os.remove(fname)\n\n\ndef test_saveload_variable(fname):\n    \"\"\"Test pickling for saving and loading 'Variable'\"\"\"\n\n    var1 = Variable(\n        Pointset(\n            coordarray=array(range(10), float) * 0.1,\n            indepvararray=array(range(10), float) * 0.5\n        ),\n        name='v1'\n    )\n    saveObjects(var1, fname, True)\n    obj_var = loadObjects(fname)[0]\n    assert obj_var(1.5) == var1(1.5)\n    os.remove(fname)\n\n\ndef test_saveload_trajectory(fname):\n    \"\"\"Test pickling for saving and loading 'Trajectory'\"\"\"\n\n    var1 = Variable(\n        Pointset(\n            coordarray=array(range(10), float) * 0.1,\n            indepvararray=array(range(10), float) * 0.5\n        ),\n        name='v1'\n    )\n    var2 = Variable(\n        Pointset(\n            coordarray=array(range(10), float) * 0.25 + 1.0,\n            indepvararray=array(range(10), float) * 0.5\n        ),\n        name='v2'\n    )\n    traj = Trajectory('traj1', [var1, var2])\n    saveObjects(traj, fname, True)\n    traj_loaded = loadObjects(fname)[0]\n    assert traj_loaded(2.0) == traj(2.0)\n    os.remove(fname)\n\n\n@pytest.fixture
\ndef interptable():\n    timeData = array([0.1, 1.1, 2.1])\n    xData = dict(zip(\n        ['x1', 'x2'],\n        [array([10.2, -1.4, 4.1]), array([0.1, 0.01, 0.4])]))\n    itableArgs = {\n        'tdata': timeData,\n        'ics': xData,\n        'name': 'interp',\n    }\n    return InterpolateTable(itableArgs)\n\n\ndef test_saveload_interpolated_table_generator(interptable, fname):\n    \"\"\"Test pickling for saving and loading 'InterpolateTable' Generator\"\"\"\n    itabletraj = interptable.compute('itable')\n    saveObjects(itabletraj, fname, True)\n    obj_itab = loadObjects(fname)\n    t = 0.1\n    while t < 2.1:\n        assert obj_itab[0](t) == itabletraj(t)\n        t += 0.1\n    os.remove(fname)\n\n\ndef test_saveload_vode_odesystem(interptable, fname):\n    \"\"\"Test pickling for saving and loading 'Vode_ODEsystem' Generator\"\"\"\n\n    # Vode object with event and external input trajectory (defined earlier)\n    fvarspecs = {\n        \"w\": \"k*w + a*itable + sin(t) + myauxfn1(t)*myauxfn2(w)\",\n        'aux_wdouble': 'w*2 + globalindepvar(t)',\n        'aux_other': 'myauxfn1(2*t) + initcond(w)'\n    }\n    fnspecs = {\n        'myauxfn1': (['t'], '2.5*cos(3*t)'),\n        'myauxfn2': (['w'], 'w/2')\n    }\n    ev_args = {\n        'name': 'threshold',\n        'eventtol': 1e-4,\n        'eventdelay': 1e-5,\n        'starttime': 0,\n        'term': True,\n    }\n    thresh_ev = Events.makePythonStateZeroCrossEvent('w', 20, 1, ev_args)\n    DSargs = {\n        'tdomain': [0.1, 2.1],\n        'tdata': [0.11, 2.1],\n        'ics': {'w': 3.0},\n        'pars': {'k': 2, 'a': -0.5},\n        'inputs': {'itable': interptable.variables['x1']},\n        'auxvars': ['aux_wdouble', 'aux_other'],\n        'algparams': {'init_step': 0.01, 'strict': False},\n        'events': thresh_ev,\n        'checklevel': 2,\n        'name': 'ODEtest',\n        'fnspecs': fnspecs,\n        'varspecs': fvarspecs\n    }\n    testODE = Vode_ODEsystem(DSargs)\n    odetraj = testODE.compute('testode')\n    saveObjects([odetraj, testODE], fname, True)\n    objs_ode = loadObjects(fname)\n    objs_ode[1].diagnostics.clearWarnings()\n    assert len(objs_ode[1].diagnostics.warnings) == 0\n    odetraj2 = objs_ode[1].compute('testode2')\n    assert odetraj2(0.6) == odetraj(0.6)\n    assert len(objs_ode[1].diagnostics.warnings) == 1\n    os.remove(fname)\n\n\ndef test_saveload_explicitfngen(fname):\n    \"\"\"Test pickling for saving and loading 'ExplicitFnGen'\"\"\"\n\n    args = {\n        'tdomain': [-50, 50],\n        'pars': {'speed': 1},\n        'xdomain': {'s': [-1., 1.]},\n        'name': 'sine',\n        'globalt0': 0.4,\n        'pdomain': {'speed': [0, 200]},\n        'varspecs': {'s': \"sin(globalindepvar(t)*speed)\"}\n    }\n    sin_gen = ExplicitFnGen(args)\n    sintraj1 = sin_gen.compute('sine1')\n    sin_gen.set(pars={'speed': 2})\n    sintraj2 = sin_gen.compute('sine2')\n    saveObjects([sin_gen, sintraj1, sintraj2], fname, True)\n    objs_sin = loadObjects(fname)\n    assert sintraj1(0.55) == objs_sin[1](0.55)\n    assert sintraj2(0.55) == objs_sin[2](0.55)\n    os.remove(fname)\n\n\ndef test_saveload_implicitfngen(fname):\n    \"\"\"Test pickling for saving and loading 'ImplicitFnGen'\"\"\"\n\n    argsi = {\n        'varspecs': {\n            \"y\": \"t*t+y*y-r*r\",\n            \"x\": \"t\"\n        },\n        'algparams': {'solvemethod': 'newton', 'atol': 1e-4},\n        'xdomain': {'y': [-2, 2]},\n        'ics': {'y': 0.75},\n        'tdomain': [-2, 0],\n        'pars': {'r': 2},\n        'vars': ['y'],\n        'checklevel': 2,\n        'name': 'imptest',\n    }\n\n    testimp = ImplicitFnGen(argsi)\n    traj1 = testimp.compute('traj1')\n    saveObjects([testimp, traj1], fname, True)\n    objs_imp = loadObjects(fname)\n    assert objs_imp[0].xdomain['y'] == [-2, 2]\n    assert traj1(-0.4) == objs_imp[1](-0.4)\n    os.remove(fname)\n\n\ndef test_saveload_model(fname):\n    \"\"\"Test pickling for saving and loading 'Model'\"\"\"\n    pass\n" ]
[ [ "numpy.dot" ], [ "numpy.isfinite", "numpy.nonzero", "numpy.isnan", "numpy.ones", "numpy.alltrue" ], [ "numpy.testing.assert_almost_equal", "numpy.linspace", "numpy.sin" ], [ "numpy.int", "numpy.array", "numpy.argmax", "numpy.isfinite" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JohanComparat/nbody-npt-functions
[ "a034db4e5a9b2f87dc42eeb6059c4dd280589e4a" ]
[ "bin/bin_SMHMr/MD10_add_Ms_2.py" ]
[ "# overall python packages\nimport glob\nimport astropy.io.fits as fits\nimport os\nimport time\nimport numpy as n\nimport sys \n# specific functions\nfrom scipy.stats import norm\n# dedicated packages\n#import StellarMass\n\nmeanSM= lambda Mh, z : n.log10(Mh * 2. * ( 0.0351 - 0.0247 * z/(1.+z)) / ((Mh/ (10**(11.79 + 1.5 * z/(1.+z))) )**(- 0.9 + 0.5 * z/(1.+z)) + ( Mh /(10**(11.79 + 1.5 * z/(1.+z))) )**(0.67 + 0.2 * z/(1.+z)) ) )\n\nfun = lambda mmm : norm.rvs( loc = mmm, scale = 0.15 )\n\n\ndef create_catalogs_out(fileList, z):\n\t\"\"\"\n\tAdds stellar mass using the Moster et al. 2013 model to the rockstar outputs. \n\t\"\"\"\n\tfor fileName in fileList:\n\t\tt0=time.time()\n\t\toutFile = fileName[:-5]+\"_Ms.fits\"\n\t\thd = fits.open(fileName)\n\t\tmean_SM = meanSM(10**hd[1].data['mvir']/0.6777, z)\n\t\t#print \"mean mgal\", mean_SM\n\t\tMgal_mvir_Mo13 = n.array([fun(el) for el in mean_SM]) # n.array(pool.starmap( fun, mean_SM ))\n\t\t#print \"res mgal\", Mgal_mvir_Mo13\n\t\t#print \"diff mgal - mvir\", n.mean(mean_SM-Mgal_mvir_Mo13) \n\t\t#print \"mean, std magl - mh\",n.mean(mean_SM-Mgal_mvir_Mo13), n.std(mean_SM-Mgal_mvir_Mo13)\n\t\tsel = (hd[1].data['mvir']>0)\n\t\t\n\t\tMgal_mvir_Mo13[sel==False] = n.zeros_like(Mgal_mvir_Mo13[sel==False])\n\t\t\n\t\tcol00 = fits.Column(name='stellar_mass_Mo13_mvir',format='D', unit='logMsun', array = Mgal_mvir_Mo13 )\n\t\tcol01 = fits.Column(name='stellar_mass_reliable', format='L', array = sel )\n\n\t\t#define the table hdu \n\t\tcolArray = []\n\t\tcolArray.append(hd[1].columns[0])\n\t\t# Mvir stellar mass\n\t\tcolArray.append(col00)\n\t\tcolArray.append(col01)\n\n\t\thdu_cols = fits.ColDefs(colArray)\n\t\ttb_hdu = fits.BinTableHDU.from_columns( hdu_cols )\n\n\t\t#define the header\n\t\tprihdr = fits.Header()\n\t\tprihdr['author'] = 'JC'\n\t\tprihdr['SAMfile'] = os.path.basename(fileName)\n\t\tprihdu = fits.PrimaryHDU(header=prihdr)\n\t\t#writes the file\n\t\tthdulist = fits.HDUList([prihdu, tb_hdu])\n\t\tif os.path.isfile(outFile):\n\t\t\tos.system(\"rm \"+outFile)\n\n\t\tthdulist.writeto(outFile)\n\t\tprint( time.time()-t0)\n\n# open the output file_type\nsumm = fits.open(os.path.join(os.environ[\"MD10\"], 'output_MD_1.0Gpc.fits'))[1].data\t\n\nfor ii in range(len(summ))[18:27]:\n\tprint( summ[ii])\n\tfileList = n.array(glob.glob(os.path.join(os.environ[\"MD10\"], 'work_agn', 'out_'+summ['snap_name'][ii]+'_SAM_Nb_?.fits')))\n\t#outFile = fileName[:-5]+\"_Ms.fits\"\n\tz = summ['redshift'][ii]\n\tprint( fileList)\n\tcreate_catalogs_out(fileList, z)\n\n\n" ]
[ [ "numpy.log10", "scipy.stats.norm.rvs", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kellielu/ReAgent
[ "c538992672220453cdc95044def25c4e0691a8b0", "c538992672220453cdc95044def25c4e0691a8b0", "c538992672220453cdc95044def25c4e0691a8b0", "c538992672220453cdc95044def25c4e0691a8b0" ]
[ "reagent/optimizer/soft_update.py", "reagent/gym/preprocessors/replay_buffer_inserters.py", "reagent/gym/policies/scorers/slate_q_scorer.py", "reagent/test/base/test_utils.py" ]
[ "#!/usr/bin/env python3\n\nimport torch\n\n\nclass SoftUpdate(torch.optim.Optimizer):\n def __init__(self, target_params, source_params, tau=0.1):\n \"\"\"\n Perform soft-update on target_params. Soft-update gradually blends\n source_params into target_params with this update equation:\n\n target_param = tau * source_param + (1 - tau) * target_param\n \"\"\"\n target_params = list(target_params)\n source_params = list(source_params)\n\n if len(target_params) != len(source_params):\n raise ValueError(\n \"target and source must have the same number of parameters\"\n )\n\n for t_param, s_param in zip(target_params, source_params):\n if t_param.shape != s_param.shape:\n raise ValueError(\n \"The shape of target parameter doesn't match that of the source\"\n )\n\n params = target_params + source_params\n defaults = dict(\n tau=tau, lr=1.0\n ) # set a dummy learning rate because optimizers are expected to have one\n super().__init__(params, defaults)\n\n for group in self.param_groups:\n tau = group[\"tau\"]\n if tau > 1.0 or tau < 0.0:\n raise ValueError(f\"tau should be in [0.0, 1.0]; got {tau}\")\n\n @classmethod\n def make_optimizer_scheduler(cls, target_params, source_params, tau):\n su = cls(target_params, source_params, tau)\n return {\"optimizer\": su}\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n params = group[\"params\"]\n n = len(params)\n tau = group[\"tau\"]\n for target_param, source_param in zip(params[: n // 2], params[n // 2 :]):\n if target_param is source_param:\n # skip soft-updating when the target network share s the parameter with\n # the network being train.\n continue\n new_param = tau * source_param.data + (1.0 - tau) * target_param.data\n target_param.data.copy_(new_param)\n return loss\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n\nimport logging\nfrom typing import Any, Callable, List, Tuple\n\nimport gym\nimport numpy as np\nfrom reagent.gym.types import Transition\nfrom reagent.replay_memory.circular_replay_buffer import ReplayBuffer\n\n\nlogger = logging.getLogger(__name__)\n\n\ntry:\n from recsim.simulator.recsim_gym import RecSimGymEnv\n\n HAS_RECSIM = True\nexcept ImportError:\n HAS_RECSIM = False\n logger.warning(f\"ReplayBuffer.create_from_env() will not recognize RecSim env\")\n\n\n# Arguments: replay_buffer, obs, action, reward, terminal, log_prob\nReplayBufferInserter = Callable[[ReplayBuffer, Transition], None]\n\n\ndef make_replay_buffer_inserter(env: gym.Env) -> ReplayBufferInserter:\n if HAS_RECSIM and isinstance(env.unwrapped, RecSimGymEnv):\n return RecSimReplayBufferInserter.create_for_env(env)\n return BasicReplayBufferInserter()\n\n\nclass BasicReplayBufferInserter:\n def __call__(self, replay_buffer: ReplayBuffer, transition: Transition):\n replay_buffer.add(**transition.asdict())\n\n\nclass RecSimReplayBufferInserter:\n def __init__(\n self,\n *,\n num_docs: int,\n num_responses: int,\n discrete_keys: List[str],\n box_keys: List[str],\n response_discrete_keys: List[Tuple[str, int]],\n response_box_keys: List[Tuple[str, Tuple[int]]],\n augmentation_discrete_keys: List[str],\n augmentation_box_keys: List[str],\n ):\n self.num_docs = num_docs\n self.num_responses = num_responses\n self.discrete_keys = discrete_keys\n self.box_keys = box_keys\n self.response_discrete_keys = response_discrete_keys\n self.response_box_keys = response_box_keys\n self.augmentation_discrete_keys = augmentation_discrete_keys\n self.augmentation_box_keys = augmentation_box_keys\n\n @classmethod\n def create_for_env(cls, env: gym.Env):\n obs_space = env.observation_space\n assert isinstance(obs_space, gym.spaces.Dict)\n user_obs_space = obs_space[\"user\"]\n if not isinstance(user_obs_space, gym.spaces.Box):\n raise NotImplementedError(\n f\"User observation space {type(user_obs_space)} is not supported\"\n )\n\n doc_obs_space = obs_space[\"doc\"]\n if not isinstance(doc_obs_space, gym.spaces.Dict):\n raise NotImplementedError(\n f\"Doc space {type(doc_obs_space)} is not supported\"\n )\n\n # Assume that all docs are in the same space\n\n discrete_keys: List[str] = []\n box_keys: List[str] = []\n\n key_0 = next(iter(doc_obs_space.spaces))\n doc_0_space = doc_obs_space[key_0]\n\n if isinstance(doc_0_space, gym.spaces.Dict):\n for k, v in doc_0_space.spaces.items():\n if isinstance(v, gym.spaces.Discrete):\n if v.n > 0:\n discrete_keys.append(k)\n elif isinstance(v, gym.spaces.Box):\n shape_dim = len(v.shape)\n if shape_dim <= 1:\n box_keys.append(k)\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError(\n f\"Doc feature {k} with the observation space of {type(v)}\"\n \" is not supported\"\n )\n elif isinstance(doc_0_space, gym.spaces.Box):\n pass\n else:\n raise NotImplementedError(f\"Unknown space {doc_0_space}\")\n\n augmentation_discrete_keys: List[str] = []\n augmentation_box_keys: List[str] = []\n augmentation = obs_space.spaces.get(\"augmentation\", None)\n if augmentation is not None:\n aug_0_space = list(augmentation.spaces.values())[0]\n for k, v in aug_0_space.spaces.items():\n if isinstance(v, gym.spaces.Discrete):\n if v.n > 0:\n augmentation_discrete_keys.append(k)\n elif isinstance(v, gym.spaces.Box):\n shape_dim = len(v.shape)\n if shape_dim <= 1:\n augmentation_box_keys.append(k)\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError(\n 
f\"Augmentation {k} with the observation space \"\n f\" of {type(v)} is not supported\"\n )\n\n response_space = obs_space[\"response\"][0]\n assert isinstance(response_space, gym.spaces.Dict)\n response_box_keys: List[Tuple[str, Tuple[int]]] = []\n response_discrete_keys: List[Tuple[str, int]] = []\n for k, v in response_space.spaces.items():\n if isinstance(v, gym.spaces.Discrete):\n response_discrete_keys.append((k, v.n))\n elif isinstance(v, gym.spaces.Box):\n response_box_keys.append((k, v.shape))\n else:\n raise NotImplementedError\n\n return cls(\n num_docs=len(doc_obs_space.spaces),\n num_responses=len(obs_space[\"response\"]),\n discrete_keys=discrete_keys,\n box_keys=box_keys,\n response_box_keys=response_box_keys,\n response_discrete_keys=response_discrete_keys,\n augmentation_box_keys=augmentation_box_keys,\n augmentation_discrete_keys=augmentation_discrete_keys,\n )\n\n def __call__(self, replay_buffer: ReplayBuffer, transition: Transition):\n transition_dict = transition.asdict()\n obs = transition_dict.pop(\"observation\")\n user = obs[\"user\"]\n\n kwargs = {}\n\n if self.box_keys or self.discrete_keys:\n doc_obs = obs[\"doc\"]\n for k in self.box_keys:\n kwargs[f\"doc_{k}\"] = np.stack([v[k] for v in doc_obs.values()])\n for k in self.discrete_keys:\n kwargs[f\"doc_{k}\"] = np.array([v[k] for v in doc_obs.values()])\n else:\n kwargs[\"doc\"] = np.stack(list(obs[\"doc\"].values()))\n\n # Augmentation\n\n if self.augmentation_box_keys or self.augmentation_discrete_keys:\n aug_obs = obs[\"augmentation\"]\n for k in self.augmentation_box_keys:\n kwargs[f\"augmentation_{k}\"] = np.stack([v[k] for v in aug_obs.values()])\n for k in self.augmentation_discrete_keys:\n kwargs[f\"augmentation_{k}\"] = np.array([v[k] for v in aug_obs.values()])\n\n # Responses\n\n response = obs[\"response\"]\n # We need to handle None below because the first state won't have response\n for k, d in self.response_box_keys:\n if response is not None:\n kwargs[f\"response_{k}\"] = np.stack([v[k] for v in response])\n else:\n kwargs[f\"response_{k}\"] = np.zeros(\n (self.num_responses, *d), dtype=np.float32\n )\n for k, _n in self.response_discrete_keys:\n if response is not None:\n kwargs[f\"response_{k}\"] = np.array([v[k] for v in response])\n else:\n kwargs[f\"response_{k}\"] = np.zeros(\n (self.num_responses,), dtype=np.int64\n )\n\n transition_dict.update(kwargs)\n replay_buffer.add(observation=user, **transition_dict)\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n\nimport reagent.core.types as rlt\nimport torch\nimport torch.nn.functional as F\nfrom reagent.gym.types import Scorer\nfrom reagent.models.base import ModelBase\n\n\ndef slate_q_scorer(num_candidates: int, q_network: ModelBase) -> Scorer:\n @torch.no_grad()\n def score(state: rlt.FeatureData) -> torch.Tensor:\n tiled_state = state.repeat_interleave(repeats=num_candidates, axis=0)\n candidate_docs = state.candidate_docs\n assert candidate_docs is not None\n actions = candidate_docs.as_feature_data()\n\n q_network.eval()\n scores = q_network(tiled_state, actions).view(-1, num_candidates)\n q_network.train()\n\n select_prob = F.softmax(candidate_docs.value, dim=1)\n assert select_prob.shape == scores.shape\n\n return select_prob * scores\n\n return score\n\n\ndef slate_q_serving_scorer(num_candidates: int, q_network: torch.nn.Module) -> Scorer:\n @torch.no_grad()\n def score(state: rlt.FeatureData) -> torch.Tensor:\n # pyre-fixme[28]: Unexpected keyword argument `axis`.\n tiled_state = state.float_features.repeat_interleave(\n repeats=num_candidates, axis=0\n )\n candidate_docs = state.candidate_docs\n assert candidate_docs is not None\n actions = candidate_docs.as_feature_data().float_features\n\n q_network.eval()\n action_names, q_values = q_network(\n (tiled_state, torch.ones_like(tiled_state)),\n (actions, torch.ones_like(actions)),\n )\n scores = q_values.view(-1, num_candidates)\n q_network.train()\n\n select_prob = F.softmax(candidate_docs.value, dim=1)\n assert select_prob.shape == scores.shape\n\n return select_prob * scores\n\n return score\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport unittest\n\nimport numpy as np\nimport numpy.testing as npt\nimport torch\nfrom reagent.core.torch_utils import masked_softmax, rescale_torch_tensor\n\n\nclass TestUtils(unittest.TestCase):\n def test_rescale_torch_tensor(self):\n rows, cols = 3, 5\n original_tensor = torch.randint(low=10, high=40, size=(rows, cols)).float()\n prev_max_tensor = torch.ones(1, 5) * 40.0\n prev_min_tensor = torch.ones(1, 5) * 10.0\n new_min_tensor = torch.ones(1, 5) * -1.0\n new_max_tensor = torch.ones(1, 5).float()\n\n print(\"Original tensor: \", original_tensor)\n rescaled_tensor = rescale_torch_tensor(\n original_tensor,\n new_min_tensor,\n new_max_tensor,\n prev_min_tensor,\n prev_max_tensor,\n )\n print(\"Rescaled tensor: \", rescaled_tensor)\n reconstructed_original_tensor = rescale_torch_tensor(\n rescaled_tensor,\n prev_min_tensor,\n prev_max_tensor,\n new_min_tensor,\n new_max_tensor,\n )\n print(\"Reconstructed Original tensor: \", reconstructed_original_tensor)\n\n comparison_tensor = torch.eq(original_tensor, reconstructed_original_tensor)\n self.assertTrue(torch.sum(comparison_tensor), rows * cols)\n\n def test_masked_softmax(self):\n # Postive value case\n x = torch.tensor([[15.0, 6.0, 9.0], [3.0, 2.0, 1.0]])\n temperature = 1\n mask = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])\n out = masked_softmax(x, mask, temperature)\n expected_out = torch.tensor([[0.9975, 0.0000, 0.0025], [0, 0.7311, 0.2689]])\n npt.assert_array_almost_equal(out, expected_out, 4)\n\n # Postive value case (masked value goes to inf)\n x = torch.tensor([[150.0, 2.0]])\n temperature = 0.01\n mask = torch.tensor([[0.0, 1.0]])\n out = masked_softmax(x, mask, temperature)\n expected_out = torch.tensor([[0.0, 1.0]])\n npt.assert_array_almost_equal(out, expected_out, 4)\n\n # Negative value case\n x = torch.tensor([[-10.0, -1.0, -5.0]])\n 
temperature = 0.01\n mask = torch.tensor([[1.0, 1.0, 0.0]])\n out = masked_softmax(x, mask, temperature)\n expected_out = torch.tensor([[0.0, 1.0, 0.0]])\n npt.assert_array_almost_equal(out, expected_out, 4)\n\n # All values in a row are masked case\n x = torch.tensor([[-5.0, 4.0, 3.0], [2.0, 1.0, 2.0]])\n temperature = 1\n mask = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])\n out = masked_softmax(x, mask, temperature)\n expected_out = torch.tensor([[0.0, 0.0, 0.0], [0.4223, 0.1554, 0.4223]])\n npt.assert_array_almost_equal(out, expected_out, 4)\n" ]
[ [ "torch.no_grad", "torch.enable_grad" ], [ "numpy.array", "numpy.zeros", "numpy.stack" ], [ "torch.nn.functional.softmax", "torch.no_grad", "torch.ones_like" ], [ "torch.ones", "torch.randint", "torch.eq", "torch.sum", "torch.tensor", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
snandasena/udacity-dl
[ "7ea13ec7ebd992f1199f43bd5300782436ed71e5" ]
[ "src/direction_of_the_gradient.py" ]
[ "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pickle\n\n\n# Read in an image\nimage = mpimg.imread('../images/signs_vehicles_xygrad.png')\n\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n\n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # 2) Take the gradient in x and y separately\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1,0,ksize=sobel_kernel )\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0,1,ksize=sobel_kernel )\n # 3) Take the absolute value of the x and y gradients\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient\n direction_gradient = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n # 5) Create a binary mask where direction thresholds are met\n binary_output = np.zeros_like(direction_gradient)\n binary_output[(direction_gradient >= thresh[0]) & (direction_gradient <= thresh[1])] = 1\n # 6) Return this mask as your binary_output image\n return binary_output\n\n\n# Run the function\ndir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))\n# Plot the result\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\nf.tight_layout()\nax1.imshow(image)\nax1.set_title('Original Image', fontsize=50)\nax2.imshow(dir_binary, cmap='gray')\nax2.set_title('Thresholded Grad. Dir.', fontsize=50)\nplt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\nplt.show()" ]
[ [ "numpy.absolute", "matplotlib.pyplot.subplots", "matplotlib.image.imread", "numpy.zeros_like", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jakedolan443/search-algorithm-visualizer
[ "331c22886ef8017add16bc63a8e75df9643f4fe9" ]
[ "gui.py" ]
[ "import tkinter as tk\nimport tktools\nfrom algs.astar import astar\nfrom algs.dijkstra import dijkstra\nimport numpy as np\nimport threading\nfrom tkinter import filedialog\nimport random\nfrom grid import Grid\n\n\n\nclass Frame(tk.Frame):\n def __init__(self, *args):\n tk.Frame.__init__(self, *args)\n \n def get_root(self):\n return self.master.get_root()\n\nclass Mainframe(tk.Frame):\n def __init__(self, *args):\n tk.Frame.__init__(self, *args)\n self.get_root().mainframe = self\n \n self.images = {}\n self.images['start.png'] = tk.PhotoImage(file=\"images/start.png\")\n self.images['finish.png'] = tk.PhotoImage(file=\"images/finish.png\")\n self.images['wall.png'] = tk.PhotoImage(file=\"images/wall.png\")\n self.images['remove.png'] = tk.PhotoImage(file=\"images/remove.png\")\n \n \n gridframe = Frame(self)\n gridframe.pack(side=tk.RIGHT, anchor='n', expand=True, fill='y')\n menuframe = Frame(self)\n menuframe.pack(side=tk.LEFT, anchor='w', expand=True, fill='y')\n \n self.grid = Grid(gridframe)\n self.grid.pack(anchor='e')\n \n \n \n \n text = tk.Label(menuframe, text=\"\\n Search Algorithm \\nVisualizer\", font='System, 13')\n text.pack(side=tk.TOP)\n hyperlink = tktools.Hyperlink(menuframe, url=\"https://github.com/jakedolan443/search-algorithm-visualizer\", text=\"https://github.com/jakedolan443/\\nsearch-algorithm-visualizer\")\n hyperlink.pack()\n separation = tk.Label(menuframe, text=\"\\n\")\n separation.pack(fill='x')\n \n optionList = ('Dijkstra', 'Astar')\n self.v = tk.StringVar()\n self.v.set(optionList[0])\n self.v.trace(\"w\", self.set_alg)\n algmenu = tk.OptionMenu(menuframe, self.v, *optionList)\n algmenu.config(width=13, font='System, 8')\n algmenu.pack()\n \n status_label = tk.Label(menuframe, textvariable=self.get_root().status_var, font='System, 8')\n status_label.pack()\n \n separation = tk.Label(menuframe, text=\"\\n\")\n separation.pack(fill='x')\n \n action_menu = tk.LabelFrame(menuframe, text=\"Actions\")\n simulate_button = tk.Button(action_menu, text=\"Simulate\", font='System, 10', width=13, command=self.simulate)\n simulate_button.pack()\n reset_button = tk.Button(action_menu, text=\"Reset\", font='System, 10', width=13, command=self.grid.reset)\n reset_button.pack()\n action_menu.pack()\n separation = tk.Label(menuframe, text=\"\\n\")\n separation.pack(fill='x')\n \n togglemenu = tktools.ToggleMenu(menuframe, text=\"Tools\", command=self.set_tool)\n self.tool_menu = togglemenu\n togglemenu.pack()\n togglemenu.add_toggle(\"Start\", self.images['start.png'], \"start\")\n togglemenu.add_toggle(\"Finish\", self.images['finish.png'], \"finish\")\n togglemenu.add_toggle(\"Wall\", self.images['wall.png'], \"wall\")\n togglemenu.add_toggle(\"Remove\", self.images['remove.png'], \"remove\")\n menuframe.bind(\"<Button-1>\", lambda e: togglemenu.raise_all())\n separation = tk.Label(menuframe, text=\"\\n\")\n separation.pack(fill='x')\n \n wall_menu = tk.LabelFrame(menuframe, text=\"Grid\")\n clear_button = tk.Button(wall_menu, text=\"Clear\", font='System, 10', width=13, command=self.grid.draw)\n clear_button.pack()\n random_button = tk.Button(wall_menu, text=\"Random\", font='System, 10', width=13, command=self.grid.random_draw)\n random_button.pack()\n load_blueprint = tk.Button(wall_menu, text=\"Load Blueprint\", font='System, 10', width=13, command=self.import_grid)\n load_blueprint.pack()\n save_blueprint = tk.Button(wall_menu, text=\"Save Blueprint\", font='System, 10', width=13, command=self.export_grid)\n save_blueprint.pack()\n wall_menu.pack()\n \n def 
get_root(self):\n return self.master.get_root()\n \n def import_grid(self):\n filename = filedialog.askopenfilename(defaultextension=\"*.visgrid\", initialdir=\"templates/\", filetypes=[('Search Algorithm Grid File','*.visgrid')])\n f = open(\"{}\".format(filename), \"rb\")\n data = f.read().decode()\n f.close()\n \n \n metadata = data.split(\"///\\n\")[0]\n self.get_root().options['grid_size'][0], self.get_root().options['grid_size'][1] = int(metadata.split(\"x\")[0]), int(metadata.split(\"x\")[1]) \n \n grid = data.split(\"///\\n\")[1]\n grid = grid.split(\"\\n\")\n for i in range(len(grid)):\n grid[i] = grid[i].split(\",\")\n for j in range(len(grid[i])):\n grid[i][j] = int(grid[i][j])\n \n self.grid.draw(data=grid)\n \n def export_grid(self):\n metadata = \"{}x{}\".format(self.get_root().options['grid_size'][0], self.get_root().options['grid_size'][1])\n grid = self.grid.export_walls()\n grid = \"{}///\\n{}\".format(metadata, grid)\n\n f = filedialog.asksaveasfile(mode='wb', defaultextension=\"*.visgrid\", initialdir=\"templates/\", filetypes=[('Search Algorithm Grid File','*.visgrid')])\n if f is None: \n return\n f.write(grid.encode())\n f.close() \n \n def deselect_all(self):\n self.tool_menu.raise_all()\n \n def set_tool(self, tool):\n self.get_root().mode = tool\n \n def set_alg(self, *args):\n self.get_root().algorithm = self.v.get()\n \n def simulate(self):\n if not self.grid.in_search:\n self.tool_menu.raise_all()\n self.grid.reset()\n grid = self.grid.get()\n grid = np.asarray(grid)\n thread = threading.Thread(target = self.get_root().algorithms[self.get_root().algorithm], args = (self.grid, grid, self.get_root().coord_data['start'], self.get_root().coord_data['finish'], ))\n thread.start()\n \n\n\nclass Menu(tk.Menu):\n def __init__(self, *args):\n tk.Menu.__init__(self, *args)\n self.get_root().menu = self\n\n submenu = tk.Menu(self, tearoff=0)\n \n self.grid_size_var = tk.StringVar()\n submenu.add_radiobutton(label=\"8x8\", command=self._size_update, variable=self.grid_size_var)\n submenu.add_radiobutton(label=\"16x16\", command=self._size_update, variable=self.grid_size_var)\n submenu.add_radiobutton(label=\"32x32\", command=self._size_update, variable=self.grid_size_var)\n submenu.add_radiobutton(label=\"64x64\", command=self._size_update, variable=self.grid_size_var)\n submenu.add_radiobutton(label=\"128x128\", command=self._size_update, variable=self.grid_size_var)\n self.add_cascade(label=\"Grid\", menu=submenu)\n \n submenu = tk.Menu(self, tearoff=0)\n \n self.search_speed_var = tk.StringVar(); self.search_speed_var.set(\"1ms\")\n submenu.add_radiobutton(label=\"Max\", command=self._speed_update, variable=self.search_speed_var)\n submenu.add_radiobutton(label=\"1ms\", command=self._speed_update, variable=self.search_speed_var)\n submenu.add_radiobutton(label=\"10ms\", command=self._speed_update, variable=self.search_speed_var)\n submenu.add_radiobutton(label=\"100ms\", command=self._speed_update, variable=self.search_speed_var)\n\n self.add_cascade(label=\"Speed\", menu=submenu)\n \n def get_root(self):\n return self.master.get_root()\n\n def _speed_update(self):\n if self.search_speed_var.get() == \"Max\":\n self.search_speed_var.set(\"0\")\n self.get_root().options['speed'] = float(self.search_speed_var.get().split(\"ms\")[0])\n\n def _size_update(self):\n self.get_root().options['grid_size'] = (list(map(int, self.grid_size_var.get().split(\"x\"))))\n self.get_root().grid.draw()\n\n\n\n\nclass App(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n 
self.geometry(\"980x860\")\n self.title(\"Search Algorithm Visualizer\")\n self.tk_setPalette(background='#DDDDDD')\n self.resizable(False, False)\n \n self.status_var = tk.StringVar(); self.status_var.set(\"\\n\\n\")\n self.algorithm = \"Dijkstra\"\n self.algorithms = {\"Dijkstra\":dijkstra, \"Astar\":astar}\n self.options = {'grid_size':[64, 64], 'speed':0.1}\n \n menu = Menu(self)\n self.config(menu=menu)\n \n self.mode = None\n mainframe = Mainframe(self)\n mainframe.pack(fill='both', expand=True)\n \n \n \n self.mainloop()\n \n def get_root(self):\n return self\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
modichirag/fm_eor
[ "1615fea4e2d617bb6ef00770a49698901227daa8" ]
[ "codes/cosmo4d/pmeshengine.py" ]
[ "from __future__ import absolute_import\n\nimport numpy\nfrom abopt.vmad2 import ZERO, Engine, statement, programme, CodeSegment, Literal\nfrom abopt.abopt2 import VectorSpace\nfrom pmesh.pm import ParticleMesh, RealField, ComplexField\n\ndef nyquist_mask(factor, v):\n # any nyquist modes are set to 0 if the transfer function is complex\n mask = (numpy.imag(factor) == 0) | \\\n ~numpy.bitwise_and.reduce([(ii == 0) | (ii == ni // 2) for ii, ni in zip(v.i, v.Nmesh)])\n return factor * mask\n\nclass ParticleMeshVectorSpace(VectorSpace):\n def __init__(self, pm, q):\n self.qshape = q.shape\n self.pm = pm\n\n def addmul(self, a, b, c, p=1):\n if isinstance(b, RealField):\n r = b.copy()\n r[...] = a + b * c ** p\n return r\n elif isinstance(b, ComplexField):\n r = b.copy()\n if isinstance(c, ComplexField):\n c = c.plain\n if isinstance(a, ComplexField):\n a = a.plain\n r.plain[...] = a + b.plain * c ** p\n return r\n elif numpy.isscalar(b):\n return a + b * c ** p\n elif isinstance(b, numpy.ndarray):\n assert len(b) == self.qshape[0]\n return a + b * c ** p\n else:\n raise TypeError(\"type unknown\")\n\n def dot(self, a, b):\n if type(a) != type(b):\n raise TypeError(\"type mismatch\")\n\n if isinstance(a, RealField):\n return a.cdot(b)\n elif isinstance(a, ComplexField):\n return a.cdot(b)\n elif isinstance(a, numpy.ndarray):\n assert len(a) == len(b)\n assert len(a) == self.qshape[0]\n return self.pm.comm.allreduce(a.dot(b))\n else:\n raise TypeError(\"type unknown\")\n\nclass ParticleMeshEngine(Engine):\n def __init__(self, pm, q=None):\n self.pm = pm\n if q is None:\n q = pm.generate_uniform_particle_grid(shift=0.0, dtype='f4')\n self.q = q\n self.vs = ParticleMeshVectorSpace(self.pm, self.q)\n\n @programme(ain=['s'], aout=['x'])\n def get_x(engine, s, x):\n code = CodeSegment(engine)\n code.add(x1='s', x2=Literal(engine.q), y='x')\n return code\n\n @statement(aout=['real'], ain=['complex'])\n def c2r(engine, real, complex):\n real[...] = complex.c2r()\n\n @c2r.defvjp\n def _(engine, _real, _complex):\n _complex[...] = _real.c2r_vjp()\n\n @c2r.defjvp\n def _(engine, real_, complex_):\n real_[...] = complex_.c2r()\n\n @statement(aout=['complex'], ain=['real'])\n def r2c(engine, complex, real):\n complex[...] = real.r2c()\n\n @r2c.defvjp\n def _(engine, _complex, _real):\n _real[...] = _complex.r2c_vjp()\n\n @r2c.defjvp\n def _(engine, complex_, real_):\n complex_[...] = real_.r2c()\n\n @statement(aout=['complex'], ain=['complex'])\n def decompress(engine, complex):\n return\n\n @decompress.defvjp\n def _(engine, _complex):\n _complex.decompress_vjp(out=Ellipsis)\n\n @decompress.defjvp\n def _(engine, complex_):\n pass # XXX: is this correct?\n\n @staticmethod\n def _lowpass_filter(k, v, Neff):\n k0s = 2 * numpy.pi / v.BoxSize\n mask = numpy.bitwise_and.reduce([abs(ki) <= Neff//2 * k0 for ki, k0 in zip(k, k0s)])\n return v * mask\n\n @statement(aout=['real'], ain=['real'])\n def lowpass(engine, real, Neff):\n real.r2c(out=Ellipsis).apply(\n lambda k, v: engine._lowpass_filter(k, v, Neff),\n out=Ellipsis).c2r(out=Ellipsis)\n\n @lowpass.defvjp\n def _(engine, _real, Neff):\n _real.c2r_vjp().apply(\n lambda k, v: engine._lowpass_filter(k, v, Neff),\n out=Ellipsis).r2c_vjp(out=Ellipsis)\n\n @lowpass.defjvp\n def _(engine, real_, Neff):\n real_.r2c().apply(\n lambda k, v: engine._lowpass_filter(k, v, Neff),\n out=Ellipsis).c2r(out=Ellipsis)\n\n @statement(aout=['layout'], ain=['x'])\n def decompose(engine, layout, x):\n pm = engine.pm\n layout[...] 
= pm.decompose(x)\n\n @decompose.defvjp\n def _(engine, _layout, _x):\n _x[...] = ZERO\n\n @decompose.defjvp\n def _(engine, layout_, x_):\n layout_[...] = ZERO\n\n @statement(aout=['mesh'], ain=['x', 'layout', 'mass'])\n def paint(engine, x, mesh, layout, mass=Literal(1.0)):\n pm = engine.pm\n N = pm.comm.allreduce(len(x))\n mesh[...] = pm.paint(x, mass=mass, layout=layout, hold=False)\n # to have 1 + \\delta on the mesh\n mesh[...][...] *= 1.0 * pm.Nmesh.prod() / N\n\n @paint.defvjp\n def _(engine, _x, _mesh, x, mass, _mass, layout, _layout):\n pm = engine.pm\n _layout[...] = ZERO\n N = pm.comm.allreduce(len(x))\n _x[...], _mass[...] = pm.paint_vjp(_mesh, x, layout=layout, mass=mass)\n _x[...][...] *= 1.0 * pm.Nmesh.prod() / N\n _mass[...][...] *= 1.0 * pm.Nmesh.prod() / N\n\n @paint.defjvp\n def _(engine, x_, mesh_, x, layout, layout_, mass, mass_):\n pm = engine.pm\n if x_ is ZERO: x_ = None\n if mass_ is ZERO: mass_ = None # force cast it to a scale 0\n mesh_[...] = pm.paint_jvp(x, v_mass=mass_, mass=mass, v_pos=x_, layout=layout)\n\n @statement(aout=['mesh'], ain=['x', 'layout', 'mass'])\n def paintdirect(engine, x, mesh, layout, mass=Literal(1.0)):\n pm = engine.pm\n N = pm.comm.allreduce(len(x))\n mesh[...] = pm.paint(x, mass=mass, layout=layout, hold=False)\n\n @paintdirect.defvjp\n def _(engine, _x, _mesh, x, mass, _mass, layout, _layout):\n pm = engine.pm\n _layout[...] = ZERO\n N = pm.comm.allreduce(len(x))\n _x[...], _mass[...] = pm.paint_vjp(_mesh, x, layout=layout, mass=mass)\n\n @paintdirect.defjvp\n def _(engine, x_, mesh_, x, layout, layout_, mass, mass_):\n pm = engine.pm\n if x_ is ZERO: x_ = None\n if mass_ is ZERO: mass_ = None # force cast it to a scale 0\n mesh_[...] = pm.paint_jvp(x, v_mass=mass_, mass=mass, v_pos=x_, layout=layout)\n\n\n @statement(aout=['value'], ain=['x', 'mesh', 'layout'])\n def readout(engine, value, x, mesh, layout, resampler=None):\n pm = engine.pm\n N = pm.comm.allreduce(len(x))\n value[...] = mesh.readout(x, layout=layout, resampler=resampler)\n\n @readout.defvjp\n def _(engine, _value, _x, _mesh, x, layout, mesh, resampler):\n pm = engine.pm\n _mesh[...], _x[...] = mesh.readout_vjp(x, _value, layout=layout, resampler=resampler)\n\n @readout.defjvp\n def _(engine, value_, x_, mesh_, x, layout, mesh, layout_, resampler):\n pm = engine.pm\n if mesh_ is ZERO: mesh_ = None\n if x_ is ZERO: x_ = None\n value_[...] = mesh.readout_jvp(x, v_self=mesh_, v_pos=x_, layout=layout, resampler=resampler)\n\n @statement(aout=['complex'], ain=['complex'])\n def transfer(engine, complex, tf):\n complex.apply(lambda k, v: nyquist_mask(tf(k), v) * v, out=Ellipsis)\n \n @transfer.defvjp\n def _(engine, tf, _complex):\n _complex.apply(lambda k, v: nyquist_mask(numpy.conj(tf(k)), v) * v, out=Ellipsis)\n\n @transfer.defjvp\n def _(engine, tf, complex_):\n complex_.apply(lambda k, v: nyquist_mask(tf(k), v) * v, out=Ellipsis)\n\n @statement(aout=['residual'], ain=['model'])\n def residual(engine, model, data, sigma, residual):\n \"\"\"\n residual = (model - data) / sigma\n\n J = 1 / sigma\n \"\"\"\n residual[...] = (model - data) / sigma\n\n @residual.defvjp\n def _(engine, _model, _residual, data, sigma):\n _model[...] = _residual / sigma\n\n @residual.defjvp\n def _(engine, model_, residual_, data, sigma):\n residual_[...] = model_ / sigma\n\n# @statement(ain=['vec'], aout=['scalar'])\n# def vec1_to_scalar(engine, vec1, scalar):\n# tmp = \n#\n# @vec1_to_scalar.defvjp\n# def _(engine, _attribute, _value, dim):\n# _value[...] 
= _attribute[..., dim]\n#\n# @vec1_to_scalar.defjvp\n# def _(engine, attribute_, value_, dim):\n# attribute_[..., dim] = value_\n#\n @statement(ain=['attribute', 'value'], aout=['attribute'])\n def assign_component(engine, attribute, value, dim):\n attribute[..., dim] = value\n\n @assign_component.defvjp\n def _(engine, _attribute, _value, dim):\n _value[...] = _attribute[..., dim]\n\n @assign_component.defjvp\n def _(engine, attribute_, value_, dim):\n attribute_[..., dim] = value_\n\n @statement(ain=['attribute', 'value'], aout=['attribute'])\n def assign_chunk(engine, attribute, value, start, end):\n attribute[..., start:end] = value\n\n @assign_chunk.defvjp\n def _(engine, _attribute, _value, start, end):\n _value[...] = _attribute[..., start:end]\n\n @assign_chunk.defjvp\n def _(engine, attribute_, value_, start, end):\n attribute_[..., start:end] = value_\n\n @statement(ain=['x'], aout=['y'])\n def assign(engine, x, y):\n y[...] = x.copy()\n\n @assign.defvjp\n def _(engine, _y, _x):\n _x[...] = _y\n\n @assign.defjvp\n def _(engine, y_, x_, x):\n try:\n y_[...] = x.copy()\n y_[...][...] = x_\n except:\n y_[...] = x_\n\n @statement(ain=['x1', 'x2'], aout=['y'])\n def add(engine, x1, x2, y):\n y[...] = x1 + x2\n\n @add.defvjp\n def _(engine, _y, _x1, _x2):\n _x1[...] = _y\n _x2[...] = _y\n\n @add.defjvp\n def _(engine, y_, x1_, x2_):\n y_[...] = x1_ + x2_\n\n @statement(aout=['y'], ain=['x1', 'x2'])\n def multiply(engine, x1, x2, y):\n y[...] = x1 * x2\n\n @multiply.defvjp\n def _(engine, _x1, _x2, _y, x1, x2):\n _x1[...] = _y * x2\n _x2[...] = _y * x1\n\n @multiply.defjvp\n def _(engine, x1_, x2_, y_, x1, x2):\n y_[...] = x1_ * x2 + x1 * x2_\n\n\n @statement(aout=['y'], ain=['x1', 'x2'])\n def divide(engine, x1, x2, y):\n y[...] = x1 / x2\n\n @divide.defvjp\n def _(engine, _x1, _x2, _y, x1, x2):\n _x1[...] = _y / x2\n _x2[...] = _y * x1 / x2**2 *-1\n\n @divide.defjvp\n def _(engine, x1_, x2_, y_, x1, x2):\n y_[...] = x1_ / x2 - x1 / x2**2 * x2_\n\n\n @statement(aout=['y'], ain=['x'])\n def matrix_cmul(engine, x, y, W):\n y[...] = numpy.dot(x, W)\n\n @matrix_cmul.defvjp\n def _(engine, _x, _y, W):\n _x[...] = numpy.dot(_y, W.T)\n\n @matrix_cmul.defjvp\n def _(engine, x_, y_, W):\n y_[...] = numpy.dot(x_, W)\n\n\n @statement(ain=['x'], aout=['y'])\n def to_scalar(engine, x, y):\n if isinstance(x, RealField):\n y[...] = x.cnorm()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-2 norm of complex is not a good idea, because the gradient propagation is ambiguous\")\n else:\n y[...] = engine.pm.comm.allreduce((x[...] ** 2).sum(dtype='f8'))\n\n @to_scalar.defvjp\n def _(engine, _y, _x, x):\n _x[...] = x * (2 * _y)\n\n @to_scalar.defjvp\n def _(engine, y_, x_, x):\n if isinstance(x, RealField):\n y_[...] = x.cdot(x_) * 2\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-2 norm of complex is not a good idea, because the gradient propagation is ambiguous\")\n else:\n y_[...] = engine.pm.comm.allreduce((x * x_).sum(dtype='f8')) * 2\n\n\n @statement(ain=['x'], aout=['y'])\n def L1norm(engine, x, y):\n if isinstance(x, RealField):\n y[...] = abs(x).csum()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-1 norm of complex is not a good idea\")\n else:\n y[...] = engine.pm.comm.allreduce(abs(x[...]).sum(dtype='f8'))\n\n @L1norm.defvjp\n def _(engine, _y, _x, x):\n _x[...] = x.copy()\n _x[...][...] 
= _y * numpy.sign(x)\n #print(type(_y), type(numpy.sign(x)), type(_y * numpy.sign(x)))\n\n @L1norm.defjvp\n def _(engine, y_, x_, x):\n if isinstance(x, RealField):\n y_[...] = ((x_) * numpy.sign(x)).csum()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-1 norm of complex is not a good idea, because the gradient propagation is ambiguous\")\n else:\n y_[...] = engine.pm.comm.allreduce((numpy.sign(x) * x_).sum(dtype='f8')) \n #y_[...] = engine.pm.comm.allreduce((x_).sum(dtype='f8')) \n\n\n\n @statement(ain=['x'], aout=['y'])\n def total(engine, x, y):\n if isinstance(x, RealField):\n y[...] = x.csum()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the total of complex is not a good idea\")\n else:\n y[...] = engine.pm.comm.allreduce((x[...]).sum(dtype='f8'))\n\n @total.defvjp\n def _(engine, _y, _x, x):\n _x[...] = x.copy()\n _x[...][...] = _y \n #print(type(_y), type(numpy.sign(x)), type(_y * numpy.sign(x)))\n\n @total.defjvp\n def _(engine, y_, x_, x):\n if isinstance(x, RealField):\n y_[...] = ((x_) ).csum()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-1 norm of complex is not a good idea, because the gradient propagation is ambiguous\")\n else:\n y_[...] = engine.pm.comm.allreduce((x_).sum(dtype='f8')) \n #y_[...] = engine.pm.comm.allreduce((x_).sum(dtype='f8')) \n\n\ndef check_grad(code, yname, xname, init, eps, rtol, atol=1e-12, verbose=False, toscalar=True):\n from numpy.testing import assert_allclose\n engine = code.engine\n comm = engine.pm.comm\n if isinstance(init[xname], numpy.ndarray):\n x = init[xname]\n if x.ndim == 2:\n cshape = engine.pm.comm.allreduce(x.shape[0]), x.shape[1]\n else:\n cshape = engine.pm.comm.allreduce(x.shape[0]),\n\n def cperturb(pos, ind, eps):\n pos = pos.copy()\n start = sum(comm.allgather(pos.shape[0])[:comm.rank])\n end = sum(comm.allgather(pos.shape[0])[:comm.rank + 1])\n if ind[0] >= start and ind[0] < end:\n ind1 = tuple([ind[i] - start if i == 0 else ind[i] for i in range(len(ind))])\n old = pos[ind1]\n coord = pos[ind[0]-start].copy()\n pos[ind1] = old + eps\n new = pos[ind1]\n else:\n old, new, coord = 0, 0, 0\n diff = comm.allreduce(new - old)\n return pos\n\n def cget(pos, ind):\n if pos is ZERO: return 0\n start = sum(comm.allgather(pos.shape[0])[:comm.rank])\n end = sum(comm.allgather(pos.shape[0])[:comm.rank + 1])\n if ind[0] >= start and ind[0] < end:\n ind1 = tuple([ind[i] - start if i == 0 else ind[i] for i in range(len(ind))])\n old = pos[ind1]\n else:\n old = 0\n return comm.allreduce(old)\n\n elif isinstance(init[xname], RealField):\n cshape = init[xname].cshape\n def cget(real, index):\n if real is ZERO: return 0\n return real.cgetitem(index)\n\n def cperturb(real, index, eps):\n old = real.cgetitem(index)\n r1 = real.copy()\n r1.csetitem(index, old + eps)\n return r1\n\n code = code.copy()\n if toscalar:\n code.to_scalar(x=yname, y='y')\n else:\n code.assign(x=yname, y='y')\n\n y, tape = code.compute('y', init=init, return_tape=True)\n vjp = tape.get_vjp()\n jvp = tape.get_jvp()\n\n _x = vjp.compute('_' + xname, init={'_y' : 1.0})\n\n center = init[xname]\n init2 = init.copy()\n ng_bg = []\n fg_bg = []\n for index in numpy.ndindex(*cshape):\n x1 = cperturb(center, index, eps)\n x0 = cperturb(center, index, -eps)\n analytic = cget(_x, index)\n init2[xname] = x1\n y1 = code.compute('y', init2)\n init2[xname] = x0\n y0 = code.compute('y', init2)\n\n base = (x1 - x0)\n y_ = jvp.compute('y_', init={xname + '_': base})\n\n #logger.DEBUG(\"CHECKGRAD: %s\" % (y1, y0, y1 - 
y0, get_pos(code.engine, _x, index) * 2 * eps))\n if verbose:\n print(\"CHECKGRAD: \", index, (x1 - x0)[...].max(), y, y1 - y0, y_, cget(_x, index) * 2 * eps)\n\n fg_bg.append([index, y_, cget(_x, index) * 2 * eps])\n\n ng_bg.append([index, y1 - y0, cget(_x, index) * 2 * eps])\n\n fg_bg = numpy.array(fg_bg, dtype='O')\n ng_bg = numpy.array(ng_bg, dtype='O')\n\n def errorstat(stat, rtol, atol):\n g1 = numpy.array([a[1] for a in stat])\n g2 = numpy.array([a[2] for a in stat])\n\n ag1 = abs(g1) + (abs(g1) == 0) * numpy.std(g1)\n ag2 = abs(g2) + (abs(g2) == 0) * numpy.std(g2)\n sig = (g1 - g2) / ((ag1 + ag2) * rtol + atol)\n bins = [-100, -50, -20, -1, 1, 20, 50, 100]\n d = numpy.digitize(sig, bins)\n return d\n\n d1 = errorstat(fg_bg, rtol, atol)\n\n d2 = errorstat(ng_bg, rtol * 10, atol)\n\n \n\n\n if (d1 != 4).any():\n print('ngbg = ', ng_bg)\n print('fgbg = ', fg_bg)\n #print('ngbg = ' ng_bg)\n raise AssertionError(\"FG_BG Bad gradients: %s \" % numpy.bincount(d1))\n\n\n if (d2 != 4).any():\n raise AssertionError(\"NG_BG Bad gradients: %s \" % numpy.bincount(d2))\n\n" ]
[ [ "numpy.dot", "numpy.imag", "numpy.sign", "numpy.std", "numpy.bincount", "numpy.isscalar", "numpy.ndindex", "numpy.digitize", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thisisiron/nmt-attention-tf2
[ "ddcd3b4ed2a20d5a7a1eeac3292abb5e39a95bde" ]
[ "run.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport json\nimport datetime as dt\nimport time\n\nfrom argparse import ArgumentParser, Namespace\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf # TF 2.0\n\nfrom utils import load_dataset, load_vocab, convert_vocab, select_optimizer, loss_function\nfrom model import Encoder, Decoder, AttentionLayer\n\n\ndef test(args: Namespace):\n cfg = json.load(open(args.config_path, 'r', encoding='UTF-8'))\n\n batch_size = 1 # for predicting one sentence.\n\n encoder = Encoder(cfg['vocab_input_size'], cfg['embedding_dim'], cfg['units'], batch_size, 0)\n decoder = Decoder(cfg['vocab_target_size'], cfg['embedding_dim'], cfg['units'], cfg['method'], batch_size, 0)\n optimizer = select_optimizer(cfg['optimizer'], cfg['learning_rate'])\n\n ckpt = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder)\n manager = tf.train.CheckpointManager(ckpt, cfg['checkpoint_dir'], max_to_keep=3)\n ckpt.restore(manager.latest_checkpoint)\n\n while True:\n sentence = input('Input Sentence or If you want to quit, type Enter Key : ')\n\n if sentence == '':\n break\n\n sentence = re.sub(r\"(\\.\\.\\.|[?.!,¿])\", r\" \\1 \", sentence)\n sentence = re.sub(r'[\" \"]+', \" \", sentence)\n\n sentence = '<s> ' + sentence.lower().strip() + ' </s>'\n\n input_vocab = load_vocab('./data/', 'en')\n target_vocab = load_vocab('./data/', 'de')\n\n input_lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', oov_token='<unk>')\n input_lang_tokenizer.word_index = input_vocab\n\n target_lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', oov_token='<unk>')\n target_lang_tokenizer.word_index = target_vocab\n\n convert_vocab(input_lang_tokenizer, input_vocab)\n convert_vocab(target_lang_tokenizer, target_vocab)\n\n inputs = [input_lang_tokenizer.word_index[i] if i in input_lang_tokenizer.word_index else input_lang_tokenizer.word_index['<unk>'] for i in sentence.split(' ')]\n inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],\n maxlen=cfg['max_len_input'],\n padding='post')\n\n inputs = tf.convert_to_tensor(inputs)\n\n result = ''\n\n enc_hidden = encoder.initialize_hidden_state()\n enc_cell = encoder.initialize_cell_state()\n enc_state = [[enc_hidden, enc_cell], [enc_hidden, enc_cell], [enc_hidden, enc_cell], [enc_hidden, enc_cell]]\n\n enc_output, enc_hidden = encoder(inputs, enc_state)\n\n dec_hidden = enc_hidden\n #dec_input = tf.expand_dims([target_lang_tokenizer.word_index['<eos>']], 0)\n dec_input = tf.expand_dims([target_lang_tokenizer.word_index['<s>']], 1)\n\n print('dec_input:', dec_input)\n\n h_t = tf.zeros((batch_size, 1, cfg['embedding_dim']))\n\n for t in range(int(cfg['max_len_target'])):\n predictions, dec_hidden, h_t = decoder(dec_input,\n dec_hidden,\n enc_output,\n h_t)\n\n # predeictions shape == (1, 50002)\n\n predicted_id = tf.argmax(predictions[0]).numpy()\n print('predicted_id', predicted_id)\n\n result += target_lang_tokenizer.index_word[predicted_id] + ' '\n\n if target_lang_tokenizer.index_word[predicted_id] == '</s>':\n print('Early stopping')\n break\n\n dec_input = tf.expand_dims([predicted_id], 1)\n print('dec_input:', dec_input)\n\n print('<s> ' + result)\n print(sentence)\n sys.stdout.flush()\n\n\ndef train(args: Namespace):\n input_tensor, target_tensor, input_lang_tokenizer, target_lang_tokenizer = load_dataset('./data/', args.max_len, limit_size=None)\n\n max_len_input = len(input_tensor[0])\n max_len_target = len(target_tensor[0])\n\n print('max len of 
each seq:', max_len_input, ',', max_len_target)\n\n input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=args.dev_split)\n\n # init hyperparameter\n EPOCHS = args.epoch\n batch_size = args.batch_size\n steps_per_epoch = len(input_tensor_train) // batch_size\n embedding_dim = args.embedding_dim\n units = args.units\n vocab_input_size = len(input_lang_tokenizer.word_index) + 1\n vocab_target_size = len(target_lang_tokenizer.word_index) + 1\n BUFFER_SIZE = len(input_tensor_train)\n learning_rate = args.learning_rate\n\n setattr(args, 'max_len_input', max_len_input)\n setattr(args, 'max_len_target', max_len_target)\n\n setattr(args, 'steps_per_epoch', steps_per_epoch)\n setattr(args, 'vocab_input_size', vocab_input_size)\n setattr(args, 'vocab_target_size', vocab_target_size)\n setattr(args, 'BUFFER_SIZE', BUFFER_SIZE)\n\n dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)\n dataset = dataset.batch(batch_size)\n\n print('dataset shape (batch_size, max_len):', dataset)\n \n encoder = Encoder(vocab_input_size, embedding_dim, units, batch_size, args.dropout)\n decoder = Decoder(vocab_target_size, embedding_dim, units, args.method, batch_size, args.dropout)\n\n optimizer = select_optimizer(args.optimizer, learning_rate)\n\n loss_object = tf.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')\n\n @tf.function\n def train_step(_input, _target, enc_state):\n loss = 0\n\n with tf.GradientTape() as tape:\n enc_output, enc_state = encoder(_input, enc_state)\n\n dec_hidden = enc_state\n\n dec_input = tf.expand_dims([target_lang_tokenizer.word_index['<s>']] * batch_size, 1)\n\n # First input feeding definition\n h_t = tf.zeros((batch_size, 1, embedding_dim))\n\n for idx in range(1, _target.shape[1]):\n # idx means target character index.\n predictions, dec_hidden, h_t = decoder(dec_input,\n dec_hidden,\n enc_output,\n h_t)\n\n # tf.print(tf.argmax(predictions, axis=1))\n\n loss += loss_function(loss_object, _target[:, idx], predictions)\n\n dec_input = tf.expand_dims(_target[:, idx], 1)\n\n batch_loss = (loss / int(_target.shape[1]))\n\n variables = encoder.trainable_variables + decoder.trainable_variables\n\n gradients = tape.gradient(loss, variables)\n\n optimizer.apply_gradients(zip(gradients, variables))\n\n return batch_loss\n\n # Setting checkpoint\n now_time = dt.datetime.now().strftime(\"%m%d%H%M\")\n checkpoint_dir = './training_checkpoints/' + now_time\n setattr(args, 'checkpoint_dir', checkpoint_dir) \n checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n checkpoint = tf.train.Checkpoint(optimizer=optimizer,\n encoder=encoder,\n decoder=decoder)\n \n os.makedirs(checkpoint_dir, exist_ok=True)\n\n # saving information of the model\n with open('{}/config.json'.format(checkpoint_dir), 'w', encoding='UTF-8') as fout:\n json.dump(vars(args), fout, indent=2, sort_keys=True)\n \n min_total_loss = 1000\n\n for epoch in range(EPOCHS):\n start = time.time()\n\n enc_hidden = encoder.initialize_hidden_state()\n enc_cell = encoder.initialize_cell_state()\n enc_state = [[enc_hidden, enc_cell], [enc_hidden, enc_cell], [enc_hidden, enc_cell], [enc_hidden, enc_cell]]\n\n total_loss = 0\n\n for(batch, (_input, _target)) in enumerate(dataset.take(steps_per_epoch)):\n batch_loss = train_step(_input, _target, enc_state)\n total_loss += batch_loss\n\n if batch % 10 == 0:\n print('Epoch {}/{} Batch {}/{} Loss {:.4f}'.format(epoch + 1,\n EPOCHS,\n 
batch + 10,\n steps_per_epoch,\n batch_loss.numpy()))\n\n print('Epoch {}/{} Total Loss per epoch {:.4f} - {} sec'.format(epoch + 1,\n EPOCHS,\n total_loss / steps_per_epoch,\n time.time() - start))\n\n # saving checkpoint\n if min_total_loss > total_loss / steps_per_epoch:\n print('Saving checkpoint...')\n min_total_loss = total_loss / steps_per_epoch\n checkpoint.save(file_prefix=checkpoint_prefix)\n\n print('\\n')\n\n\ndef main():\n pass\n\n\nif __name__=='__main__':\n main()\n" ]
[ [ "tensorflow.keras.preprocessing.text.Tokenizer", "tensorflow.train.CheckpointManager", "tensorflow.convert_to_tensor", "tensorflow.zeros", "tensorflow.losses.SparseCategoricalCrossentropy", "tensorflow.train.Checkpoint", "tensorflow.data.Dataset.from_tensor_slices", "sklearn.model_selection.train_test_split", "tensorflow.expand_dims", "tensorflow.argmax", "tensorflow.keras.preprocessing.sequence.pad_sequences", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
DaceT/dask-sql
[ "c545f2bf9a786b0e9ff7f68c90da4dcc39cdcd73" ]
[ "dask_sql/physical/rel/logical/sample.py" ]
[ "import logging\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom dask_sql.datacontainer import DataContainer\nfrom dask_sql.physical.rel.base import BaseRelPlugin\n\nif TYPE_CHECKING:\n import dask_sql\n from dask_sql.java import org\n\nlogger = logging.getLogger(__name__)\n\n\nclass SamplePlugin(BaseRelPlugin):\n \"\"\"\n Sample is used on TABLESAMPLE clauses.\n It returns only a fraction of the table, given by the\n number in the arguments.\n There exist two algorithms, SYSTEM or BERNOULLI.\n\n SYSTEM is a very fast algorithm, which works on partition\n level: a partition is kept with a probability given by the\n percentage. This algorithm will - especially for very small\n numbers of partitions - give wrong results. Only choose\n it when you really have too much data to apply BERNOULLI\n (which might never be the case in real world applications).\n\n BERNOULLI samples each row separately and will still\n give only an approximate fraction, but much closer to\n the expected.\n \"\"\"\n\n class_name = \"org.apache.calcite.rel.core.Sample\"\n\n def convert(\n self, rel: \"org.apache.calcite.rel.RelNode\", context: \"dask_sql.Context\"\n ) -> DataContainer:\n (dc,) = self.assert_inputs(rel, 1, context)\n df = dc.df\n cc = dc.column_container\n\n parameters = rel.getSamplingParameters()\n is_bernoulli = parameters.isBernoulli()\n fraction = float(parameters.getSamplingPercentage())\n seed = parameters.getRepeatableSeed() if parameters.isRepeatable() else None\n\n if is_bernoulli:\n df = df.sample(frac=fraction, replace=False, random_state=seed)\n else:\n random_state = np.random.RandomState(seed)\n random_choice = random_state.choice(\n [True, False],\n size=df.npartitions,\n replace=True,\n p=[fraction, 1 - fraction],\n )\n\n if random_choice.any():\n df = df.partitions[random_choice]\n else:\n df = df.head(0, compute=False)\n\n return DataContainer(df, cc)\n" ]
[ [ "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ibaiGorordo/depthai-experiments
[ "cde67e277120ddac815cbad6360695759cca900f", "cde67e277120ddac815cbad6360695759cca900f", "cde67e277120ddac815cbad6360695759cca900f", "cde67e277120ddac815cbad6360695759cca900f" ]
[ "gen2-efficientnet-classification/main.py", "gen2-nn-sync/main.py", "gen2-display-detections/3-stretch_img.py", "gen2-rgbd-projection/projector_3d.py" ]
[ "#!/usr/bin/env python3\nimport argparse\nfrom pathlib import Path\nimport sys\n\nimport blobconverter\nimport cv2\nimport depthai as dai\nimport numpy as np\nfrom classes import class_names\n\n# Get Argument First\nparser = argparse.ArgumentParser()\nparser.add_argument('-nd', '--no-debug', action=\"store_true\", help=\"Prevent debug output\")\nparser.add_argument('-cam', '--camera', action=\"store_true\", help=\"Use DepthAI 4K RGB camera for inference (conflicts with -vid)\")\nparser.add_argument('-vid', '--video', type=str, help=\"Path to video file to be used for inference (conflicts with -cam)\")\nargs = parser.parse_args()\n\n\n\n# NOTE: video must be of size 224 x 224. We will resize this on the\n# host, but you could also use ImageManip node to do it on device\n\n# Link video in with the detection network\n \nif not args.camera and not args.video:\n raise RuntimeError(\"No source selected. Please use either \\\"-cam\\\" to use RGB camera as a source or \\\"-vid <path>\\\" to run on video\")\n\ndebug = not args.no_debug\ncamera = not args.video\nlabels = class_names()\n\n\n# Start defining a pipeline\npipeline = dai.Pipeline()\n\n# NeuralNetwork\nprint(\"Creating Neural Network...\")\ndetection_nn = pipeline.create(dai.node.NeuralNetwork)\ndetection_nn.setBlobPath(blobconverter.from_zoo(name=\"efficientnet-b0\"))\n\nif camera:\n print(\"Creating Color Camera...\")\n cam_rgb = pipeline.create(dai.node.ColorCamera)\n cam_rgb.setPreviewSize(224,224)\n cam_rgb.setInterleaved(False)\n cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)\n cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)\n\n cam_xout = pipeline.create(dai.node.XLinkOut)\n cam_xout.setStreamName(\"rgb\")\n cam_rgb.preview.link(cam_xout.input)\n cam_rgb.preview.link(detection_nn.input)\nelse:\n face_in = pipeline.create(dai.node.XLinkIn)\n face_in.setStreamName(\"in_nn\")\n face_in.out.link(detection_nn.input)\n\n# Create outputs\nxout_nn = pipeline.create(dai.node.XLinkOut)\nxout_nn.setStreamName(\"nn\")\ndetection_nn.out.link(xout_nn.input)\n\nframe = None\nbboxes = []\n\n\ndef to_tensor_result(packet):\n return {\n tensor.name: np.array(packet.getLayerFp16(tensor.name)).reshape(tensor.dims)\n for tensor in packet.getRaw().tensors\n }\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\n\ndef to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:\n resized = cv2.resize(arr, shape)\n return resized.transpose(2, 0, 1)\n\n\n# Pipeline defined, now the device is assigned and pipeline is started\nwith dai.Device(pipeline) as device:\n\n # Output queues will be used to get the rgb frames and nn data from the outputs defined above\n if camera:\n q_rgb = device.getOutputQueue(name=\"rgb\", maxSize=1, blocking=False)\n else:\n cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))\n \n\n detection_in = device.getInputQueue(\"in_nn\")\n q_nn = device.getOutputQueue(name=\"nn\", maxSize=1, blocking=False)\n\n\n def should_run():\n return cap.isOpened() if args.video else True\n\n\n def get_frame():\n if camera:\n in_rgb = q_rgb.get()\n new_frame = np.array(in_rgb.getData()).reshape((3, in_rgb.getHeight(), in_rgb.getWidth())).transpose(1, 2, 0).astype(np.uint8)\n new_frame = cv2.cvtColor(new_frame, cv2.COLOR_BGR2RGB)\n return True, np.ascontiguousarray(new_frame)\n else:\n return cap.read()\n\n\n \n result = None\n\n while 
should_run():\n read_correctly, frame = get_frame()\n\n if not read_correctly:\n break\n\n if not camera:\n nn_data = dai.NNData()\n nn_data.setLayer(\"input\", to_planar(frame, (224, 224)))\n detection_in.send(nn_data)\n\n in_nn = q_nn.tryGet()\n\n if in_nn is not None:\n data = softmax(in_nn.getFirstLayerFp16())\n result_conf = np.max(data)\n if result_conf > 0.2:\n result = {\n \"name\": labels[np.argmax(data)],\n \"conf\": round(100 * result_conf, 2)\n }\n else:\n result = None\n\n if debug:\n frame_main = frame.copy()\n if result is not None:\n cv2.putText(frame_main, \"{}\".format(result[\"name\"]), (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0))\n cv2.putText(frame_main, \"Confidence: {}%\".format(result[\"conf\"]), (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0))\n \n cv2.imshow(\"rgb\", cv2.resize(frame_main, (400, 400)))\n \n if cv2.waitKey(1) == ord('q'):\n break\n elif result is not None:\n print(\"{} ({}%)\".format(result[\"name\"], result[\"conf\"]))\n", "#!/usr/bin/env python3\nimport queue\nimport blobconverter\nimport cv2\nimport depthai as dai\nimport numpy as np\n\n\ndef to_planar(arr: np.ndarray, shape: tuple) -> list:\n return [val for channel in cv2.resize(arr, shape).transpose(2, 0, 1) for y_col in channel for val in y_col]\n\n\n# Start defining a pipeline\npipeline = dai.Pipeline()\n\n# Define a source - color camera\ncam_rgb = pipeline.create(dai.node.ColorCamera)\ncam_rgb.setPreviewSize(300, 300)\ncam_rgb.setInterleaved(False)\n\n# Define a neural network that will make predictions based on the source frames\ndetection_nn = pipeline.create(dai.node.NeuralNetwork)\ndetection_nn.setBlobPath(blobconverter.from_zoo(name=\"face-detection-retail-0004\", shaves=6))\ncam_rgb.preview.link(detection_nn.input)\n\nlandmarks_nn = pipeline.create(dai.node.NeuralNetwork)\nlandmarks_nn.setBlobPath(blobconverter.from_zoo(name=\"landmarks-regression-retail-0009\", shaves=6))\n\n# Create outputs\nxin_rgb = pipeline.create(dai.node.XLinkIn)\nxin_rgb.setStreamName(\"land_in\")\nxin_rgb.out.link(landmarks_nn.input)\n\n# Create outputs\nxout_frame = pipeline.create(dai.node.XLinkOut)\nxout_frame.setStreamName(\"det_frame\")\ncam_rgb.preview.link(xout_frame.input)\n\nxout_det = pipeline.create(dai.node.XLinkOut)\nxout_det.setStreamName(\"det_nn\")\ndetection_nn.out.link(xout_det.input)\n\nxout_land = pipeline.create(dai.node.XLinkOut)\nxout_land.setStreamName(\"land_nn\")\nlandmarks_nn.out.link(xout_land.input)\n\n# Pipeline defined, now the device is assigned and pipeline is started\nwith dai.Device(pipeline) as device:\n # Output queues will be used to get the rgb frames and nn data from the outputs defined above\n q_frame = device.getOutputQueue(name=\"det_frame\", maxSize=4, blocking=False)\n q_det = device.getOutputQueue(name=\"det_nn\", maxSize=4, blocking=False)\n land_in = device.getInputQueue(name=\"land_in\", maxSize=4, blocking=False)\n q_land = device.getOutputQueue(name=\"land_nn\", maxSize=4, blocking=False)\n\n face_q = queue.Queue()\n\n # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height\n def frame_norm(frame, bbox):\n return (np.clip(np.array(bbox), 0, 1) * np.array(frame.shape[:2] * (len(bbox) // 2))[::-1]).astype(int)\n\n\n while True:\n while q_det.has():\n in_frame = q_frame.get()\n shape = (3, in_frame.getHeight(), in_frame.getWidth())\n det_frame = in_frame.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)\n det_frame = np.ascontiguousarray(det_frame)\n # one detection has 7 numbers, 
and the last detection is followed by -1 digit, which later is filled with 0\n bboxes = np.array(q_det.get().getFirstLayerFp16())\n # take only the results before -1 digit\n bboxes = bboxes[:np.where(bboxes == -1)[0][0]]\n # transform the 1D array into Nx7 matrix\n bboxes = bboxes.reshape((bboxes.size // 7, 7))\n # filter out the results which confidence less than a defined threshold\n bboxes = bboxes[bboxes[:, 2] > 0.5][:, 3:7]\n for raw_bbox in bboxes:\n bbox = frame_norm(det_frame, raw_bbox)\n cv2.rectangle(det_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)\n\n face_frame = det_frame[\n bbox[1]:bbox[3],\n bbox[0]:bbox[2]\n ]\n\n nn_data = dai.NNData()\n nn_data.setLayer(\"0\", to_planar(face_frame, (48, 48)))\n land_in.send(nn_data)\n face_q.put(face_frame)\n cv2.imshow(\"rgb\", det_frame)\n\n while q_land.has():\n face_frame = face_q.get()\n out = frame_norm(face_frame, q_land.get().getFirstLayerFp16())\n cv2.circle(face_frame, tuple(out[:2]), 1, (255, 255, 0)) # Right eye\n cv2.circle(face_frame, tuple(out[2:4]), 1, (255, 255, 0)) # Left eye\n cv2.circle(face_frame, tuple(out[4:6]), 1, (255, 255, 0)) # Nose\n cv2.circle(face_frame, tuple(out[6:8]), 1, (255, 255, 0)) # Right mouth\n cv2.circle(face_frame, tuple(out[8:]), 1, (255, 255, 0)) # Left mouth\n cv2.imshow(\"face\", face_frame)\n\n if cv2.waitKey(1) == ord('q'):\n break\n", "#!/usr/bin/env python3\n\nimport cv2\nimport depthai as dai\nimport numpy as np\nimport blobconverter\nfrom utility import *\n\n# MobilenetSSD label texts\nlabelMap = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\",\n \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n\n# Create pipeline\npipeline = dai.Pipeline()\n\ncamRgb = pipeline.create(dai.node.ColorCamera)\ncamRgb.setPreviewSize(300, 300)\ncamRgb.setInterleaved(False)\ncamRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)\ncamRgb.setIspScale(1, 3) # You don't need to downscale (4k -> 720P) video frames\n# Squeeze the frame\ncamRgb.setPreviewKeepAspectRatio(False)\n\nxoutFrames = pipeline.create(dai.node.XLinkOut)\nxoutFrames.setStreamName(\"frames\")\ncamRgb.video.link(xoutFrames.input)\n\n# Define a neural network that will make predictions based on the source frames\nnn = pipeline.create(dai.node.MobileNetDetectionNetwork)\nnn.setConfidenceThreshold(0.5)\nnn.setBlobPath(blobconverter.from_zoo(name=\"mobilenet-ssd\", shaves=6))\ncamRgb.preview.link(nn.input)\n\npassthroughOut = pipeline.create(dai.node.XLinkOut)\npassthroughOut.setStreamName(\"pass\")\nnn.passthrough.link(passthroughOut.input)\n\nnnOut = pipeline.create(dai.node.XLinkOut)\nnnOut.setStreamName(\"nn\")\nnn.out.link(nnOut.input)\n\n# Connect to device and start pipeline\nwith dai.Device(pipeline) as device:\n qFrames = device.getOutputQueue(name=\"frames\")\n qPass = device.getOutputQueue(name=\"pass\")\n qDet = device.getOutputQueue(name=\"nn\")\n\n detections = []\n fps = FPSHandler()\n text = TextHelper()\n\n # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height\n def frameNorm(frame, bbox):\n normVals = np.full(len(bbox), frame.shape[0])\n normVals[::2] = frame.shape[1]\n return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)\n\n def displayFrame(name, frame):\n for detection in detections:\n bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))\n 
text.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20))\n text.putText(frame, f\"{int(detection.confidence * 100)}%\", (bbox[0] + 10, bbox[1] + 40))\n text.rectangle(frame, bbox)\n # Show the frame\n cv2.imshow(name, frame)\n\n while True:\n frame = qFrames.get().getCvFrame()\n\n inDet = qDet.tryGet()\n if inDet is not None:\n detections = inDet.detections\n fps.next_iter()\n\n inPass = qPass.tryGet()\n if inPass is not None:\n displayFrame('Passthrough', inPass.getCvFrame())\n\n # If the frame is available, draw bounding boxes on it and show the frame\n text.putText(frame, \"NN fps: {:.2f}\".format(fps.fps()), (2, frame.shape[0] - 4))\n displayFrame(\"Frame\", frame)\n\n if cv2.waitKey(1) == ord('q'):\n break\n", "#!/usr/bin/env python3\n\n# Code copied from main depthai repo, depthai_helpers/projector_3d.py\n\nimport numpy as np\nimport open3d as o3d\n\nclass PointCloudVisualizer():\n def __init__(self, intrinsic_matrix, width, height):\n self.pcl = None\n # transform from camera to world orientation (Note the absolute position won't be correct)\n self.R_camera_to_world = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]).astype(np.float64)\n \n self.pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(width,\n height,\n intrinsic_matrix[0][0],\n intrinsic_matrix[1][1],\n intrinsic_matrix[0][2],\n intrinsic_matrix[1][2])\n self.vis = o3d.visualization.Visualizer()\n self.vis.create_window()\n self.isstarted = False\n\n def depth_to_projection(self, depth_map, stride=1, downsample=False):\n depth_o3d = o3d.geometry.Image(depth_map)\n if self.pcl is None:\n self.pcl = o3d.geometry.PointCloud.create_from_depth_image(depth_o3d, self.pinhole_camera_intrinsic, stride=stride)\n else:\n pcd = o3d.geometry.PointCloud.create_from_depth_image(depth_o3d, self.pinhole_camera_intrinsic, stride=stride)\n if downsample:\n pcd = pcd.voxel_down_sample(voxel_size=0.01)\n # Remove noise\n pcd = pcd.remove_statistical_outlier(30, 0.1)[0]\n self.pcl.points = pcd.points\n # Rotate the pointcloud such that it is in the world coordinate frame (easier to visualize)\n self.pcl.rotate(self.R_camera_to_world, center=np.array([0,0,0],dtype=np.float64))\n return self.pcl\n\n def rgbd_to_projection(self, depth_map, rgb, downsample=False):\n rgb_o3d = o3d.geometry.Image(rgb)\n depth_o3d = o3d.geometry.Image(depth_map)\n rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(rgb_o3d, depth_o3d, convert_rgb_to_intensity=False, depth_trunc=6)\n if self.pcl is None:\n self.pcl = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, self.pinhole_camera_intrinsic)\n else:\n pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, self.pinhole_camera_intrinsic)\n if downsample:\n pcd = pcd.voxel_down_sample(voxel_size=0.01)\n # Remove noise\n pcd = pcd.remove_statistical_outlier(30, 0.1)[0]\n\n self.pcl.points = pcd.points\n self.pcl.colors = pcd.colors\n\n # Rotate the pointcloud such that it is in the world coordinate frame (easier to visualize)\n self.pcl.rotate(self.R_camera_to_world, center=np.array([0,0,0],dtype=np.float64))\n return self.pcl\n\n def visualize_pcd(self):\n if not self.isstarted:\n self.vis.add_geometry(self.pcl)\n origin = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1, origin=[0, 0, 0])\n self.vis.add_geometry(origin)\n self.isstarted = True\n else:\n self.vis.update_geometry(self.pcl)\n self.vis.poll_events()\n self.vis.update_renderer()\n\n def close_window(self):\n self.vis.destroy_window()\n" ]
[ [ "numpy.ascontiguousarray", "numpy.max", "numpy.argmax" ], [ "numpy.ascontiguousarray", "numpy.array", "numpy.where" ], [ "numpy.array" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lamorton/FIDASIM
[ "d4f68c17d4fcb08107768346d47bee7a4bf0586c" ]
[ "lib/python/fidasim/utils.py" ]
[ "#!/bin/sh\n\"exec\" \"$FIDASIM_DIR/deps/python\" \"$0\" \"$@\"\n# -*- coding: utf-8 -*-\n\n#+#FIDASIM Utilities\n#+This file contains useful FIDASIM utilities\n#+***\nfrom __future__ import print_function\nimport os\nfrom os.path import dirname\nimport subprocess\nimport platform\nimport numpy as np\nimport copy\nimport h5py\nimport efit\nfrom scipy.io import netcdf\nfrom scipy.interpolate import interp1d, interp2d, NearestNDInterpolator\nfrom scipy.spatial import Delaunay\nimport matplotlib.pyplot as plt\n\ndef get_fidasim_dir():\n \"\"\"\n #+#get_fidasim_dir\n #+ Gets FIDASIM install directory\n #+***\n #+##Output Arguments\n #+ **directory**: FIDASIM install directory.\n #+##Example Usage\n #+```python\n #+>>> fida_dir = get_fidasim_dir()\n #+```\n \"\"\"\n\n directory = dirname(dirname(dirname(dirname(os.path.abspath(__file__)))))\n\n return directory\n\ndef get_version(fidasim_dir):\n \"\"\"\n #+#get_version\n #+ Gets FIDASIM version number from git.\n #+ Falls back to reading VERSION file when git is not available\n #+***\n #+##Input Arguments\n #+ **fidasim_dir**: FIDASIM install directory\n #+\n #+##Output Arguments\n #+ **version**: FIDAIM version number.\n #+\n #+##Example Usage\n #+```python\n #+>>> version = get_version(get_fidasim_dir())\n #+```\n \"\"\"\n version = ''\n alt = False\n\n if platform.system() == 'Windows':\n alt = True\n else:\n # Location of .git folder\n git_dir = r'{}{}.git'.format(fidasim_dir, os.path.sep)\n\n # git is installed if git_file is a file\n proc = subprocess.Popen('command -v git', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n git_file = proc.communicate()[0].decode('utf-8')\n git_file = git_file.replace('\\n', '')\n\n # Check that .git folder is present and git is installed\n if os.path.isfile(git_file) and os.path.isdir(git_dir):\n try:\n version = subprocess.check_output(['git', '--git-dir={}'.format(git_dir), 'describe', '--tags', '--always', '--dirty'])\n version = version.replace('\\n', '')\n except:\n alt = True\n else:\n alt = True\n\n # If above didn't work, read version file\n if alt:\n # Git 'version' filepath\n ver_file = '{}{}VERSION'.format(fidasim_dir, os.path.sep)\n\n if os.path.isfile(ver_file):\n with open(ver_file) as f:\n version = f.read()\n\n return version\n\ndef aabb_intersect(rc, dr, r0, d0):\n \"\"\"\n #+#aabb_intersect\n #+Calculates intersection length of a ray and an axis aligned bounding box (AABB)\n #+***\n #+##Input Arguments\n #+ **rc**: Center of AABB\n #+\n #+ **dr**: [length, width, height] of AABB\n #+\n #+ **r0**: starting point of ray\n #+\n #+ **d0**: direction of ray\n #+\n #+##Output Arguments\n #+ **intersect**: Intersection length of ray and AABB\n #+\n #+ **ri**: Optional, ray enterence point\n #+\n #+ **rf**: Optional, ray exit point\n #+\n #+##Example Usage\n #+```python\n #+>>> intersect, r_enter, r_exit = aabb_intersect([0,0,0], [1,1,1], [-1,0,0], [1,0,0])\n #+>>> print(intersect)\n #+ 1.0\n #+>>> print(r_enter)\n #+ -0.5 0.0 0.0\n #+>>> print(r_exit)\n #+ 0.5 0.0 0.0\n #+```\n \"\"\"\n v0 = d0 / np.sqrt(np.sum(d0 ** 2.))\n\n # There are 6 sides to a cube/grid\n side_inter = np.zeros(6)\n\n # Intersection points of ray with planes defined by grid\n ipnts = np.zeros((3, 6))\n\n # Find whether ray intersects each side\n for i in range(6):\n j = int(np.floor(i / 2))\n ind = np.arange(3, dtype=int)\n ind = ind[ind != j]\n if np.abs(v0[j]) > 0.: # just v0[j] != 0 right?\n # Intersection point with plane\n ipnts[:, i] = r0 + v0 * (((rc[j] + (np.mod(i, 2) - 0.5) * dr[j]) - r0[j]) / 
v0[j])\n\n # Check if point on plane is within grid side\n if (np.abs(ipnts[ind[0], i] - rc[ind[0]]) <= 0.5 * dr[ind[0]]) and \\\n (np.abs(ipnts[ind[1], i] - rc[ind[1]]) <= 0.5 * dr[ind[1]]):\n side_inter[i] = 1\n\n intersect = 0.0\n r_enter = copy.deepcopy(r0)\n r_exit = copy.deepcopy(r0)\n ind = np.arange(side_inter.size)\n ind = ind[side_inter != 0]\n nw = side_inter[ind].size\n if nw >= 2:\n #Find two unique intersection points\n nunique = 0\n for i in range(nw - 1):\n if np.sum(ipnts[:, ind[0]] == ipnts[:, ind[i + 1]]) != 3:\n ind = [ind[0], ind[i + 1]]\n nunique = 2\n break\n\n if nunique == 2:\n vi = ipnts[:, ind[1]] - ipnts[:, ind[0]]\n vi = vi / np.sqrt(np.sum(vi ** 2.))\n dot_prod = np.sum(v0 * vi)\n if dot_prod > 0.0:\n r_enter = ipnts[:, ind[0]]\n r_exit = ipnts[:, ind[1]]\n else:\n r_enter = ipnts[:, ind[1]]\n r_exit = ipnts[:, ind[0]]\n\n # Calculate intersection length\n intersect = np.sqrt(np.sum((r_exit - r_enter) ** 2.))\n\n return intersect, r_enter, r_exit\n\ndef tb_zyx(alpha, beta, gamma):\n \"\"\"\n #+#tb_zyx\n #+Calculates Tait-Bryan z-y'-x\" active rotation matrix given rotation angles `alpha`,`beta`,`gamma` in radians\n #+***\n #+##Arguments\n #+ **alpha**: rotation angle about z [radians]\n #+\n #+ **beta**: rotation angle about y' [radians]\n #+\n #+ **gamma**: rotation angle about x\" [radians]\n #+\n #+##Return Value\n #+ Rotation Matrix [prefida](|url|/sourcefile/prefida.pro.html)\n #+\n #+##Example Usage\n #+```python\n #+ >>> rot_mat = tb_zyx(np.pi/2, 0.0, np.pi/3)\n #+```\n \"\"\"\n sa = np.sin(alpha)\n ca = np.cos(alpha)\n sb = np.sin(beta)\n cb = np.cos(beta)\n sg = np.sin(gamma)\n cg = np.cos(gamma)\n\n r = np.zeros((3, 3))\n\n r[0, 0] = ca * cb\n r[0, 1] = ca * sb * sg - cg * sa\n r[0, 2] = sa * sg + ca * cg * sb\n r[1, 0] = cb * sa\n r[1, 1] = ca * cg + sa * sb * sg\n r[1, 2] = cg * sa * sb - ca * sg\n r[2, 0] = -sb\n r[2, 1] = cb * sg\n r[2, 2] = cb * cg\n\n return r\n\ndef uvw_to_xyz(alpha, beta, gamma, uvw, origin=np.zeros(3)):\n \"\"\"\n #+#uvw_to_xyz\n #+ Express non-rotated coordinate `uvw` in rotated `xyz` coordinates\n #+***\n #+##Arguments\n #+ **alpha**: Rotation angle about z [radians]\n #+\n #+ **beta**: Rotation angle about y' [radians]\n #+\n #+ **gamma**: Rotation angle about x\" [radians]\n #+\n #+ **uvw**: Point in rotated coordinate system, (3, n)\n #+\n #+##Keyword Arguments\n #+ **origin**: Origin of rotated coordinate system in non-rotated (uvw) coordinates, (3)\n #+\n #+##Output Arguments\n #+ **xyz**: 'uvw' in 'xyz' coordinates\n #+\n #+##Example Usage\n #+```python\n #+>>> xyz = uvw_to_xyz(np.pi/2., 0.0, np.pi/3., uvw, origin=[.1, .2, 0.])\n #+```\n \"\"\"\n\n # Make np arrays\n uvw = np.array(uvw, dtype=float)\n origin = np.array(origin, dtype=float)\n\n # Do checks as this code does not allow multiple points to be entered (yet)\n if uvw.ndim == 2:\n s = uvw.shape\n if s[0] != 3:\n raise ValueError('uvw must be (3, n), but it has shape {}'.format(uvw.shape))\n n = s[1]\n elif uvw.ndim == 1:\n if uvw.size != 3:\n raise ValueError('uvw must have length 3, but it has length {}'.format(uvw.size))\n n = 1\n else:\n raise ValueError('uvw must be (3) or (3, n)')\n\n if origin.ndim != 1:\n raise ValueError('origin must be 1D, but it has shape {}'.format(origin.shape))\n\n if origin.size != 3:\n raise ValueError('origin must have length 3, but it has length {}'.format(origin.size))\n\n # Shift origin\n uvw_shifted = uvw - np.squeeze(np.tile(origin, (n, 1)).T)\n\n # Get rotation matrix\n r = tb_zyx(alpha, beta, gamma)\n\n # Apply 
rotation matrix\n xyz = np.dot(r.T, uvw_shifted)\n\n return xyz\n\ndef xyz_to_uvw(alpha, beta, gamma, xyz, origin = np.zeros(3)):\n \"\"\"\n #+##`xyz_to_uvw(alpha, beta, gamma, xyz, origin=[0,0,0])`\n #+Express rotated coordinate `xyz` in non-rotated `uvw` coordinates\n #+###Arguments\n #+ **alpha**: Rotation angle about z [radians]\n #+\n #+ **beta**: Rotation angle about y' [radians]\n #+\n #+ **gamma**: Rotation angle about x\" [radians]\n #+\n #+ **xyz**: Point in rotated coordinate system\n #+\n #+###Keyword Arguments\n #+ **origin**: Origin of rotated coordinate system in non-rotated (uvw) coordinates.\n #+\n #+###Example Usage\n #+```python\n #+>>> uvw = xyz_to_uvw(np.pi/2,0.0,np.pi/3,xyz)\n #+```\n \"\"\"\n xyz = np.array(xyz)\n\n # Do checks as this code does not allow multiple points to be entered (yet)\n if xyz.ndim == 2:\n s = xyz.shape\n if s[0] != 3:\n raise ValueError('xyz must be (3, n), but it has shape {}'.format(xyz.shape))\n n = s[1]\n elif xyz.ndim == 1:\n if xyz.size != 3:\n raise ValueError('xyz must have length 3, but it has length {}'.format(xyz.size))\n n = 1\n else:\n raise ValueError('xyz must be (3) or (3, n)')\n\n if origin.ndim != 1:\n raise ValueError('origin must be 1D, but it has shape {}'.format(origin.shape))\n\n if origin.size != 3:\n raise ValueError('origin must have length 3, but it has length {}'.format(origin.size))\n\n R = tb_zyx(alpha,beta,gamma)\n\n uvw = np.dot(R, xyz)\n\n return uvw + np.squeeze(np.tile(origin, (n, 1)).T)\n\ndef line_basis(r0, v0):\n \"\"\"\n #+#line_basis\n #+Calculates basis from a line with +x in the direction of line\n #+***\n #+##Arguments\n #+ **r0**: Starting point of line [cm]\n #+\n #+ **v0**: Direction of line\n #+\n #+##Example Usage\n #+```python\n #+>>> basis = line_basis([0,0,0],[0,-1,0])\n #+>>> x = np.dot(basis,np.array([1,1,0])) ;Transforms a point in line-space ([1,1,0]) to real space\n #+>>> x\n #+ [1, -1, 0]\n #+```\n \"\"\"\n r0 = np.array(r0)\n v0 = np.array(v0)\n rf = r0 + v0\n dis = np.sqrt(np.sum(v0**2))\n beta = np.arcsin((r0[2] - rf[2])/dis)\n alpha = np.arctan2((rf[1] - r0[1]),(rf[0]-r0[0]))\n\n R = tb_zyx(alpha,beta,0.0)\n return R\n\ndef rz_grid(rmin, rmax, nr, zmin, zmax, nz, phimin=0.0, phimax=0.0, nphi=1):\n \"\"\"\n #+#rz_grid\n #+Creates interpolation grid\n #+***\n #+##Arguments\n #+ **rmin**: Minimum radius [cm]\n #+\n #+ **rmax**: Maximum radius [cm]\n #+\n #+ **nr**: Number of radii\n #+\n #+ **zmin**: Minimum Z value [cm]\n #+\n #+ **zmax**: Maximum Z value [cm]\n #+\n #+ **nz**: Number of Z values\n #+\n #+ **phimin**: Minimum Phi value [rad]\n #+\n #+ **phimax**: Maximum Phi value [rad]\n #+\n #+ **nphi**: Number of Phi values \n #+\n #+##Return Value\n #+Interpolation grid dictionary\n #+\n #+##Example Usage\n #+```python\n #+>>> grid = rz_grid(0,200.0,200,-100,100,200,phimin=4*np.pi/3,phimax=5*np.pi/3,nphi=5)\n #+```\n \"\"\"\n dr = (rmax - rmin) / nr\n dz = (zmax - zmin) / nz\n dphi = (phimax - phimin) / nphi\n r = rmin + dr * np.arange(nr, dtype=np.float64)\n z = zmin + dz * np.arange(nz, dtype=np.float64)\n phi = phimin + dphi * np.arange(nphi, dtype=np.float64)\n\n r2d = np.tile(r, (nz, 1)).T\n z2d = np.tile(z, (nr, 1))\n\n grid = {'r2d': r2d,\n 'z2d': z2d,\n 'r': r,\n 'z': z,\n 'phi': phi,\n 'nr': nr,\n 'nz': nz,\n 'nphi': nphi}\n\n return grid\n\ndef colored(text, color): #, on_color=None, attrs=None):\n \"\"\"\n #+#colored\n #+ Return text string formatting for color in terminal\n #+***\n #+##Input Arguments\n #+ **text**: String to be colored\n #+\n #+ **color**: Desired 
color of string. Red, green, yellow, blue, magenta, cyan, or white.\n #+\n #+##Output Arguments\n #+ **text**: Text formated to have \"color\" in terminal.\n #+##Example Usage\n #+```python\n #+>>> text = colored(\"Text to be red\", 'red')\n #+>>> print(text)\n #+```\n \"\"\"\n # Copyright (c) 2008-2011 Volvox Development Team\n #\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n #\n # The above copyright notice and this permission notice shall be included in\n # all copies or substantial portions of the Software.\n #\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n # THE SOFTWARE.\n #\n # Author: Konstantin Lepa <[email protected]>\n COLORS = dict(list(zip(['grey',\n 'red',\n 'green',\n 'yellow',\n 'blue',\n 'magenta',\n 'cyan',\n 'white',],\n list(range(30, 38)))))\n\n RESET = '\\033[0m'\n\n if os.getenv('ANSI_COLORS_DISABLED') is None:\n fmt_str = '\\033[%dm%s'\n\n text = fmt_str % (COLORS[color], text)\n\n text += RESET\n\n return text\n\ndef info(string):\n \"\"\"\n #+#info\n #+Print a informational message\n #+***\n #+##Arguments\n #+ **str**: message\n #+\n #+##Example Usage\n #+```python\n #+>>> info(\"This is an informative message\")\n #+```\n \"\"\"\n print(colored('INFO: ' + string, 'cyan'))\n\ndef warn(string):\n \"\"\"\n #+#warn\n #+Print a warning message\n #+***\n #+##Arguments\n #+ **string**: message\n #+\n #+##Example Usage\n #+```python\n #+>>> warn(\"This may be a problem\")\n #+```\n \"\"\"\n print(colored('WARNING: ' + string, 'magenta'))\n\ndef error(string, halt=False):\n \"\"\"\n #+#error\n #+Print a error message\n #+***\n #+##Arguments\n #+ **string**: message\n #+\n #+##Keyword Arguments\n #+ **halt**: Halt program execution\n #+\n #+##Example Usage\n #+```python\n #+>>> error(\"Error message\")\n #+```\n \"\"\"\n print(colored('ERROR: {}'.format(string), 'red'))\n\n if halt:\n raise Exception()\n\ndef success(string):\n \"\"\"\n #+#success\n #+Print a success message\n #+***\n #+##Arguments\n #+ **string**: message\n #+\n #+##Example Usage\n #+```python\n #+>>> success(\"Yay!!!\")\n #+```\n \"\"\"\n print(colored('SUCCESS: ' + string, 'green'))\n\ndef beam_grid(nbi, rstart,\n nx=None, ny=None, nz=None, dv=8.0,\n length=100.0, width=80.0, height=80.0):\n \"\"\"\n #+#beam_grid\n #+ Calculates settings for a grid that aligns with the neutral beam.\n #+***\n #+##Arguments\n #+ **nbi**: [Neutral beam geometry structure](|url|/page/03_technical/01_prefida_inputs.html#neutral-beam-geometry-structure)\n #+\n #+ **rstart**: Radial start position of beam grid [cm]\n #+\n #+##Keyword Arguments\n #+ **dV**: Cell volume [\\(cm^3\\)]: Defaults to 8.0\n #+\n #+ **nx**: Number of cells in length: Default determined by `dV`\n #+\n #+ **ny**: Number of 
cells in width: Default determined by `dV`\n #+\n #+ **nz**: Number of cells in height: Default determined by `dV`\n #+\n #+ **length**: Length of grid along beam sightline. [cm]: Defaults to 100 cm\n #+\n #+ **width**: Width of grid [cm]: Defaults to 100 cm\n #+\n #+ **height**: Height of grid [cm]: Defaults 80 cm\n #+\n #+##Return Value\n #+ Structure containing beam grid settings suitable for the Namelist File\n #+\n #+##Example Usage\n #+```python\n #+>>> grid = beam_grid(nbi,200.0,nx=100,ny=50,nz=50,length=100,width=50,height=50)\n #+```\n \"\"\"\n\n if width < nbi['widy']:\n warn(\"Grid width is smaller then the source width\")\n print(\"width: {}\".format(width))\n print(\"source width: {}\".format(nbi['widy']))\n\n if height < nbi['widz']:\n warn(\"Grid height is smaller then the source height\")\n print(\"height: {}\".format(height))\n print(\"source height: {}\".format(nbi['widz']))\n\n dv3 = dv ** (1. / 3.)\n\n if nx is None:\n nx = round(length / dv3)\n\n if ny is None:\n ny = round(width / dv3)\n\n if nz is None:\n nz = round(height / dv3)\n\n xmin = 0.\n xmax = length\n ymin = -width / 2.\n ymax = width / 2.\n zmin = -height / 2.\n zmax = height / 2.\n\n src = nbi['src']\n axis = nbi['axis'] / np.sqrt(np.sum(nbi['axis'] ** 2))\n pos = src + 100. * axis\n\n if np.sqrt(src[0] ** 2 + src[1] ** 2) < rstart:\n error(\"Source radius cannot be less then rstart\", halt=True)\n\n dis = np.sqrt(np.sum((src - pos) ** 2.0))\n beta = np.arcsin((src[2] - pos[2]) / dis)\n alpha = np.arctan2((pos[1] - src[1]), (pos[0] - src[0]))\n gamma = 0.\n a = axis[0] ** 2 + axis[1] ** 2\n b = 2. * (src[0] * axis[0] + src[1] * axis[1])\n c = src[0] ** 2 + src[1] ** 2 - rstart ** 2\n t = (-b - np.sqrt(b ** 2 - 4. * a * c)) / (2. * a)\n origin = src + t * axis\n\n beam_grid = {'nx': nx,\n 'ny': ny,\n 'nz': nz,\n 'xmin': xmin,\n 'xmax': xmax,\n 'ymin': ymin,\n 'ymax': ymax,\n 'zmin': zmin,\n 'zmax': zmax,\n 'alpha': alpha,\n 'beta': beta,\n 'gamma': gamma,\n 'origin': origin}\n\n return beam_grid\n\ndef write_data(h5_obj, dic, desc=dict(), units=dict(), name=''):\n \"\"\"\n #+#write_data\n #+ Write h5 datasets with attributes 'description' and 'units'\n #+***\n #+##Arguments\n #+ **h5_obj**: An h5 file or group object from h5py\n #+\n #+ **dic**: Dict of data to save as h5 datasets\n #+\n #+##Keyword Arguments\n #+ **name**: Name/description of dic for clarity in raising errors\n #+\n #+ **desc**: Dict with same keys as dic describing each item in dic\n #+\n #+ **units**: Dict with same keys as dic providing units of data in dic, doesn't have to be all keys of dic.\n #+\n #+##Example Usage\n #+```python\n #+>>> write_data(h5_obj, dic, desc, units)\n #+```\n \"\"\"\n for key in dic:\n if isinstance(dic[key], dict):\n h5_grp = h5_obj.create_group(key)\n write_data(h5_grp, dic[key])\n continue\n\n # Transpose data to match expected by Fortran and historically provided by IDL\n if isinstance(dic[key], np.ndarray):\n if dic[key].ndim >= 2:\n dic[key] = dic[key].T\n\n # Make strings of fixed length as required by Fortran.\n # See http://docs.h5py.org/en/latest/strings.html#fixed-length-ascii\n if isinstance(dic[key], str):\n dic[key] = np.string_(dic[key])\n\n # Create dataset\n ds = h5_obj.create_dataset(key, data = dic[key])\n\n # Add descrption attribute\n if key in desc:\n ds.attrs['description'] = desc[key]\n\n # Add units attribute (if present)\n if key in units:\n ds.attrs['units'] = units[key]\n\ndef read_geqdsk(filename, grid, poloidal=False):\n \"\"\"\n #+#read_geqdsk\n #+Reads an EFIT GEQDSK file\n 
#+***\n #+##Arguments\n #+ **filename**: GEQDSK file\n #+\n #+ **grid**: Interpolation grid\n #+\n #+##Keyword Arguments\n #+ **poloidal**: Return rho_p (sqrt(normalized poloidal flux)) instead of rho (sqrt(normalized toroidal flux))\n #+\n #+##Return Value\n #+Electronmagnetic fields structure, rho, btipsign\n #+\n #+##Example Usage\n #+```python\n #+>>> fields, rho, btipsign = read_geqdsk(\"./g133223.00200\",grid)\n #+```\n \"\"\"\n dims = grid['r2d'].shape\n r_pts = grid['r2d'].flatten()/100\n z_pts = grid['z2d'].flatten()/100\n g = efit.readg(filename)\n btipsign = np.sign(g[\"current\"]*g[\"bcentr\"])\n\n fpol = g[\"fpol\"]\n psiaxis = g[\"ssimag\"]\n psiwall = g[\"ssibry\"]\n r = g[\"r\"]\n z = g[\"z\"]\n\n psi_arr = np.linspace(psiaxis, psiwall, len(fpol))\n fpol_itp = interp1d(psi_arr, fpol, 'cubic', fill_value=fpol[-1],bounds_error=False)\n psirz_itp = interp2d(r, z, g[\"psirz\"], 'cubic')\n\n if poloidal:\n rhogrid = np.array([psirz_itp(rr,zz) for (rr,zz) in zip(r_pts,z_pts)]).reshape(dims)\n rhogrid = np.sqrt((rhogrid - g[\"ssimag\"])/(g[\"ssibry\"] - g[\"ssimag\"]))\n else:\n rhogrid=efit.rho_rz(g,r_pts,z_pts,norm=True).reshape(dims)\n\n br = np.array([psirz_itp(rr,zz,dy=1)/rr for (rr,zz) in zip(r_pts,z_pts)]).reshape(dims)\n bz = np.array([-psirz_itp(rr,zz,dx=1)/rr for (rr,zz) in zip(r_pts,z_pts)]).reshape(dims)\n bt = np.array([fpol_itp(psirz_itp(rr,zz))/rr for (rr,zz) in zip(r_pts,z_pts)]).reshape(dims)\n\n er = br*0\n ez = bz*0\n et = bt*0\n\n mask = np.ones(dims,dtype=np.int32)\n\n equil = {\"time\":0.0,\"data_source\":os.path.abspath(filename), \"mask\":mask,\n \"br\":br,\"bt\":bt,\"bz\":bz,\"er\":er,\"et\":et,\"ez\":ez}\n\n return equil, rhogrid, btipsign\n\ndef read_ncdf(filename, vars=None):\n '''\n #+#read_ncdf\n #+Reads a flat NetCDF file\n #+***\n #+##Arguments\n #+ **filename**: NetCDF file\n #+\n #+##Keyword Arguments\n #+ **vars**: List of variables to read\n #+\n #+##Return Value\n #+Structure containing NetCDF variables\n #+\n #+##Example Usage\n #+```python\n #+>>> a = read_ncdf(\"./123324H01_fi_1.cdf\")\n #+```\n '''\n\n d = dict()\n d['err'] = 1\n if os.path.isfile(filename):\n d['err'] = 0\n f = netcdf.netcdf_file(filename, 'r', mmap=False)\n variables = f.variables\n if vars != None:\n for k in vars:\n # need to check case sensitibity\n if k in variables.keys():\n v = variables[k]\n if tuple() == v.shape:\n d[k] = v.getValue()\n else:\n d[k] = v[:]\n else:\n for k,v in variables.items():\n if tuple() == v.shape:\n d[k] = v.getValue()\n else:\n d[k] = v[:]\n f.close()\n else:\n error('FILE DOES NOT EXIST: '+filename)\n\n return d\n\ndef extract_transp_plasma(filename, intime, grid, rhogrid,\n dn0out=None, scrapeoff=None,rho_scrapeoff=0.1):\n '''\n #+#extract_transp_plasma\n #+Extracts `plasma` structure from a TRANSP run\n #+***\n #+##Arguments\n #+ **filename**: TRANSP output file e.g. 
[TRANSP_RUNID].CDF\n #+\n #+ **intime**: Time of interest [s]\n #+\n #+ **grid**: Interpolation grid\n #+\n #+ **rhogrid**: sqrt(normalized torodial flux) mapped onto the interpolation grid\n #+\n #+##Keyword Arguments\n #+ **dn0out**: Wall Neutral density value `dn0out` variable in transp namelist\n #+\n #+ **scrapeoff**: scrapeoff decay length\n #+\n #+ **rho_scrapeoff**: scrapeoff length, default = 0.1\n #+\n #+##Example Usage\n #+```python\n #+>>> plasma = extract_transp_plasma(\"./142332H01.CDF\", 1.2, grid, rho)\n #+```\n '''\n\n var_list = [\"X\",\"TRFLX\",\"TFLUX\",\"TIME\",\"NE\",\"NH\",\"ND\",\"NT\",\"NIMP\",\"TE\",\"TI\",\"ZEFFI\",\"OMEGA\",\"DN0WD\",\"XZIMP\"]\n\n zz = read_ncdf(filename, vars=var_list)\n\n t = zz['TIME']\n idx = np.argmin(abs(t-intime))\n time = t[idx].astype('float64')\n\n print(' * Selecting profiles at :', time, ' s') #pick the closest timeslice to TOI\n\n impurity_charge = np.max(zz[\"XZIMP\"]).astype(\"int16\")\n transp_ne = zz['NE'][idx,:] #cm^-3\n transp_nimp = zz['NIMP'][idx,:] #cm^-3\n transp_nn = zz['DN0WD'][idx,:] #cm^-3\n\n if 'NH' in zz:\n transp_nh = zz['NH'][idx,:] #cm^-3\n else:\n transp_nh = 0*transp_ne\n\n if 'ND' in zz:\n transp_nd = zz['ND'][idx,:] #cm^-3\n else:\n transp_nd = 0*transp_ne\n\n if 'NT' in zz:\n transp_nt = zz['NT'][idx,:] #cm^-3\n else:\n transp_nt = 0*transp_ne\n\n transp_te = zz['TE'][idx,:]*1.e-3 # kev\n transp_ti = zz['TI'][idx,:]*1.e-3 # kev\n transp_zeff = zz['ZEFFI'][idx,:]\n rho_cb = np.sqrt(zz['TRFLX'][idx,:]/zz['TFLUX'][idx])\n # center each rho b/c toroidal flux is at cell boundary\n rho = 0.e0*rho_cb\n rho[0] = 0.5*rho_cb[0]\n for i in range(len(rho_cb)-1):\n rho[i+1] = rho_cb[i+1] - 0.5*(rho_cb[i+1] - rho_cb[i])\n\n if 'OMEGA' not in zz.keys():\n error('OMEGA not found in TRANSP file. 
Assuming no plasma rotation')\n transp_omega=0.0*transp_te\n else:\n transp_omega = zz['OMEGA'][idx,:] # rad/s\n\n if dn0out == None:\n dn0out = transp_nn[-1]\n if scrapeoff == None:\n scrapeoff = 0.0\n\n if scrapeoff > 0.0:\n drho = abs(rho[-1] - rho[-2])\n rho_sc = rho[-1] + drho*(range(np.ceil(rho_scrapeoff/drho)) + 1)\n sc = np.exp(-(rho_sc - rho[-1])/scrapeoff)\n transp_ne = np.append(transp_ne,transp_ne[-1]*sc)\n transp_nimp = np.append(transp_nimp,transp_nimp[-1]*sc)\n transp_nh = np.append(transp_nh,transp_nh[-1]*sc)\n transp_nd = np.append(transp_nd,transp_nd[-1]*sc)\n transp_nt = np.append(transp_nt,transp_nt[-1]*sc)\n transp_te = np.append(transp_te,transp_te[-1]*sc)\n transp_ti = np.append(transp_ti,transp_ti[-1]*sc)\n transp_nn = np.append(transp_nn,0*sc + dn0out)\n transp_zeff = np.append(transp_zeff, (transp_zeff[-1]-1)*sc + 1)\n transp_omega = np.append(transp_omega,transp_omega[-1]*sc)\n rho = np.append(rho, rho_sc)\n\n profiles = {\"rho\":rho,\n \"dene\":np.where(transp_ne > 0, transp_ne, 0.0),\n \"denimp\":np.where(transp_nimp > 0, transp_nimp, 0.0),\n\t\t\"denn\":np.where(transp_nn > 0, transp_nn, 0.0),\n \"te\":np.where(transp_te > 0, transp_te, 0.0),\n \"ti\":np.where(transp_ti > 0, transp_ti, 0.0),\n \"zeff\":np.where(transp_zeff > 1.0, transp_zeff, 1.0),\n \"omega\":transp_omega}\n if 'NH' in zz:\n profiles['denh'] = np.where(transp_nh > 0, transp_nh, 0.0)\n if 'ND' in zz:\n profiles['dend'] = np.where(transp_nd > 0, transp_nd, 0.0)\n if 'NT' in zz:\n profiles['dent'] = np.where(transp_nt > 0, transp_nt, 0.0)\n\n\n # Interpolate onto r-z grid\n dims = rhogrid.shape\n f_dene = interp1d(rho,transp_ne,fill_value='extrapolate')\n dene = f_dene(rhogrid)\n dene = np.where(dene > 0.0, dene, 0.0).astype('float64')\n\n f_denimp = interp1d(rho,transp_nimp,fill_value='extrapolate')\n denimp = f_denimp(rhogrid)\n denimp = np.where(denimp > 0.0, denimp, 0.0).astype('float64')\n\n f_denh = interp1d(rho,transp_nh,fill_value='extrapolate')\n denh = f_denh(rhogrid)\n denh = np.where(denh > 0.0, denh, 0.0).astype('float64')\n\n f_dend = interp1d(rho,transp_nd,fill_value='extrapolate')\n dend = f_dend(rhogrid)\n dend = np.where(dend > 0.0, dend, 0.0).astype('float64')\n\n f_dent = interp1d(rho,transp_nt,fill_value='extrapolate')\n dent = f_dent(rhogrid)\n dent = np.where(dent > 0.0, dent, 0.0).astype('float64')\n\n f_denn = interp1d(rho,np.log(transp_nn),fill_value=np.nan,bounds_error=False)\n log_denn = f_denn(rhogrid)\n denn = np.where(~np.isnan(log_denn), np.exp(log_denn), 0.0).astype('float64')\n\n f_te = interp1d(rho,transp_te,fill_value='extrapolate')\n te = f_te(rhogrid)\n te = np.where(te > 0, te, 0.0).astype('float64')\n\n f_ti = interp1d(rho,transp_ti,fill_value='extrapolate')\n ti = f_ti(rhogrid)\n ti = np.where(ti > 0, ti, 0.0).astype('float64')\n\n f_zeff = interp1d(rho,transp_zeff, fill_value=1.0, bounds_error=False)\n zeff = f_zeff(rhogrid)\n zeff = np.where(zeff > 1, zeff, 1.0).astype('float64')\n\n f_omega = interp1d(rho,transp_omega,fill_value='extrapolate')\n vt = grid['r2d']*f_omega(rhogrid).astype('float64')\n vr = np.zeros(dims,dtype='float64')\n vz = np.zeros(dims,dtype='float64')\n\n max_rho = max(abs(rho))\n\n mask = np.zeros(dims,dtype='int')\n w = np.where(rhogrid <= max_rho) #where we have profiles\n mask[w] = 1\n\n deni = np.concatenate((denh.reshape(1,dims[0],dims[1]),\n dend.reshape(1,dims[0],dims[1]),\n dent.reshape(1,dims[0],dims[1])),axis=0)\n\n ai = np.array([1.007276466879e0, 2.013553212745e0,3.01550071632e0])\n w_ai = [a in zz for a in 
['NH','ND','NT']]\n\n # SAVE IN PROFILES STRUCTURE\n plasma={\"data_source\":os.path.abspath(filename),\"time\":time,\"impurity_charge\":int(impurity_charge),\n \"nthermal\":int(np.sum(w_ai)), \"species_mass\":ai[w_ai], \"deni\":deni[w_ai,:,:],\"profiles\":profiles,\n \"mask\":mask,\"dene\":dene,\"denimp\":denimp,\"denn\":denn,\"te\":te,\"ti\":ti,\n \"vr\":vr,\"vt\":vt,\"vz\":vz,\"zeff\":zeff}\n\n return plasma\n\ndef read_nubeam(filename, grid, e_range=(), p_range=(), btipsign=-1, species=1):\n \"\"\"\n #+#read_nubeam\n #+Reads NUBEAM fast-ion distribution function\n #+***\n #+##Arguments\n #+ **filename**: NUBEAM guiding center fast-ion distribution function file e.g. 159245H01_fi_1.cdf\n #+\n #+ **grid**: Interpolation grid\n #+\n #+##Keyword Arguments\n #+ **btipsign**: Sign of the dot product of the magnetic field and plasma current\n #+\n #+ **e_range**: Energy range to consider\n #+\n #+ **p_range**: Pitch range to consider\n #+\n #+ **species**: Fast-ion species number. Defaults to 1\n #+\n #+##Return Value\n #+Distribution structure\n #+\n #+##Example Usage\n #+```python\n #+>>> dist = read_nubeam(\"./159245H02_fi_1.cdf\",grid,btipsign=-1)\n #+```\n \"\"\"\n\n species_var = \"SPECIES_{}\".format(species)\n sstr = read_ncdf(filename,vars=[species_var])[species_var].tostring().decode('UTF-8')\n print(\"Species: \"+sstr)\n var = read_ncdf(filename, vars=[\"TIME\",\"R2D\",\"Z2D\",\"E_\"+sstr,\"A_\"+sstr,\"F_\"+sstr,\"RSURF\",\"ZSURF\",\"BMVOL\"])\n\n ngrid = len(var[\"R2D\"])\n\n try:\n time = var[\"TIME\"][0]\n except:\n time = var[\"TIME\"]\n\n r2d = var[\"R2D\"]\n z2d = var[\"Z2D\"]\n rsurf = var[\"RSURF\"].T\n zsurf = var[\"ZSURF\"].T\n bmvol = var[\"BMVOL\"]\n pitch = var[\"A_\"+sstr]\n energy = var[\"E_\"+sstr]*1e-3\n fbm = var[\"F_\"+sstr].T*1e3\n fbm = np.where(fbm > 0.0, 0.5*fbm, 0.0) #0.5 to convert to pitch instead of solid angle d_omega/4pi\n\n if btipsign < 0:\n fbm = fbm[:,::-1,:] #reverse pitch elements\n\n if not e_range:\n e_range = (np.min(energy), np.max(energy))\n\n if not p_range:\n p_range = (np.min(pitch), np.max(pitch))\n\n # Trim distribution according to e/p_range\n we = np.logical_and(energy >= e_range[0], energy <= e_range[1])\n wp = np.logical_and(pitch >= p_range[0], pitch <= p_range[1])\n energy = energy[we]\n nenergy = len(energy)\n pitch = pitch[wp]\n npitch = len(pitch)\n fbm = fbm[we,:,:]\n fbm = fbm[:,wp,:]\n dE = np.abs(energy[1] - energy[0])\n dp = np.abs(pitch[1] - pitch[0])\n emin, emax = np.maximum(np.min(energy) - 0.5*dE, 0.0), np.max(energy) + 0.5*dE\n pmin, pmax = np.maximum(np.min(pitch) - 0.5*dp, -1.0), np.minimum(np.max(pitch)+0.5*dp, 1.0)\n\n print('Energy min/max: ', emin, emax)\n print('Pitch min/max: ',pmin, pmax)\n\n nr = grid[\"nr\"]\n nz = grid[\"nz\"]\n r = grid[\"r\"]\n z = grid[\"z\"]\n rgrid = grid[\"r2d\"]\n zgrid = grid[\"z2d\"]\n dr = np.abs(r[1] - r[0])\n dz = np.abs(z[1] - z[0])\n\n fdens = np.sum(fbm,axis=(0,1))*dE*dp\n ntot = np.sum(fdens*bmvol)\n print('Ntotal in phase space: ',ntot)\n\n tri = Delaunay(np.vstack((r2d,z2d)).T) # Triangulation for barycentric interpolation\n pts = np.array([xx for xx in zip(r2d,z2d)])\n itp = NearestNDInterpolator(pts,np.arange(ngrid)) #to find indices outside simplices\n\n points = np.array([xx for xx in zip(rgrid.flatten(),zgrid.flatten())])\n t = tri.find_simplex(points)\n\n denf = np.zeros((nr,nz))\n fbm_grid = np.zeros((nenergy,npitch,nr,nz))\n for (ind,tt) in enumerate(t):\n i,j = np.unravel_index(ind,(nr,nz))\n if tt == -1:\n ii = int(itp(r[i],z[j]))\n denf[i,j] = fdens[ii]\n 
fbm_grid[:,:,i,j] = fbm[:,:,ii]\n else:\n b = tri.transform[tt,:2].dot(np.transpose(points[ind] - tri.transform[tt,2]))\n s = tri.simplices[tt,:]\n #perform barycentric linear interpolation\n denf[i,j] = b[0]*fdens[s[0]] + b[1]*fdens[s[1]] + (1 - np.sum(b))*fdens[s[2]]\n fbm_grid[:,:,i,j] = b[0]*fbm[:,:,s[0]] + b[1]*fbm[:,:,s[1]] + (1-np.sum(b))*fbm[:,:,s[2]]\n\n denf[denf < 0] = 0\n\n # Correct for points outside of seperatrix\n rmaxis = np.mean(rsurf[:,0])\n zmaxis = np.mean(zsurf[:,0])\n r_sep = rsurf[:,-1]\n z_sep = zsurf[:,-1]\n\n #plt.triplot(r2d,z2d,tri.simplices.copy())\n #plt.plot(r2d,z2d,'o')\n #plt.plot(r_sep,z_sep)\n #plt.show()\n x_bdry = r_sep - rmaxis\n y_bdry = z_sep - zmaxis\n r_bdry = np.sqrt(x_bdry**2 + y_bdry**2)\n theta_bdry = np.arctan2(y_bdry,x_bdry)\n theta_bdry = np.where(theta_bdry < 0.0, theta_bdry + 2*np.pi, theta_bdry) #[0,2pi]\n w = np.argsort(theta_bdry)\n theta_bdry = theta_bdry[w]\n r_bdry = r_bdry[w]\n theta_bdry, w = np.unique(theta_bdry,return_index=True)\n r_bdry = r_bdry[w]\n itp = interp1d(theta_bdry,r_bdry,'cubic',fill_value='extrapolate')\n\n x_pts = grid[\"r2d\"] - rmaxis\n y_pts = grid[\"z2d\"] - zmaxis\n r_pts = np.sqrt(x_pts**2 + y_pts**2)\n theta_pts = np.arctan2(y_pts,x_pts)\n theta_pts = np.where(theta_pts < 0.0, theta_pts + 2*np.pi, theta_pts) #[0,2pi]\n r_bdry_itp = itp(theta_pts)\n\n w = r_pts >= r_bdry_itp + 2\n denf[w] = 0.0\n fbm_grid[:,:,w] = 0.0\n\n # enforce correct normalization\n ntot_denf = 2*np.pi*dr*dz*np.sum(r*np.sum(denf,axis=1))\n denf = denf*(ntot/ntot_denf)\n ntot_fbm = (2*np.pi*dE*dp*dr*dz)*np.sum(r*np.sum(fbm_grid,axis=(0,1,3)))\n fbm_grid = fbm_grid*(ntot/ntot_denf)\n\n\n fbm_dict={\"type\":1,\"time\":time,\"nenergy\":nenergy,\"energy\":energy,\"npitch\":npitch,\n \"pitch\":pitch,\"f\":fbm_grid,\"denf\":denf,\"data_source\":os.path.abspath(filename)}\n\n return fbm_dict\n\ndef nubeam_geometry(nubeam, angle=0.0, verbose=False):\n \"\"\"\n #+#nubeam_geometry\n #+Calculates the FIDASIM beam geometry from the beam geometry variables in the TRANSP/NUBEAM namelist\n #+***\n #+##Arguments\n #+ **NUBEAM**: Dictionary containing the following\n #+\n #+ **NUBEAM[\"NAME\"]**: Ion source name\n #+\n #+ **NUBEAM[\"NBSHAP\"]**: Ion source shape 1=rectangular, 2=circular\n #+\n #+ **NUBEAM[\"FOCLZ\"]**: Vertical focal length [cm]\n #+\n #+ **NUBEAM[\"FOCLR\"]**: Horizontal focal length [cm]\n #+\n #+ **NUBEAM[\"DIVZ\"]**: Vertical divergence [rad]\n #+\n #+ **NUBEAM[\"DIVR\"]**: Horizontal divergence [rad]\n #+\n #+ **NUBEAM[\"BMWIDZ\"]**: Ion source half height [cm]\n #+\n #+ **NUBEAM[\"BMWIDR\"]**: Ion source half width [cm]\n #+\n #+ **NUBEAM[\"RTCENA\"]**: Radius of tangency point [cm]\n #+\n #+ **NUBEAM[\"XLBTNA\"]**: Distance from center of beam source grid to tangency point [cm]\n #+\n #+ **NUBEAM[\"XBZETA\"]**: Torodial angle [deg] Positive angles defined to be in the counter-clockwise direction\n #+\n #+ **NUBEAM[\"XYBSCA\"]**: Elevation above/below vacuum vessel midplane of center of beam source grid [cm]\n #+\n #+ **NUBEAM[\"NLJCCW\"]**: Orientation of Ip. 
1 for True/Counter-clockwise current, 0 or -1 for False/Clock-wise current\n #+\n #+ **NUBEAM[\"NLCO\"]**: 1 for Co-beam, 0 or -1 for Counter-beam\n #+\n #+ **NUBEAM[\"NBAPSHA\"]**: Vector of aperture shapes 1=rectangular, 2=circular\n #+\n #+ **NUBEAM[\"XLBAPA\"]**: Vector of distances from center of beam source grid to the aperture plane [cm]\n #+\n #+ **NUBEAM[\"XYBAPA\"]**: Vector of elevation above/below vacuum vessel midplane of beam centerline at aperture [cm]\n #+\n #+ **NUBEAM[\"RAPEDGA\"]**: Vector of aperture half-widths [cm]\n #+\n #+ **NUBEAM[\"XZPEDGA\"]**: Vector of aperture half-heights [cm]\n #+\n #+ **NUBEAM[\"XRAPOFFA\"]**: Vector of horizontal (y) offsets relative to the +x aligned beam centerline [cm]\n #+\n #+ **NUBEAM[\"XZAPOFFA\"]**: Vector of vertical (z) offsets relative to the +x aligned beam centerline [cm]\n #+\n #+##Keyword Arguments\n #+ **angle**: Angle to add to XBZETA to rotate the beams into correct coordinates [deg]\n #+\n #+ **verbose**: Print out positions\n #+\n #+##Return Value\n #+ Neutral beam structure\n #+\n #+##Example Usage\n #+```python\n #+>>> nbi = nubeam_geometry(nubeam)\n #+```\n \"\"\"\n\n if nubeam[\"NLCO\"] == 0:\n nubeam[\"NLCO\"] = -1\n\n if \"NLJCCW\" in nubeam:\n if nubeam[\"NLJCCW\"] == 0:\n nubeam[\"NLJCCW\"] = -1\n else:\n warn(\"Current orientation not specified. Assuming Counter-clockwise.\")\n nubeam[\"NLJCCW\"] = 1\n\n phi_s = (nubeam[\"XBZETA\"] + angle)*np.pi/180.0\n zs = nubeam[\"XYBSCA\"]\n za = nubeam[\"XYBAPA\"][0]\n alpha = np.arcsin((zs-za)/nubeam[\"XLBAPA\"][0])\n pdst = nubeam[\"XLBTNA\"]*np.cos(alpha)\n rs = np.sqrt(nubeam[\"RTCENA\"]**2 + pdst**2)\n dat = nubeam[\"XLBTNA\"] - nubeam[\"XLBAPA\"][0]\n pdat = dat*np.cos(alpha)\n ra = np.sqrt(nubeam[\"RTCENA\"]**2 + pdat**2.0)\n beta_s = np.arccos(nubeam[\"RTCENA\"]/rs)\n beta_a = np.arccos(nubeam[\"RTCENA\"]/ra)\n phi_a = phi_s + nubeam[\"NLJCCW\"]*nubeam[\"NLCO\"]*(beta_s-beta_a)\n\n src = np.array([rs*np.cos(phi_s), rs*np.sin(phi_s),zs])\n aper_src = np.array([ra*np.cos(phi_a), ra*np.sin(phi_a),za])\n axis = (aper_src - src)\n axis = axis/np.sqrt(np.sum(axis**2))\n pos = src + axis*nubeam[\"XLBTNA\"]\n\n if verbose:\n print('Source position: ',src)\n print('1st Aperture position: ',aper_src)\n print('Tangency position: ', pos)\n\n nbi = {\"data_source\":\"TRANSP/NUBEAM namelist\",\"name\":nubeam[\"NAME\"],\n \"shape\":nubeam[\"NBSHAP\"],\"src\":src,\"axis\":axis,\n \"focy\":nubeam[\"FOCLR\"],\"focz\":nubeam[\"FOCLZ\"],\n \"divy\":np.repeat(nubeam[\"DIVR\"],3),\n \"divz\":np.repeat(nubeam[\"DIVZ\"],3),\n \"widy\":nubeam[\"BMWIDR\"], \"widz\":nubeam[\"BMWIDZ\"],\n \"naperture\":len(nubeam[\"NBAPSHA\"]),\"ashape\":nubeam[\"NBAPSHA\"],\n \"awidy\":nubeam[\"RAPEDGA\"],\"awidz\":nubeam[\"XZPEDGA\"],\n \"aoffy\":nubeam[\"XRAPOFFA\"],\"aoffz\":nubeam[\"XZAPOFFA\"],\n \"adist\":nubeam[\"XLBAPA\"] }\n\n return nbi\n" ]
[ [ "numpy.dot", "numpy.string_", "numpy.sqrt", "numpy.arctan2", "scipy.io.netcdf.netcdf_file", "numpy.max", "numpy.mean", "scipy.interpolate.interp2d", "numpy.exp", "numpy.where", "numpy.arcsin", "numpy.unique", "numpy.arange", "numpy.sin", "numpy.ceil", "scipy.interpolate.interp1d", "numpy.repeat", "numpy.zeros", "numpy.unravel_index", "numpy.log", "numpy.min", "numpy.isnan", "numpy.arccos", "numpy.append", "numpy.floor", "numpy.transpose", "numpy.argsort", "numpy.logical_and", "numpy.array", "numpy.sum", "numpy.abs", "numpy.cos", "numpy.tile", "numpy.ones", "numpy.sign", "numpy.mod", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3" ], "tensorflow": [] } ]
laceyg/ternarynet
[ "b17744c2aba3aba7e7e72decb3b8a02792d33b54", "b17744c2aba3aba7e7e72decb3b8a02792d33b54", "b17744c2aba3aba7e7e72decb3b8a02792d33b54", "b17744c2aba3aba7e7e72decb3b8a02792d33b54" ]
[ "tools/tensorpack/tensorpack/tfutils/gradproc.py", "tools/tensorpack/examples/Inception/inception-bn.py", "tools/tensorpack/tensorpack/train/trainer.py", "tools/tensorpack/examples/cifar-convnet.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: gradproc.py\n# Author: Yuxin Wu <[email protected]>\n\nimport tensorflow as tf\nfrom abc import ABCMeta, abstractmethod\nimport re\nimport inspect\nfrom ..utils import logger\nfrom .symbolic_functions import rms\nfrom .summary import add_moving_summary\n\n__all__ = ['GradientProcessor', 'SummaryGradient', 'CheckGradient',\n 'ScaleGradient', 'MapGradient', 'apply_grad_processors',\n 'GlobalNormClip']\n\ndef apply_grad_processors(grads, gradprocs):\n \"\"\"\n :param grads: list of (grad, var).\n :param gradprocs: list of `GradientProcessor` instances.\n :returns: list of (grad, var) went through the processors\n \"\"\"\n g = []\n for grad, var in grads:\n if grad is None:\n logger.warn(\"No Gradient w.r.t {}\".format(var.op.name))\n else:\n g.append((grad, var))\n for proc in gradprocs:\n g = proc.process(g)\n return g\n\nclass GradientProcessor(object):\n __metaclass__ = ABCMeta\n\n def process(self, grads):\n \"\"\"\n Process the symbolic gradients.\n\n :param grads: list of (grad, var)\n :returns: symbolic gradients with the same type as input\n \"\"\"\n with tf.name_scope(type(self).__name__):\n return self._process(grads)\n\n @abstractmethod\n def _process(self, grads):\n pass\n\n\nclass GlobalNormClip(GradientProcessor):\n def __init__(self, global_norm):\n \"\"\" Clip by global norm\n Note that the global norm is the sum of norm for **all** gradients\n \"\"\"\n self._norm = global_norm\n\n def _process(self, grads):\n g = [k[0] for k in grads]\n v = [k[1] for k in grads]\n g, _ = tf.clip_by_global_norm(g, self._norm, name='clip_by_global_norm')\n return list(zip(g, v))\n\nclass MapGradient(GradientProcessor):\n \"\"\"\n Apply a function on all gradient if the name matches regex.\n Keep the other gradients unchanged.\n \"\"\"\n def __init__(self, func, regex='.*'):\n \"\"\"\n :param func: takes a grad or (grad, var) pair and returns a grad. If return None, the\n gradient is discarded.\n :param regex: used to match variables. default to match all variables.\n \"\"\"\n args = inspect.getargspec(func).args\n arg_num = len(args) - inspect.ismethod(func)\n assert arg_num in [1, 2], \\\n \"The function must take 1 or 2 arguments! ({})\".format(args)\n if arg_num == 1:\n self.func = lambda grad, var: func(grad)\n else:\n self.func = func\n\n if not regex.endswith('$'):\n regex = regex + '$'\n self.regex = regex\n\n def _process(self, grads):\n ret = []\n for grad, var in grads:\n if re.match(self.regex, var.op.name):\n grad = self.func(grad, var)\n if grad is not None:\n ret.append((grad, var))\n else:\n ret.append((grad, var))\n return ret\n\n_summaried_gradient = set()\n\nclass SummaryGradient(MapGradient):\n \"\"\"\n Summary history and RMS for each graident variable\n \"\"\"\n def __init__(self):\n super(SummaryGradient, self).__init__(self._mapper)\n\n def _mapper(self, grad, var):\n name = var.op.name\n if name not in _summaried_gradient:\n _summaried_gradient.add(name)\n tf.histogram_summary(name + '/grad', grad)\n add_moving_summary(rms(grad, name=name + '/rms'))\n return grad\n\nclass CheckGradient(MapGradient):\n \"\"\"\n Check for numeric issue.\n \"\"\"\n def __init__(self):\n super(CheckGradient, self).__init__(self._mapper)\n\n def _mapper(self, grad, var):\n # this is very slow.... 
see #3649\n #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)\n grad = tf.check_numerics(grad, 'CheckGradient-' + var.op.name)\n return grad\n\nclass ScaleGradient(MapGradient):\n \"\"\"\n Scale certain gradient by a multiplier\n \"\"\"\n def __init__(self, multipliers, log=True):\n \"\"\"\n :param multipliers: list of (regex, float)\n :param log: whether to do logging or not\n \"\"\"\n if not isinstance(multipliers, list):\n multipliers = [multipliers]\n self.multipliers = multipliers\n self._log = log\n super(ScaleGradient, self).__init__(self._mapper)\n\n def _mapper(self, grad, var):\n varname = var.op.name\n for regex, val in self.multipliers:\n # always match against the whole name\n if not regex.endswith('$'):\n regex = regex + '$'\n\n if re.match(regex, varname):\n if self._log:\n logger.info(\"Apply lr multiplier {} for {}\".format(val, varname))\n if val != 0: # skip zero to speed up\n return grad * val\n else:\n return None\n return grad\n", "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: inception-bn.py\n# Author: Yuxin Wu <[email protected]>\n\nimport cv2\nimport argparse\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom tensorpack import *\nfrom tensorpack.tfutils.symbolic_functions import *\nfrom tensorpack.tfutils.summary import *\n\n\nTOTAL_BATCH_SIZE = 64 * 6\nNR_GPU = 6\nBATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU\nINPUT_SHAPE = 224\n\n\"\"\"\nInception-BN model on ILSVRC12.\nSee \"Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift\", arxiv:1502.03167\n\nThis config reaches 71% single-crop validation accuracy after 150k steps with 6 TitanX.\nLearning rate may need a different schedule for different number of GPUs (because batch size will be different).\n\"\"\"\n\nclass Model(ModelDesc):\n def _get_input_vars(self):\n return [InputVar(tf.float32, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),\n InputVar(tf.int32, [None], 'label') ]\n\n def _build_graph(self, input_vars):\n image, label = input_vars\n image = image / 128.0\n\n def inception(name, x, nr1x1, nr3x3r, nr3x3, nr233r, nr233, nrpool, pooltype):\n stride = 2 if nr1x1 == 0 else 1\n with tf.variable_scope(name) as scope:\n outs = []\n if nr1x1 != 0:\n outs.append(Conv2D('conv1x1', x, nr1x1, 1))\n x2 = Conv2D('conv3x3r', x, nr3x3r, 1)\n outs.append(Conv2D('conv3x3', x2, nr3x3, 3, stride=stride))\n\n x3 = Conv2D('conv233r', x, nr233r, 1)\n x3 = Conv2D('conv233a', x3, nr233, 3)\n outs.append(Conv2D('conv233b', x3, nr233, 3, stride=stride))\n\n if pooltype == 'max':\n x4 = MaxPooling('mpool', x, 3, stride, padding='SAME')\n else:\n assert pooltype == 'avg'\n x4 = AvgPooling('apool', x, 3, stride, padding='SAME')\n if nrpool != 0: # pool + passthrough if nrpool == 0\n x4 = Conv2D('poolproj', x4, nrpool, 1)\n outs.append(x4)\n return tf.concat(3, outs, name='concat')\n\n with argscope(Conv2D, nl=BNReLU, use_bias=False):\n l = Conv2D('conv0', image, 64, 7, stride=2)\n l = MaxPooling('pool0', l, 3, 2, padding='SAME')\n l = Conv2D('conv1', l, 64, 1)\n l = Conv2D('conv2', l, 192, 3)\n l = MaxPooling('pool2', l, 3, 2, padding='SAME')\n # 28\n l = inception('incep3a', l, 64, 64, 64, 64, 96, 32, 'avg')\n l = inception('incep3b', l, 64, 64, 96, 64, 96, 64, 'avg')\n l = inception('incep3c', l, 0, 128, 160, 64, 96, 0, 'max')\n\n br1 = Conv2D('loss1conv', l, 128, 1)\n br1 = FullyConnected('loss1fc', br1, 1024, nl=tf.nn.relu)\n br1 = FullyConnected('loss1logit', br1, 1000, nl=tf.identity)\n loss1 = tf.nn.sparse_softmax_cross_entropy_with_logits(br1, 
label)\n loss1 = tf.reduce_mean(loss1, name='loss1')\n\n # 14\n l = inception('incep4a', l, 224, 64, 96, 96, 128, 128, 'avg')\n l = inception('incep4b', l, 192, 96, 128, 96, 128, 128, 'avg')\n l = inception('incep4c', l, 160, 128, 160, 128, 160, 128, 'avg')\n l = inception('incep4d', l, 96, 128, 192, 160, 192, 128, 'avg')\n l = inception('incep4e', l, 0, 128, 192, 192, 256, 0, 'max')\n\n br2 = Conv2D('loss2conv', l, 128, 1)\n br2 = FullyConnected('loss2fc', br2, 1024, nl=tf.nn.relu)\n br2 = FullyConnected('loss2logit', br2, 1000, nl=tf.identity)\n loss2 = tf.nn.sparse_softmax_cross_entropy_with_logits(br2, label)\n loss2 = tf.reduce_mean(loss2, name='loss2')\n\n # 7\n l = inception('incep5a', l, 352, 192, 320, 160, 224, 128, 'avg')\n l = inception('incep5b', l, 352, 192, 320, 192, 224, 128, 'max')\n l = GlobalAvgPooling('gap', l)\n\n logits = FullyConnected('linear', l, out_dim=1000, nl=tf.identity)\n prob = tf.nn.softmax(logits, name='output')\n loss3 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)\n loss3 = tf.reduce_mean(loss3, name='loss3')\n\n cost = tf.add_n([loss3, 0.3 * loss2, 0.3 * loss1], name='weighted_cost')\n add_moving_summary([cost, loss1, loss2, loss3])\n\n wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')\n add_moving_summary(tf.reduce_mean(wrong, name='train_error_top1'))\n\n wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')\n add_moving_summary(tf.reduce_mean(wrong, name='train_error_top5'))\n\n # weight decay on all W of fc layers\n wd_w = tf.train.exponential_decay(0.0002, get_global_step_var(),\n 80000, 0.7, True)\n wd_cost = tf.mul(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')\n add_moving_summary(wd_cost)\n\n add_param_summary([('.*/W', ['histogram'])]) # monitor W\n self.cost = tf.add_n([cost, wd_cost], name='cost')\n\ndef get_data(train_or_test):\n isTrain = train_or_test == 'train'\n ds = dataset.ILSVRC12(args.data, train_or_test, shuffle=True if isTrain else False)\n meta = dataset.ILSVRCMeta()\n pp_mean = meta.get_per_pixel_mean()\n\n if isTrain:\n # TODO use the augmentor in GoogleNet\n augmentors = [\n imgaug.Resize((256, 256)),\n imgaug.Brightness(30, False),\n imgaug.Contrast((0.8,1.2), True),\n imgaug.MapImage(lambda x: x - pp_mean),\n imgaug.RandomCrop((224, 224)),\n imgaug.Flip(horiz=True),\n ]\n else:\n augmentors = [\n imgaug.Resize((256, 256)),\n imgaug.MapImage(lambda x: x - pp_mean),\n imgaug.CenterCrop((224, 224)),\n ]\n ds = AugmentImageComponent(ds, augmentors)\n ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)\n if isTrain:\n ds = PrefetchDataZMQ(ds, 6)\n return ds\n\n\ndef get_config():\n logger.auto_set_dir()\n # prepare dataset\n dataset_train = get_data('train')\n step_per_epoch = 5000\n dataset_val = get_data('val')\n\n lr = get_scalar_var('learning_rate', 0.045, summary=True)\n return TrainConfig(\n dataset=dataset_train,\n optimizer=tf.train.MomentumOptimizer(lr, 0.9),\n callbacks=Callbacks([\n StatPrinter(), ModelSaver(),\n InferenceRunner(dataset_val, [\n ClassificationError('wrong-top1', 'val-top1-error'),\n ClassificationError('wrong-top5', 'val-top5-error')]),\n #HumanHyperParamSetter('learning_rate', 'hyper-googlenet.txt')\n ScheduledHyperParamSetter('learning_rate',\n [(8, 0.03), (14, 0.02), (17, 5e-3),\n (19, 3e-3), (24, 1e-3), (26, 2e-4),\n (30, 5e-5) ])\n ]),\n session_config=get_default_sess_config(0.99),\n model=Model(),\n step_per_epoch=step_per_epoch,\n max_epoch=80,\n )\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n 
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('--data', help='ImageNet data root directory', required=True)\n args = parser.parse_args()\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n config = get_config()\n if args.load:\n config.session_init = SaverRestore(args.load)\n if args.gpu:\n config.nr_tower = len(args.gpu.split(','))\n SyncMultiGPUTrainer(config).train()\n", "# -*- coding: UTF-8 -*-\n# File: trainer.py\n# Author: Yuxin Wu <[email protected]>\n\nimport tensorflow as tf\nimport time\nfrom six.moves import zip\n\nfrom .base import Trainer\n\nfrom ..utils import logger, SUMMARY_BACKUP_KEYS\nfrom ..tfutils import (get_tensors_by_names, freeze_collection,\n get_global_step_var, TowerContext)\nfrom ..tfutils.summary import summary_moving_average, add_moving_summary\nfrom ..predict import OnlinePredictor, build_multi_tower_prediction_graph\nfrom ..tfutils.gradproc import apply_grad_processors\nfrom .input_data import FeedInput, FeedfreeInput\n\n__all__ = ['SimpleTrainer', 'FeedfreeTrainer', 'MultiPredictorTowerTrainer',\n 'SingleCostFeedfreeTrainer']\n\nclass PredictorFactory(object):\n \"\"\" Make predictors for a trainer\"\"\"\n\n def __init__(self, sess, model, towers):\n \"\"\"\n :param towers: list of gpu relative id\n \"\"\"\n self.sess = sess\n self.model = model\n self.towers = towers\n self.tower_built = False\n\n def get_predictor(self, input_names, output_names, tower):\n \"\"\"\n :param tower: need the kth tower (not the gpu id)\n :returns: an online predictor\n \"\"\"\n if not self.tower_built:\n self._build_predict_tower()\n tower = self.towers[tower % len(self.towers)]\n raw_input_vars = get_tensors_by_names(input_names)\n output_names = ['towerp{}/'.format(tower) + n for n in output_names]\n output_vars = get_tensors_by_names(output_names)\n return OnlinePredictor(self.sess, raw_input_vars, output_vars)\n\n def _build_predict_tower(self):\n tf.get_variable_scope().reuse_variables()\n # build_predict_tower might get called anywhere, but 'towerp' should be the outermost name scope\n with tf.name_scope(None), \\\n freeze_collection(SUMMARY_BACKUP_KEYS):\n build_multi_tower_prediction_graph(self.model, self.towers)\n self.tower_built = True\n\nclass SimpleTrainer(Trainer):\n \"\"\" A naive demo trainer \"\"\"\n def __init__(self, config):\n super(SimpleTrainer, self).__init__(config)\n self._predictor_factory = PredictorFactory(self.sess, self.model, [0])\n if not hasattr(config, 'dataset'):\n self._input_method = config.data\n assert isinstance(self._input_method, FeedInput)\n else:\n self._input_method = FeedInput(config.dataset)\n\n def run_step(self):\n feed = self._input_method.next_feed()\n self.sess.run([self.train_op], feed_dict=feed) # faster since train_op return None\n\n def _setup(self):\n self._input_method._setup(self)\n model = self.model\n self.input_vars = model.get_input_vars()\n with TowerContext(''):\n model.build_graph(self.input_vars)\n cost_var = model.get_cost()\n add_moving_summary(cost_var)\n\n grads = self.config.optimizer.compute_gradients(cost_var)\n grads = apply_grad_processors(grads,\n self.model.get_gradient_processor())\n\n self.train_op = tf.group(\n self.config.optimizer.apply_gradients(grads, get_global_step_var()),\n summary_moving_average(), name='train_op')\n\n def _trigger_epoch(self):\n if self.summary_op is not None:\n feed = self._input_method.next_feed()\n summary_str = self.summary_op.eval(feed_dict=feed)\n 
self._process_summary(summary_str)\n\n def get_predict_func(self, input_names, output_names):\n return self._predictor_factory.get_predictor(input_names, output_names, 0)\n\nclass MultiPredictorTowerTrainer(Trainer):\n \"\"\" A trainer with possibly multiple prediction tower \"\"\"\n def _setup_predictor_factory(self, predict_tower):\n # by default, use the first training gpu for prediction\n predict_tower = predict_tower or [0]\n self._predictor_factory = PredictorFactory(\n self.sess, self.model, predict_tower)\n\n def get_predict_func(self, input_names, output_names, tower=0):\n \"\"\"\n :param tower: return the kth predict_func\n :returns: an `OnlinePredictor`\n \"\"\"\n return self._predictor_factory.get_predictor(input_names, output_names, tower)\n\n def get_predict_funcs(self, input_names, output_names, n):\n return [self.get_predict_func(input_names, output_names, k) for k in range(n)]\n\nclass FeedfreeTrainer(Trainer):\n \"\"\" A trainer which runs iteration without feed_dict (therefore faster) \"\"\"\n def _trigger_epoch(self):\n # need to run summary_op every epoch\n # note that summary_op will take a data from the queue\n if self.summary_op is not None:\n summary_str = self.summary_op.eval()\n self._process_summary(summary_str)\n\n def _get_input_tensors(self):\n return self._input_method.get_input_tensors()\n\n def _setup(self):\n assert isinstance(self._input_method, FeedfreeInput), type(self._input_method)\n self._input_method._setup(self)\n\nclass SingleCostFeedfreeTrainer(FeedfreeTrainer):\n def _get_cost_and_grad(self):\n \"\"\" get the cost and gradient on a new tower\"\"\"\n actual_inputs = self._get_input_tensors()\n self.model.build_graph(actual_inputs)\n cost_var = self.model.get_cost()\n # GATE_NONE faster?\n grads = self.config.optimizer.compute_gradients(\n cost_var, gate_gradients=0)\n add_moving_summary(cost_var)\n return cost_var, grads\n\n def run_step(self):\n \"\"\" Simply run self.train_op\"\"\"\n self.sess.run(self.train_op)\n # debug-benchmark code:\n #run_metadata = tf.RunMetadata()\n #self.sess.run([self.train_op],\n #options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),\n #run_metadata=run_metadata\n #)\n #from tensorflow.python.client import timeline\n #trace = timeline.Timeline(step_stats=run_metadata.step_stats)\n #trace_file = open('timeline.ctf.json', 'w')\n #trace_file.write(trace.generate_chrome_trace_format())\n #import sys; sys.exit()\n\n", "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: cifar-convnet.py\n# Author: Yuxin Wu <[email protected]>\nimport tensorflow as tf\nimport argparse\nimport numpy as np\nimport os\n\nfrom tensorpack import *\nimport tensorpack.tfutils.symbolic_functions as symbf\nfrom tensorpack.tfutils.summary import *\nfrom tensorpack.utils.gpu import get_nr_gpu\n\n\"\"\"\nA small convnet model for Cifar10 or Cifar100 dataset.\n\nCifar10:\n 91% accuracy after 50k step.\n 19.3 step/s on Tesla M40\n\nNot a good model for Cifar100, just for demonstration.\n\"\"\"\n\nclass Model(ModelDesc):\n def __init__(self, cifar_classnum):\n super(Model, self).__init__()\n self.cifar_classnum = cifar_classnum\n\n def _get_input_vars(self):\n return [InputVar(tf.float32, [None, 30, 30, 3], 'input'),\n InputVar(tf.int32, [None], 'label')\n ]\n\n def _build_graph(self, input_vars):\n image, label = input_vars\n is_training = get_current_tower_context().is_training\n keep_prob = tf.constant(0.5 if is_training else 1.0)\n\n if is_training:\n tf.image_summary(\"train_image\", image, 10)\n\n image = image / 4.0 # just to make 
range smaller\n with argscope(Conv2D, nl=BNReLU, use_bias=False, kernel_shape=3):\n logits = LinearWrap(image) \\\n .Conv2D('conv1.1', out_channel=64) \\\n .Conv2D('conv1.2', out_channel=64) \\\n .MaxPooling('pool1', 3, stride=2, padding='SAME') \\\n .Conv2D('conv2.1', out_channel=128) \\\n .Conv2D('conv2.2', out_channel=128) \\\n .MaxPooling('pool2', 3, stride=2, padding='SAME') \\\n .Conv2D('conv3.1', out_channel=128, padding='VALID') \\\n .Conv2D('conv3.2', out_channel=128, padding='VALID') \\\n .FullyConnected('fc0', 1024 + 512, nl=tf.nn.relu) \\\n .tf.nn.dropout(keep_prob) \\\n .FullyConnected('fc1', 512, nl=tf.nn.relu) \\\n .FullyConnected('linear', out_dim=self.cifar_classnum, nl=tf.identity)()\n\n cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)\n cost = tf.reduce_mean(cost, name='cross_entropy_loss')\n\n wrong = symbf.prediction_incorrect(logits, label)\n # monitor training error\n add_moving_summary(tf.reduce_mean(wrong, name='train_error'))\n\n # weight decay on all W of fc layers\n wd_cost = tf.mul(0.0004,\n regularize_cost('fc.*/W', tf.nn.l2_loss),\n name='regularize_loss')\n add_moving_summary(cost, wd_cost)\n\n add_param_summary([('.*/W', ['histogram'])]) # monitor W\n self.cost = tf.add_n([cost, wd_cost], name='cost')\n\ndef get_data(train_or_test, cifar_classnum):\n isTrain = train_or_test == 'train'\n if cifar_classnum == 10:\n ds = dataset.Cifar10(train_or_test)\n else:\n ds = dataset.Cifar100(train_or_test)\n if isTrain:\n augmentors = [\n imgaug.RandomCrop((30, 30)),\n imgaug.Flip(horiz=True),\n imgaug.Brightness(63),\n imgaug.Contrast((0.2,1.8)),\n imgaug.GaussianDeform(\n [(0.2, 0.2), (0.2, 0.8), (0.8,0.8), (0.8,0.2)],\n (30,30), 0.2, 3),\n imgaug.MeanVarianceNormalize(all_channel=True)\n ]\n else:\n augmentors = [\n imgaug.CenterCrop((30, 30)),\n imgaug.MeanVarianceNormalize(all_channel=True)\n ]\n ds = AugmentImageComponent(ds, augmentors)\n ds = BatchData(ds, 128, remainder=not isTrain)\n if isTrain:\n ds = PrefetchData(ds, 3, 2)\n return ds\n\ndef get_config(cifar_classnum):\n logger.auto_set_dir()\n\n # prepare dataset\n dataset_train = get_data('train', cifar_classnum)\n step_per_epoch = dataset_train.size()\n dataset_test = get_data('test', cifar_classnum)\n\n sess_config = get_default_sess_config(0.5)\n\n lr = symbf.get_scalar_var('learning_rate', 1e-2, summary=True)\n def lr_func(lr):\n if lr < 3e-5:\n raise StopTraining()\n return lr * 0.31\n\n return TrainConfig(\n dataset=dataset_train,\n optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),\n callbacks=Callbacks([\n StatPrinter(), ModelSaver(),\n InferenceRunner(dataset_test, ClassificationError()),\n StatMonitorParamSetter('learning_rate', 'val_error', lr_func,\n threshold=0.001, last_k=10),\n ]),\n session_config=sess_config,\n model=Model(cifar_classnum),\n step_per_epoch=step_per_epoch,\n max_epoch=150,\n )\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100',\n type=int, default=10)\n args = parser.parse_args()\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n else:\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n with tf.Graph().as_default():\n config = get_config(args.classnum)\n if args.load:\n config.session_init = SaverRestore(args.load)\n\n if args.gpu:\n config.nr_tower = len(args.gpu.split(','))\n nr_gpu = get_nr_gpu()\n if nr_gpu == 1:\n 
QueueInputTrainer(config).train()\n else:\n SyncMultiGPUTrainer(config).train()\n" ]
[ [ "tensorflow.clip_by_global_norm", "tensorflow.check_numerics", "tensorflow.histogram_summary" ], [ "tensorflow.nn.softmax", "tensorflow.concat", "tensorflow.reduce_mean", "tensorflow.train.MomentumOptimizer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.variable_scope", "tensorflow.add_n" ], [ "tensorflow.get_variable_scope", "tensorflow.name_scope" ], [ "tensorflow.Graph", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.train.AdamOptimizer", "tensorflow.image_summary", "tensorflow.add_n" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "0.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
ScSteffen/neuralEntropy
[ "796e0b38ac9c01f59772d49be3368b8ac9ad24d7", "796e0b38ac9c01f59772d49be3368b8ac9ad24d7" ]
[ "experimental/sandBoxGeneral.py", "experimental/convexNetworkComparison.py" ]
[ "### imports\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import initializers\n\nimport src.utils as utils\n\n# import tensorflow.keras.backend as K\nimport matplotlib.pyplot as plt\n\nfrom src.utils import finiteDiff, integrate, loadData, evaluateModel\n\nplt.style.use(\"kitish\")\n\n\n# ------ Code starts here --------\n\ndef main():\n y = [6.51778e-55,\n 9.20148e-53,\n 1.1754e-50,\n 1.35858e-48,\n 1.42087e-46,\n 1.3446e-44,\n 1.15134e-42,\n 8.92042e-41,\n 6.2537e-39,\n 3.96697e-37,\n 2.27694e-35,\n 1.18254e-33,\n 5.5571e-32,\n 2.36294e-30,\n 9.09133e-29,\n 3.165e-27,\n 9.96986e-26,\n 2.84168e-24,\n 7.3288e-23,\n 1.71025e-21,\n 3.61126e-20,\n 6.89965e-19,\n 1.1928e-17,\n 1.86585e-16,\n 2.64093e-15,\n 3.38226e-14,\n 3.91948e-13,\n 4.1098e-12,\n 3.89927e-11,\n 3.34747e-10,\n 2.60028e-09,\n 1.82766e-08,\n 1.16236e-07,\n 6.6889e-07,\n 3.4829e-06,\n 1.64096e-05,\n 6.99559e-05,\n 0.00026985,\n 0.000941867,\n 0.0029746,\n 0.00850037,\n 0.0219795,\n 0.0514242,\n 0.108865,\n 0.208536,\n 0.361445,\n 0.566858,\n 0.80441,\n 1.03288,\n 1.20004,\n 1.26157,\n 1.20004,\n 1.03288,\n 0.80441,\n 0.566858,\n 0.361445,\n 0.208536,\n 0.108865,\n 0.0514242,\n 0.0219795,\n 0.00850037,\n 0.0029746,\n 0.000941867,\n 0.00026985,\n 6.99559e-05,\n 1.64096e-05,\n 3.4829e-06,\n 6.6889e-07,\n 1.16236e-07,\n 1.82766e-08,\n 2.60028e-09,\n 3.34747e-10,\n 3.89927e-11,\n 4.1098e-12,\n 3.91948e-13,\n 3.38226e-14,\n 2.64093e-15,\n 1.86585e-16,\n 1.1928e-17,\n 6.89965e-19,\n 3.61126e-20,\n 1.71025e-21,\n 7.3288e-23,\n 2.84168e-24,\n 9.96986e-26,\n 3.165e-27,\n 9.09133e-29,\n 2.36294e-30,\n 5.5571e-32,\n 1.18254e-33,\n 2.27694e-35,\n 3.96697e-37,\n 6.2537e-39,\n 8.92042e-41,\n 1.15134e-42,\n 1.3446e-44,\n 1.42087e-46,\n 1.35858e-48,\n 1.1754e-50,\n 9.20148e-53]\n x = np.linspace(-5, 5, 100)\n plt.plot(x, y)\n plt.show()\n int = sum(y) / 10;\n print(int)\n # --- Set Parameters ---\n batchSize = 64\n epochCount = 5000\n filename1 = 'models/sandbox/best_model_linear.h5'\n filename2 = 'models/sandbox/best_model_tscheb.h5'\n\n nwHeight = 8\n nwWidth = 5\n inputDim = 1\n nPts = 5000\n maxIter = 1000\n\n # test Data\n [xTest, yTest] = createTrainingData(nPts * 100, -5, 5, mode=\"linear\")\n # xTest = xTest[1::2]\n # yTest = yTest[1::2]\n\n ### linear data\n [xL, yL] = createTrainingData(maxIter * 3, -5, 5, mode=\"linear\") # samples data between -1 and 1\n [xT, yT] = [xL, yL] # utils.shuffleTrainData(x, y)\n\n model1 = createModelRelu(nwWidth, nwHeight, inputDim)\n # model1.load_weights(filenameInit)\n\n multistepTraining(xL, yL, model1, maxIter, epochCount, batchSize)\n\n return 0\n\n\ndef multistepTraining(xT, yT, model, maxIter, epochs, batchSize):\n filename1 = 'models/sandbox/best_model_linear.h5'\n trainLen = xT.shape[0]\n mc_best = tf.keras.callbacks.ModelCheckpoint(filename1, monitor='loss', mode='min',\n save_best_only=True,\n verbose=2)\n xTList = list(xT)\n yTList = list(yT)\n\n yList = []\n xList = []\n\n ypred = model(xT)\n ypredArray = np.asarray(ypred)\n yDiff = np.linalg.norm(ypredArray - yT, axis=0, ord=2)\n newY = np.amax(yDiff)\n newIdx = np.where(yDiff == newY)[0]\n\n yList.append([yTList.pop(0)])\n yList.append([yTList.pop(-1)])\n xList.append([xTList.pop(0)])\n xList.append([xTList.pop(-1)])\n\n for iter in range(0, maxIter):\n xarr = np.asarray(xList)\n yarr = np.asarray(yList)\n history = model.fit(x=xarr, y=yarr,\n validation_split=0.0,\n epochs=epochs,\n batch_size=batchSize,\n verbose=0)\n\n print(\"Trained on 
iteration: \" + str(iter))\n\n # Get new data an evaluate current data\n ypred = model(np.asarray(xTList))\n ypredArray = np.asarray(ypred)\n tmp = np.asarray(yTList).reshape(ypredArray.shape)\n yDiff = ypredArray - tmp\n yDiff = np.absolute(yDiff)\n newY = np.amax(yDiff)\n newIdxes = np.where(yDiff == newY)\n newIdx = newIdxes[0]\n\n utils.plot1D(np.asarray(xTList), [np.asarray(yTList), ypredArray, yDiff], [\"y\", \"model\", \"difference\"],\n\n '../models/sandbox/prediction' + str(iter),\n log=False)\n\n # sort points\n\n utils.plot1D(xarr, [yarr], [\"Interpolation points\"],\n '../models/sandbox/datapts' + str(iter),\n log=False, linetypes=['*'])\n\n # print histories\n utils.plot1D(history.epoch, [history.history['loss']],\n [\"model loss\"],\n '../models/sandbox/traininghistory' + str(iter),\n log=True, linetypes=['-', '--'])\n\n yList.append([yTList.pop(newIdx[0])])\n xList.append([xTList.pop(newIdx[0])])\n return 0\n\n\ndef createTrainingData(nPts, a=-1, b=1, mode=\"linear\"):\n if (mode == \"tscheb\"):\n x = np.zeros((nPts,))\n degN = nPts - 1\n for k in range(0, nPts):\n tmp = np.cos((1 + 2 * (degN - k)) / (2 * (degN + 1)) * np.pi)\n x[k] = a + (tmp + 1) / 2 * (b - a)\n\n else: # (mode == \"linear\"):\n x = np.linspace(a, b, nPts)\n\n y = rungeFunc(x)\n\n return [x, y]\n\n\ndef rungeFunc(x):\n return 1 / (1 + x * x)\n\n\ndef quadFunc(x):\n return x * x\n\n\ndef createModel(nwWidth, nwHeight, inputDim): # Build the network:\n\n # basic dense network\n # Define the input\n\n # Weight initializer for sofplus after K Kumar\n input_stddev = np.sqrt((1 / inputDim) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))\n hidden_stddev = np.sqrt((1 / nwWidth) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))\n\n hiddenInitializer = initializers.RandomNormal(mean=0., stddev=hidden_stddev)\n inputLayerInitializer = initializers.RandomNormal(mean=0., stddev=input_stddev)\n # hiddenInitializer = initializers.Zeros()\n # inputLayerInitializer = initializers.Zeros()\n\n biasInitializer = initializers.Zeros()\n\n #### input layer ####\n input_ = keras.Input(shape=(inputDim,))\n hidden = layers.Dense(nwWidth, activation=\"softplus\", kernel_initializer=inputLayerInitializer,\n bias_initializer=biasInitializer)(input_)\n\n # hidden Layer\n for idx in range(0, nwHeight):\n hidden = layers.Dense(nwWidth, activation=\"softplus\", kernel_initializer=hiddenInitializer,\n bias_initializer=biasInitializer)(hidden)\n\n output_ = layers.Dense(1, activation=None, kernel_initializer=inputLayerInitializer,\n bias_initializer=biasInitializer)(hidden)\n\n # Create the model\n model = keras.Model(inputs=[input_], outputs=[output_], name=\"model1\")\n model.summary()\n\n model.compile(loss=\"mean_squared_error\", optimizer='adam', metrics=['mean_absolute_error'])\n\n return model\n\n\ndef createModelRelu(nwWidth, nwHeight, inputDim): # Build the network:\n\n # basic dense network\n # Define the input\n\n # Weight initializer for sofplus after K Kumar\n input_stddev = np.sqrt((1 / inputDim) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))\n hidden_stddev = np.sqrt((1 / nwWidth) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))\n\n hiddenInitializer = initializers.RandomNormal(mean=0., stddev=hidden_stddev)\n inputLayerInitializer = initializers.RandomNormal(mean=0., stddev=input_stddev)\n # hiddenInitializer = initializers.Zeros()\n # inputLayerInitializer = initializers.Zeros()\n\n biasInitializer = initializers.Zeros()\n\n #### input layer ####\n input_ = keras.Input(shape=(inputDim,))\n hidden = 
layers.Dense(nwWidth, activation=\"softplus\", kernel_initializer=inputLayerInitializer,\n bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(input_)\n\n # hidden Layer\n for idx in range(0, nwHeight):\n hidden = layers.Dense(nwWidth, activation=\"softplus\", kernel_initializer=hiddenInitializer,\n bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(hidden)\n\n output_ = layers.Dense(1, activation=None, kernel_initializer=inputLayerInitializer,\n bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(hidden)\n\n # Create the model\n model = keras.Model(inputs=[input_], outputs=[output_], name=\"model1\")\n model.summary()\n\n model.compile(loss=\"mean_squared_error\", optimizer='adam', metrics=['mean_absolute_error'])\n\n return model\n\n\nif __name__ == '__main__':\n main()\n", "#### MK 4 Networks ####\n'''\nExploration of convex Networks on a simple example\nIt includes the ICNN techniques (Amos et al)\n'''\n\n### This is a script for the training of the\n### Third NN approach\n\n'''\nImprovements:\n1) accepts u as a N-vector\n2) Generalized Loss function\n3) Adapted network layout\n4) RESNet Used as Netowork ( TODO )\n'''\n\nimport csv\nimport multiprocessing\nimport pandas as pd\nfrom joblib import Parallel, delayed\n\n### imports\nimport numpy as np\n# in-project imports\nimport legacyCode.nnUtils as nnUtils\nimport csv\n# Tensorflow\nimport tensorflow as tf\nfrom tensorflow import Tensor\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom tensorflow.keras.constraints import NonNeg\nfrom tensorflow.keras import initializers\n# import tensorflow.keras.backend as K\nimport matplotlib.pyplot as plt\n\nplt.style.use(\"kitish\")\n\n\n# ------ Code starts here --------\n\ndef main():\n # Training Parameters\n batchSize = 5000\n epochCount = 5000\n\n ### Dense Network\n filename = \"legacyCode/models/ConvComparison_fcnn\"\n\n #model = create_modelMK4()\n #model = tf.keras.models.load_model(filename + '/model')\n #model = trainModel(model, filename, batchSize, epochCount)\n # model.load_weights(filename + '/best_model.h5')\n model = tf.keras.models.load_model(filename + '/model')\n\n\n ### Convex Network (nonnegative weights)\n filename = \"legacyCode/models/ConvComparison_nonNeg\"\n\n #model_nonneg = create_modelMK4_nonneg()\n #model_nonneg = tf.keras.models.load_model(filename + '/model')\n #model_nonneg = trainModel(model_nonneg, filename, batchSize, epochCount)\n # model_nonneg.load_weights(filename + '/best_model.h5')\n model_nonneg = tf.keras.models.load_model(filename + '/model')\n\n\n ### Convex Network ICNN architecture\n filename = \"legacyCode/models/ConvComparison_ICNN\"\n\n #model_ICNN = create_modelMK4_ICNN()\n # model_ICNN = trainModel(model_ICNN, filename, batchSize, epochCount)\n # model_nonneg.load_weights(filename + '/best_model.h5')\n model_ICNN = tf.keras.models.load_model(filename + '/model')\n\n # printDerivative(model)\n # printDerivative(model_ICNN)\n evaluateModel(model, model_nonneg, model_ICNN)\n\n # printWeights(model)\n # print(\"----\")\n # printWeights(model_nonneg)\n plt.show()\n return 0\n\n\ndef printDerivative(model):\n x = np.arange(-100.0, 100.0, 0.001)\n y = np.reshape(x,(x.shape[0],1))\n x_model = tf.Variable(y)\n\n\n with tf.GradientTape() as tape:\n # training=True is only needed if there are layers with different\n # behavior during training versus inference (e.g. 
Dropout).\n predictions = model(x_model, training=False) # same as model.predict(x)\n\n gradients = tape.gradient(predictions, x_model)\n\n # Gradient\n # print(grads)\n\n # plot model predictions and derivatives\n y = createTrainingData(x)\n # plt.plot(x, predictions)\n plt.plot(x, gradients)\n # plt.plot(x, y)\n # plt.plot(x,x)\n\n plt.ylabel('function value')\n plt.xlabel('input value')\n plt.legend(['Model', 'Model Derivative', 'Target Fct', 'Target Derivative'])\n\n plt.show()\n\n return gradients\n\n\ndef printWeights(model):\n for layer in model.layers:\n weights = layer.get_weights() # list of numpy arrays\n print(weights)\n # if weights:\n # plt.plot(weights)\n\n # plt.ylabel('weight value')\n # plt.xlabel('weight index')\n # plt.show()\n\n return 0\n\n\ndef evaluateModel(model, model2, model3):\n x = np.arange(-10, 10, 0.001)\n\n y = createTrainingData(x)\n\n predictions = model.predict(x)\n predictions2 = model2.predict(x)\n predictions3 = model3.predict(x)\n\n plt.plot(x, y)\n plt.plot(x, predictions)\n plt.plot(x, predictions2)\n plt.plot(x, predictions3)\n\n plt.ylabel('function value')\n plt.xlabel('input value')\n # plt.ylim([30.9,31])\n plt.legend(['quadratic function', 'FCNN', 'naive convex', 'ICNN'])\n plt.show()\n\n return 0\n\n\ndef trainModel(model, filename, batchSize, epochCount):\n ### 0) Set variables #######################################################\n\n # Name of modelDirectory\n # filename = \"models/Mk4_nnM_1\"\n filenameAlpha = \"trainingData_M1_alpha.csv\"\n filenameU = \"trainingData_M1_u.csv\"\n\n ### 1) Generate Training Data #############################################\n\n print(\"Create Training Data\")\n # build training data!\n x = np.arange(-5.0, 5.0, 0.0001)\n y = createTrainingData(x)\n\n ### 2) Create Model ########################################################\n # print(\"Create Model\")\n\n # Load weights\n # model.load_weights(filename + '/best_model.h5')\n\n ### 3) Setup Training and Train the model ##################################\n\n # Create Early Stopping callback\n es = EarlyStopping(monitor='loss', mode='min', min_delta=0.000000001, patience=500,\n verbose=10) # loss == custom_loss1dMBPrime by model definition\n mc_best = ModelCheckpoint(filename + '/best_model.h5', monitor='loss', mode='min', save_best_only=True)\n mc_500 = ModelCheckpoint(filename + '/model_quicksave.h5', monitor='loss', mode='min', save_best_only=False,\n save_freq=500)\n\n # Train the model\n print(\"Train Model\")\n history = model.fit(x, y, validation_split=0.01, epochs=epochCount, batch_size=batchSize, verbose=1,\n callbacks=[es, mc_best, mc_500]) # batch size = 900000\n\n # View History\n # nnUtils.print_history(history.history)\n\n ### 4) Save trained model and history ########################################\n\n print(\"Save model and history\")\n nnUtils.save_training(filename, model, history)\n print(\"Training successfully saved\")\n\n # load history\n history1 = nnUtils.load_trainHistory(filename)\n # print history as a check\n # nnUtils.print_history(history1)\n\n print(\"Training Sequence successfully finished\")\n return model\n\n\n### Build the network:\ndef create_modelMK4():\n # Define the input\n weightIniMean = 0.0\n weightIniStdDev = 0.05\n # Number of basis functions used:\n\n # Weight initializer\n initializer = tf.keras.initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=None)\n #### input layer ####\n input_ = keras.Input(shape=(1,))\n # Hidden layers\n # hidden = layers.BatchNormalization()(input_)\n '''\n\n hidden = 
layers.Dense(3,kernel_constraint=NonNeg(), activation=\"relu\")(input_)\n hidden = layers.Dense(3,kernel_constraint=NonNeg(), activation=\"relu\")(hidden)\n hidden = layers.Dense(3, kernel_constraint=NonNeg(), activation=\"relu\")(hidden)\n\n '''\n hidden = layers.Dense(3, activation=\"softplus\",\n kernel_initializer=initializer,\n bias_initializer='ones')(input_)\n hidden = layers.Dense(3, activation=\"softplus\",\n kernel_initializer=initializer,\n bias_initializer='ones'\n )(hidden)\n hidden = layers.Dense(3, activation=\"softplus\",\n kernel_initializer=initializer,\n bias_initializer='ones'\n )(hidden)\n\n # Define the output\n output_ = layers.Dense(1,\n kernel_initializer=initializer,\n bias_initializer='ones'\n )(hidden)\n\n # Create the model\n model = keras.Model(inputs=[input_], outputs=[output_])\n model.summary()\n\n # model.compile(loss=cLoss_FONC_varD(quadOrder,BasisDegree), optimizer='adam')#, metrics=[custom_loss1dMB, custom_loss1dMBPrime])\n model.compile(loss=\"mean_squared_error\", optimizer='adam', metrics=['mean_absolute_error'])\n return model\n\ndef create_modelMK4_nonneg():\n # Define the input\n weightIniMean = 0.0\n weightIniStdDev = 0.05\n\n # Define LayerDimensions\n layerDim = 3\n\n # Weight initializer\n initializer = tf.keras.initializers.RandomUniform(minval=0, maxval=0.5, seed=None)\n\n input_ = keras.Input(shape=(1,))\n\n # Hidden layers\n # hidden = layers.BatchNormalization()(input_)\n\n hidden = layers.Dense(layerDim, activation=\"softplus\",\n kernel_initializer=initializer,\n bias_initializer='zeros'\n )(input_)\n hidden = layers.Dense(layerDim, kernel_constraint=NonNeg(), activation=\"softplus\",\n kernel_initializer=initializer,\n bias_initializer='zeros'\n )(hidden)\n hidden = layers.Dense(layerDim, kernel_constraint=NonNeg(), activation=\"softplus\",\n kernel_initializer=initializer,\n bias_initializer='zeros'\n )(hidden)\n\n # Define the ouput\n output_ = layers.Dense(1, kernel_constraint=NonNeg(),\n kernel_initializer=initializer,\n bias_initializer='zeros'\n )(hidden)\n\n # Create the model\n model = keras.Model(inputs=[input_], outputs=[output_])\n model.summary()\n\n # model.compile(loss=cLoss_FONC_varD(quadOrder,BasisDegree), optimizer='adam')#, metrics=[custom_loss1dMB, custom_loss1dMBPrime])\n model.compile(loss=\"mean_squared_error\", optimizer='adam', metrics=['mean_absolute_error'])\n return model\n\ndef create_modelMK4_ICNN():\n # Define the input\n weightIniMean = 0.0\n weightIniStdDev = 0.05\n\n # Define LayerDimensions\n # inputDim = 1\n layerDim = 3\n\n # Weight initializer\n initializerNonNeg = tf.keras.initializers.RandomUniform(minval=0, maxval=0.5, seed=None)\n initializer = tf.keras.initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=None)\n\n def convexLayer(layerInput_z: Tensor, netInput_x: Tensor) -> Tensor:\n # Weighted sum of previous layers output plus bias\n weightedNonNegSum_z = layers.Dense(layerDim, kernel_constraint=NonNeg(), activation=None,\n kernel_initializer=initializerNonNeg,\n use_bias=True,\n bias_initializer='zeros'\n # name='in_z_NN_Dense'\n )(layerInput_z)\n # Weighted sum of network input\n weightedSum_x = layers.Dense(layerDim, activation=None,\n kernel_initializer=initializer,\n use_bias=False\n # name='in_x_Dense'\n )(netInput_x)\n # Wz+Wx+b\n intermediateSum = layers.Add()([weightedSum_x, weightedNonNegSum_z])\n\n # activation\n out = tf.keras.activations.softplus(intermediateSum)\n # batch normalization\n # out = layers.BatchNormalization()(out)\n return out\n\n def 
convexLayerOutput(layerInput_z: Tensor, netInput_x: Tensor) -> Tensor:\n # Weighted sum of previous layers output plus bias\n weightedNonNegSum_z = layers.Dense(1, kernel_constraint=NonNeg(), activation=None,\n kernel_initializer=initializerNonNeg,\n use_bias=True,\n bias_initializer='zeros'\n # name='in_z_NN_Dense'\n )(layerInput_z)\n # Weighted sum of network input\n weightedSum_x = layers.Dense(1, activation=None,\n kernel_initializer=initializer,\n use_bias=False\n # name='in_x_Dense'\n )(netInput_x)\n # Wz+Wx+b\n intermediateSum = layers.Add()([weightedSum_x, weightedNonNegSum_z])\n\n # activation\n out = tf.keras.activations.softplus(intermediateSum)\n # batch normalization\n # out = layers.BatchNormalization()(out)\n return out\n\n # Number of basis functions used:\n\n input_ = keras.Input(shape=(1,))\n\n ### Hidden layers ###\n # First Layer is a std dense layer\n hidden = layers.Dense(3, activation=\"softplus\",\n kernel_initializer=initializer,\n bias_initializer='zeros'\n )(input_)\n # other layers are convexLayers\n hidden = convexLayer(hidden, input_)\n hidden = convexLayer(hidden, input_)\n output_ = convexLayerOutput(hidden, input_) # outputlayer\n\n # Create the model\n model = keras.Model(inputs=[input_], outputs=[output_])\n model.summary()\n\n # model.compile(loss=cLoss_FONC_varD(quadOrder,BasisDegree), optimizer='adam')#, metrics=[custom_loss1dMB, custom_loss1dMBPrime])\n model.compile(loss=\"mean_squared_error\", optimizer='adam', metrics=['mean_absolute_error'])\n return model\n\ndef createTrainingData(x):\n return -0.5 * x * x\n\ndef loadTrainingData():\n filenameU = \"trainingData_M0_u.csv\"\n filenameH = \"trainingData_M0_h.csv\"\n\n # Load Alpha\n f = open(filenameH, 'r')\n hList = list()\n uList = list()\n\n # --- Load moments u ---\n with f:\n reader = csv.reader(f)\n\n for row in reader:\n numRow = []\n for word in row:\n numRow.append(float(word))\n\n hList.append(numRow)\n\n f = open(filenameU, 'r')\n # --- Load entropy values ---\n with f:\n reader = csv.reader(f)\n\n for row in reader:\n numRow = []\n for word in row:\n numRow.append(float(word))\n uList.append(numRow)\n\n return (np.asarray(uList), np.asarray(hList))\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "numpy.amax", "numpy.absolute", "numpy.log", "tensorflow.keras.Input", "numpy.linspace", "numpy.asarray", "tensorflow.keras.layers.Dense", "tensorflow.keras.initializers.RandomNormal", "numpy.linalg.norm", "tensorflow.keras.Model", "numpy.cos", "matplotlib.pyplot.plot", "tensorflow.keras.initializers.Zeros", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.style.use", "numpy.zeros" ], [ "tensorflow.keras.models.load_model", "matplotlib.pyplot.legend", "numpy.asarray", "matplotlib.pyplot.plot", "tensorflow.keras.Input", "tensorflow.Variable", "numpy.reshape", "numpy.arange", "tensorflow.keras.layers.Add", "tensorflow.keras.callbacks.EarlyStopping", "matplotlib.pyplot.style.use", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.activations.softplus", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "matplotlib.pyplot.show", "tensorflow.keras.constraints.NonNeg", "tensorflow.GradientTape", "matplotlib.pyplot.ylabel", "tensorflow.keras.initializers.RandomUniform", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
amzkit/nilmtk
[ "9c9add85f3e85ee549412b0a50d46781f7bfbe22" ]
[ "nilmtk/dataset_converters/combed/convert_combed.py" ]
[ "from __future__ import print_function, division\nfrom os.path import join, isdir, dirname, abspath\nfrom os import getcwd\nimport os\nfrom sys import getfilesystemencoding\nfrom inspect import currentframe, getfile, getsourcefile\nfrom collections import OrderedDict\nfrom six import iteritems\n\nimport pandas as pd\nfrom nilm_metadata import convert_yaml_to_hdf5\n\nfrom nilmtk.datastore import Key\nfrom nilmtk.measurement import LEVEL_NAMES\nfrom nilmtk.utils import check_directory_exists, get_datastore\n\n#{\"load_type\": {\"floor/wing\":meter_number_in_nilmtk}\nacad_block_meter_mapping = {'Building Total Mains': {'0': 1},\n 'Lifts': {'0': 2},\n 'Floor Total': {'1': 3, '2': 4, '3': 5, '4': 6, '5': 7},\n 'AHU': {'0': 8, '1': 9, '2': 10, '5': 11},\n 'Light': {'3': 12},\n 'Power Sockets': {'3': 13},\n 'UPS Sockets': {'3': 14}}\n\nlecture_block_meter_mapping = {'Building Total Mains': {'0': 1},\n 'Floor Total': {'0': 2, '1': 3, '2': 4},\n 'AHU': {'1': 5, '2': 6, '3': 7}}\n\noverall_dataset_mapping = OrderedDict({'Academic Block': acad_block_meter_mapping,\n 'Lecture Block': lecture_block_meter_mapping})\n\nbuilding_number_mapping = {'Academic Block': 1, 'Lecture Block': 2}\n\n\ncolumn_mapping = OrderedDict({\n 'Power': ('power', 'active'),\n 'Energy': ('energy', 'active'),\n 'Current': ('current', '')})\n\n\ndef convert_combed(combed_path, output_filename, format='HDF'):\n \"\"\"\n Parameters\n ----------\n combed_path : str\n The root path of the combed dataset.\n output_filename : str\n The destination HDF5 filename (including path and suffix).\n \"\"\"\n\n check_directory_exists(combed_path)\n\n # Open store\n store = get_datastore(output_filename, format, mode='w')\n\n any_file_converted = False\n \n for building_name, building_mapping in iteritems(overall_dataset_mapping):\n for load_name, load_mapping in iteritems(building_mapping):\n for load_mapping_path, meter_number in iteritems(load_mapping):\n building_number = building_number_mapping[building_name]\n key = Key(building=building_number, meter=meter_number)\n dfs = []\n for attribute in column_mapping.keys():\n filename_attribute = join(combed_path, building_name, load_name, load_mapping_path, \"%s.csv\" %attribute)\n if not os.path.isfile(filename_attribute):\n # File not found directly in the combed_path provided\n # Try adding 'iiitd' to it\n filename_attribute = join(combed_path, 'iiitd', building_name, load_name, load_mapping_path, \"%s.csv\" %attribute)\n \n if os.path.isfile(filename_attribute):\n exists = True\n print(filename_attribute)\n df = pd.read_csv(filename_attribute, names=[\"timestamp\", attribute])\n df.index = pd.to_datetime(df[\"timestamp\"], unit='ms')\n df = df.drop(\"timestamp\", 1)\n dfs.append(df)\n else:\n exists = False\n \n if exists:\n total = pd.concat(dfs, axis=1)\n total = total.tz_localize('UTC').tz_convert('Asia/Kolkata')\n total.columns = pd.MultiIndex.from_tuples([column_mapping[x] for x in total.columns])\n total.columns.set_names(LEVEL_NAMES, inplace=True)\n assert total.index.is_unique\n store.put(str(key), total)\n any_file_converted = True\n \n if not any_file_converted:\n raise RuntimeError('No files converted, did you specify the correct path?')\n \n convert_yaml_to_hdf5(join(_get_module_directory(), 'metadata'),\n output_filename)\n\n print(\"Done converting COMBED to HDF5!\")\n\n \ndef _get_module_directory():\n # Taken from http://stackoverflow.com/a/6098238/732596\n path_to_this_file = dirname(getfile(currentframe()))\n if not isdir(path_to_this_file):\n encoding = 
getfilesystemencoding()\n path_to_this_file = dirname(unicode(__file__, encoding))\n if not isdir(path_to_this_file):\n abspath(getsourcefile(lambda _: None))\n if not isdir(path_to_this_file):\n path_to_this_file = getcwd()\n assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'\n return path_to_this_file\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.to_datetime", "pandas.MultiIndex.from_tuples" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
skailuxspa/OpenCV-Tools
[ "ef0d16533722779e3a3dd40de494e28c1aa1a39e" ]
[ "calibration.py" ]
[ "import numpy as np\r\nimport cv2\r\n\r\ncalibration_state = 0\r\ndebug_state = 1\r\nrunning_state = 2\r\nstate = calibration_state\r\n\r\ncalibration_frame_max = 100\r\ncalibration_frame_current = 0\r\nHmin, Hmax, Hmean, Hstdv = 0, 0, 0, 0\r\nSmin, Smax, Smean, Sstdv = 0, 0, 0, 0\r\nlower_bound, upper_bound = 0, 0\r\n\r\nbeta_1 = 2.5\r\nbeta_2 = 2.5\r\n\r\nchroma_mask = 0\r\n\r\ndef initialize_calibration():\r\n print(\"restarting calibration\")\r\n calibration_frame_max = 100\r\n calibration_frame_current = 0\r\n Hmin, Hmax, Hmean, Hstdv = 0, 0, 0, 0\r\n Smin, Smax, Smean, Sstdv = 0, 0, 0, 0\r\n state = calibration_state\r\n \r\ndef calculate_bounds():\r\n Hmin = np.clip(Hmean - ((beta_1/100) * Hstdv), 0, 255)\r\n Hmax = np.clip(Hmean + ((beta_1/100) * Hstdv), 0, 255)\r\n Smin = np.clip(Smean - ((beta_2/100) * Sstdv), 0, 255)\r\n Smax = np.clip(Smean + ((beta_2/100) * Sstdv), 0, 255)\r\n lower_bound = np.array([Hmin, Smin, 50], dtype=np.uint8)\r\n upper_bound = np.array([Hmax, Smax, 255], dtype=np.uint8)\r\n chroma_mask = cv2.inRange(frame_hsv, lower_bound, upper_bound)\r\n\r\ndef change_b1(x):\r\n print(\"beta 1:\", x)\r\n print(Hmin, Hmax, Hmean, Hstdv)\r\n beta_1 = x\r\n\r\ndef change_b2(x):\r\n print(\"beta 2:\", x)\r\n print(Smin, Smax, Smean, Sstdv)\r\n beta_2 = x\r\n\r\ncv2.namedWindow(\"Sliders\")\r\ncv2.createTrackbar(\"Beta 1\", \"Sliders\", 6, 10, change_b1)\r\ncv2.createTrackbar(\"Beta 2\", \"Sliders\", 6, 10, change_b2)\r\n\r\ncap = cv2.VideoCapture(1)\r\n\r\nwhile(True):\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n if state is calibration_state:\r\n print(\"Current calibration frame:\", calibration_frame_current)\r\n #split hsv channels\r\n h, s, v = cv2.split(frame_hsv)\r\n\r\n #calculate mean and stdv for current frames h and s channels\r\n buffer_Hmean, buffer_Hstdv = cv2.meanStdDev(h)\r\n buffer_Smean, buffer_Sstdv = cv2.meanStdDev(s)\r\n \r\n #accumulate the buffers\r\n Hmean += buffer_Hmean\r\n Hstdv += buffer_Hstdv\r\n Smean += buffer_Smean\r\n Sstdv += buffer_Sstdv\r\n\r\n calibration_frame_current += 1\r\n if calibration_frame_current is calibration_frame_max - 1:\r\n #calibration algorithm\r\n Hmean = Hmean / calibration_frame_max\r\n Hstdv = Hstdv / calibration_frame_max\r\n Smean = Smean / calibration_frame_max\r\n Sstdv = Sstdv / calibration_frame_max\r\n \r\n Hmin = np.clip(Hmean - (beta_1 * Hstdv), 0, 255)\r\n Hmax = np.clip(Hmean + (beta_1 * Hstdv), 0, 255)\r\n Smin = np.clip(Smean - (beta_2 * Sstdv), 0, 255)\r\n Smax = np.clip(Smean + (beta_2 * Sstdv), 0, 255)\r\n lower_bound = np.array([Hmin, Smin, 0], dtype=np.uint8)\r\n upper_bound = np.array([Hmax, Smax, 255], dtype=np.uint8)\r\n chroma_mask = 255 - cv2.inRange(frame_hsv, lower_bound, upper_bound)\r\n kernel = np.ones((3,3), np.uint8)\r\n chroma_mask = cv2.morphologyEx(chroma_mask, cv2.MORPH_OPEN, kernel)\r\n\r\n #next state change\r\n state = debug_state\r\n print(\"Hmean:\", Hmean, \"Hstdv:\", Hstdv, \"Hmin:\", Hmin, \"Hmax:\", Hmax)\r\n print(\"Smean:\", Smean, \"Sstdv:\", Sstdv, \"Smin:\", Smin, \"Smax:\", Smax)\r\n print(\"going to debug state\")\r\n\r\n elif state is debug_state:\r\n Hmin = np.clip(Hmean - (beta_1 * Hstdv), 0, 255)\r\n Hmax = np.clip(Hmean + (beta_1 * Hstdv), 0, 255)\r\n Smin = np.clip(Smean - (beta_2 * Sstdv), 0, 255)\r\n Smax = np.clip(Smean + (beta_2 * Sstdv), 0, 255)\r\n lower_bound = np.array([Hmin, Smin, 0], dtype=np.uint8)\r\n upper_bound = np.array([Hmax, Smax, 255], dtype=np.uint8)\r\n 
chroma_mask = 255 - cv2.inRange(frame_hsv, lower_bound, upper_bound)\r\n kernel = np.ones((3,3), np.uint8)\r\n chroma_mask = cv2.morphologyEx(chroma_mask, cv2.MORPH_OPEN, kernel)\r\n #chroma_mask = cv2.erode(chroma_mask, kernel, iterations = 1)\r\n# elif state is running_state:\r\n\r\n if state is calibration_state:\r\n cv2.imshow(\"asdf\", frame)\r\n elif state is debug_state:\r\n calibrated_frame = cv2.bitwise_and(frame, frame, mask=chroma_mask)\r\n cv2.imshow(\"asdf\", calibrated_frame)\r\n cv2.imshow(\"mask\", chroma_mask)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('c'):\r\n #restarting calibration\r\n print(\"restarting calibration\")\r\n calibration_frame_max = 100\r\n calibration_frame_current = 0\r\n Hmin, Hmax, Hmean, Hstdv = 0, 0, 0, 0\r\n Smin, Smax, Smean, Sstdv = 0, 0, 0, 0\r\n state = calibration_state\r\n #initialize_calibration()\r\n\r\n # Quit the thing\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n# When everything done, release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()" ]
[ [ "numpy.ones", "numpy.array", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]