repo_name: string (lengths 8–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
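A minimal sketch of reading rows with this schema, assuming the records are stored as JSON Lines with exactly the columns listed above; the file name "data.jsonl" and the helper name `iter_rows` are hypothetical, not part of the dataset itself. Each of `hexsha`, `file_path`, `code`, and `apis` is a parallel per-file sequence within one repository row.

```python
# Sketch only: assumes one JSON object per line with the columns above.
import json


def iter_rows(path="data.jsonl"):
    """Yield (repo_name, hexsha, file_path, code, apis) per file entry."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            # The sequence columns are parallel: index i describes file i.
            for sha, fp, src, api_list in zip(
                row["hexsha"], row["file_path"], row["code"], row["apis"]
            ):
                yield row["repo_name"], sha, fp, src, api_list


if __name__ == "__main__":
    for repo, sha, fp, src, api_list in iter_rows():
        print(repo, fp, f"{len(src.splitlines())} lines,", f"{len(api_list)} APIs")
        break
```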
nbortych/AEVNMT.pt
[ "360a885a3bab9212a67922f3a9bb44f26c9512fc" ]
[ "aevnmt/generate.py" ]
[ "import torch\nimport numpy as np\nimport sys\nimport time\n\nfrom aevnmt.hparams import Hyperparameters\nfrom aevnmt.data import TextDataset, RawInputTextDataset, remove_subword_tokens, postprocess\nfrom aevnmt.train_monolingual import create_model\nfrom aevnmt.train_utils import load_vocabularies, load_vocabularies_monolingual , compute_bleu\nfrom aevnmt.data.datasets import InputTextDataset\nfrom aevnmt.data.textprocessing import SentenceSplitter\nfrom aevnmt.data.textprocessing import Pipeline\nfrom aevnmt.data.textprocessing import Tokenizer, Detokenizer\nfrom aevnmt.data.textprocessing import Lowercaser, Truecaser, Recaser\nfrom aevnmt.data.textprocessing import WordSegmenter, WordDesegmenter\n\nfrom torch.utils.data import DataLoader\nfrom pathlib import Path\n\n\nclass GenerationEngine:\n\n def __init__(self, hparams):\n\n output_dir = Path(hparams.output_dir)\n verbose = hparams.verbose\n\n if hparams.vocab.prefix is None:\n hparams.vocab.prefix = str(output_dir / \"vocab\")\n hparams.vocab.shared = False\n\n # Select the correct device (GPU or CPU).\n device = torch.device(\"cuda:0\") if hparams.use_gpu else torch.device(\"cpu\")\n\n # Pre/post-processing\n if hparams.tokenize:\n src_tokenizer_lang = hparams.src\n else:\n src_tokenizer_lang = None\n\n if hparams.lowercase and hparams.truecaser_prefix:\n raise ValueError(\"You cannot use lowercasing and truecasing at the same time\")\n\n model_checkpoint = output_dir / f\"model/{hparams.criterion}/{hparams.src}.pt\"\n\n self.hparams = hparams\n self.verbose = verbose\n self.device = device\n self.model_checkpoint = model_checkpoint\n self.src_tokenizer_lang = src_tokenizer_lang\n self.pipeline = Pipeline()\n self.vocab_src = None\n self.model = None\n self.translate_fn = None\n self.n_translated = 0\n\n @staticmethod\n def make_pipeline(hparams):\n # Loading pre/post-processing models\n if hparams.verbose:\n print(\"Loading pre/post-processing models\", file=sys.stderr)\n\n postprocess = []\n\n # Tokenization\n if hparams.detokenize:\n postprocess.append(Detokenizer(hparams.tgt))\n\n # Case\n if hparams.lowercase and hparams.truecaser_prefix:\n raise ValueError(\"You cannot set --lowercase to true and provide a --truecaser_prefix at the same time\")\n\n if hparams.recase:\n postprocess.append(Recaser(hparams.tgt))\n\n # Word segmentation\n if hparams.bpe.merge:\n postprocess.append(WordDesegmenter(separator=hparams.subword_token))\n\n return Pipeline(pre=[], post=list(reversed(postprocess)))\n\n def load_statics(self):\n # Loading vocabulary\n if self.verbose:\n t0 = time.time()\n print(f\"Loading vocabularies src={self.hparams.src} tgt={self.hparams.tgt}\", file=sys.stderr)\n self.vocab_src = load_vocabularies_monolingual(self.hparams)\n\n # Load pre/post processing models and configure a pipeline\n self.pipeline = GenerationEngine.make_pipeline(self.hparams)\n\n if self.verbose:\n print(f\"Restoring model selected wrt {self.hparams.criterion} from {self.model_checkpoint}\", file=sys.stderr)\n\n model, _, _, translate_fn = create_model(self.hparams, self.vocab_src)\n\n if self.hparams.use_gpu:\n model.load_state_dict(torch.load(self.model_checkpoint))\n else:\n model.load_state_dict(torch.load(self.model_checkpoint, map_location='cpu'))\n\n self.model = model.to(self.device)\n self.translate_fn = translate_fn\n self.model.eval()\n if self.verbose:\n print(\"Done loading in %.2f seconds\" % (time.time() - t0), file=sys.stderr)\n\n def generate(self,lines , num_samples: int, stdout=sys.stdout):\n hparams = self.hparams\n 
batch_size=hparams.batch_size\n\n # Translate the data.\n num_translated = 0\n all_hypotheses = []\n\n if lines is not None:\n #Use inference network to obtain latent codes from input sentences\n if hparams.split_sentences: # This is a type of pre-processing we do not a post-processing counterpart for\n if hparams.verbose:\n print(f\"Running sentence splitter for {len(lines)} lines\")\n lines = SentenceSplitter(hparams.src).split(lines)\n if hparams.verbose:\n print(f\"Produced {len(lines)} sentences\")\n input_data = InputTextDataset(\n generator=(self.pipeline.pre(line) for line in lines),\n max_length=hparams.max_sentence_length,\n split=True)\n input_dl = DataLoader(\n input_data, batch_size=hparams.batch_size,\n shuffle=False, num_workers=4)\n input_size = len(input_data)\n\n for input_sentences in input_dl:\n\n # Sort the input sentences from long to short.\n input_sentences = np.array(input_sentences)\n seq_len = np.array([len(s.split()) for s in input_sentences])\n sort_keys = np.argsort(-seq_len)\n input_sentences = input_sentences[sort_keys]\n\n t1 = time.time()\n # Translate the sentences using the trained model.\n hypotheses = self.translate_fn(\n self.model, input_sentences,None,\n self.vocab_src,\n self.device, hparams)\n\n num_translated += len(input_sentences)\n\n # Restore the original ordering.\n inverse_sort_keys = np.argsort(sort_keys)\n all_hypotheses += hypotheses[inverse_sort_keys].tolist()\n\n if self.verbose:\n print(f\"{num_translated}/{input_size} sentences translated in {time.time() - t1:.2f} seconds.\", file=sys.stderr)\n\n else:\n input_size=num_samples\n #Sample from the prior to obtain latent codes\n if self.verbose:\n print(f\"Sampling {num_samples} sentences...\", file=sys.stderr)\n\n num_batches=num_samples//batch_size\n if num_samples % batch_size > 0:\n num_batches+=1\n\n for batch_idx in range(num_batches):\n local_batch_size=batch_size\n if batch_idx == num_batches -1 and num_samples % batch_size > 0:\n local_batch_size=num_samples % batch_size\n\n t1 = time.time()\n # Translate the sentences using the trained model.\n hypotheses = self.translate_fn(\n self.model, None,local_batch_size,\n self.vocab_src,\n self.device, hparams)\n\n num_translated += local_batch_size\n\n # Restore the original ordering.\n all_hypotheses += hypotheses.tolist()\n\n if self.verbose:\n print(f\"{num_translated}/{num_samples} sentences translated in {time.time() - t1:.2f} seconds.\", file=sys.stderr)\n\n if hparams.show_raw_output:\n for i in range(input_size):\n print(i + self.n_translated, '|||' '|||', all_hypotheses[i], file=sys.stderr)\n\n # Post-processing\n all_hypotheses = [self.pipeline.post(h) for h in all_hypotheses]\n\n if stdout is not None:\n for hypothesis in all_hypotheses:\n print(hypothesis, file=stdout)\n\n self.n_translated += input_size\n\n return all_hypotheses\n\n def generate_file(self, input_path=None, output_path=None, num_samples=100, stdout=None):\n if output_path is None:\n stdout = sys.stdout\n\n if input_path is not None:\n with open(input_path) as f:\n translations = self.generate(lines=f.readlines(), num_samples=num_samples, stdout=stdout)\n else:\n translations = self.generate(lines=None,num_samples=num_samples, stdout=stdout)\n\n # If an output file is given write the output to that file.\n if output_path is not None:\n if self.verbose:\n print(f\"\\nWriting translation output to {output_path}\", file=sys.stderr)\n with open(output_path, \"w\") as f:\n for translation in translations:\n f.write(f\"{translation}\\n\")\n\n\n\ndef 
main(hparams=None):\n # Load command line hyperparameters (and if provided from an hparams_file).\n if hparams is None:\n if \"--hparams_file\" not in sys.argv:\n # TODO This is added to prevent incorrect overriding of arguments, see Issue #14\n # When resolved, hparams.update_from_file can be used instead.\n output_dir = Path(sys.argv[sys.argv.index(\"--output_dir\") + 1])\n hparams_file = str(output_dir / \"hparams\")\n sys.argv = [sys.argv[0]] + ['--hparams_file', hparams_file] + sys.argv[1:]\n hparams = Hyperparameters(check_required=False)\n\n engine = GenerationEngine(hparams)\n\n engine.load_statics()\n\n #if hparams.translation.interactive > 0:\n # if hparams.translation.interactive == 1:\n # engine.interactive_translation()\n # else:\n # engine.interactive_translation_n(wait_for=hparams.translation.interactive)\n #elif hparams.translation.input_file == '-':\n # engine.translate_stdin()\n #else:\n # if hparams.translation.ref_file and hparams.split_sentences:\n # raise ValueError(\"If you enable sentence splitting you will compromise line-alignment with the reference\")\n # engine.translate_file(\n # input_path=hparams.translation.input_file,\n # output_path=hparams.translation.output_file,\n # reference_path=hparams.translation.ref_file\n # )\n\n if hparams.translation.input_file is not None and hparams.translation.num_prior_samples is not None:\n raise ValueError(\"If you specify an input file, you cannot sample from the prior\")\n if hparams.translation.input_file is None and hparams.translation.num_prior_samples is None:\n raise ValueError(\"You must specify either an input file or a number of prior samples\")\n engine.generate_file(input_path=hparams.translation.input_file,output_path=hparams.translation.output_file,num_samples=hparams.translation.num_prior_samples)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "numpy.argsort", "numpy.array", "torch.device" ] ]
MuhammadFadhilArkan/gym-pybullet-drones
[ "b0670e4b0289210954632c6919ff9b5dc740a61f" ]
[ "experiments/performance/script.py" ]
[ "\"\"\"Only used to spawn multiple simulations and evaluate performance.\n\nThis script is similar to `examples/fly.py` and used by `parallelism.sh`.\n\n\"\"\"\nimport os\nimport time\nimport argparse\nfrom datetime import datetime\nimport pdb\nimport math\nimport random\nimport numpy as np\nimport pybullet as p\nimport matplotlib.pyplot as plt\n\nfrom gym_pybullet_drones.envs.BaseAviary import DroneModel, Physics\nfrom gym_pybullet_drones.envs.CtrlAviary import CtrlAviary\nfrom gym_pybullet_drones.envs.VisionAviary import VisionAviary\nfrom gym_pybullet_drones.control.DSLPIDControl import DSLPIDControl\nfrom gym_pybullet_drones.utils.Logger import Logger\nfrom gym_pybullet_drones.utils.utils import sync, str2bool\n\nif __name__ == \"__main__\":\n\n #### Define and parse (optional) arguments for the script ##\n parser = argparse.ArgumentParser(description='Helix flight script using CtrlAviary or VisionAviary and DSLPIDControl')\n parser.add_argument('--drone', default=\"cf2x\", type=DroneModel, help='Drone model (default: CF2X)', metavar='', choices=DroneModel)\n parser.add_argument('--num_drones', default=3, type=int, help='Number of drones (default: 3)', metavar='')\n parser.add_argument('--physics', default=\"pyb\", type=Physics, help='Physics updates (default: PYB)', metavar='', choices=Physics)\n parser.add_argument('--vision', default=False, type=str2bool, help='Whether to use VisionAviary (default: False)', metavar='')\n parser.add_argument('--gui', default=True, type=str2bool, help='Whether to use PyBullet GUI (default: True)', metavar='')\n parser.add_argument('--record_video', default=False, type=str2bool, help='Whether to record a video (default: False)', metavar='')\n parser.add_argument('--plot', default=True, type=str2bool, help='Whether to plot the simulation results (default: True)', metavar='')\n parser.add_argument('--user_debug_gui', default=False, type=str2bool, help='Whether to add debug lines and parameters to the GUI (default: False)', metavar='')\n parser.add_argument('--aggregate', default=False, type=str2bool, help='Whether to aggregate physics steps (default: False)', metavar='')\n parser.add_argument('--obstacles', default=True, type=str2bool, help='Whether to add obstacles to the environment (default: True)', metavar='')\n parser.add_argument('--simulation_freq_hz', default=240, type=int, help='Simulation frequency in Hz (default: 240)', metavar='')\n parser.add_argument('--control_freq_hz', default=48, type=int, help='Control frequency in Hz (default: 48)', metavar='')\n parser.add_argument('--duration_sec', default=5, type=int, help='Duration of the simulation in seconds (default: 5)', metavar='')\n ARGS = parser.parse_args()\n\n #### Initialize the simulation #############################\n H = .1\n H_STEP = .05\n R = .3\n INIT_XYZS = np.array([[R*np.cos((i/6)*2*np.pi+np.pi/2), R*np.sin((i/6)*2*np.pi+np.pi/2)-R, H+i*H_STEP] for i in range(ARGS.num_drones)])\n AGGR_PHY_STEPS = int(ARGS.simulation_freq_hz/ARGS.control_freq_hz) if ARGS.aggregate else 1\n\n #### Create the environment with or without video capture ##\n if ARGS.vision:\n env = VisionAviary(drone_model=ARGS.drone,\n num_drones=ARGS.num_drones,\n initial_xyzs=INIT_XYZS,\n physics=ARGS.physics,\n neighbourhood_radius=10,\n freq=ARGS.simulation_freq_hz,\n aggregate_phy_steps=AGGR_PHY_STEPS,\n gui=ARGS.gui,\n record=ARGS.record_video,\n obstacles=ARGS.obstacles\n )\n else: \n env = CtrlAviary(drone_model=ARGS.drone,\n num_drones=ARGS.num_drones,\n initial_xyzs=INIT_XYZS,\n physics=ARGS.physics,\n 
neighbourhood_radius=10,\n freq=ARGS.simulation_freq_hz,\n aggregate_phy_steps=AGGR_PHY_STEPS,\n gui=ARGS.gui,\n record=ARGS.record_video,\n obstacles=ARGS.obstacles,\n user_debug_gui=ARGS.user_debug_gui\n )\n\n #### Initialize a circular trajectory ######################\n PERIOD = 10\n NUM_WP = ARGS.control_freq_hz*PERIOD\n TARGET_POS = np.zeros((NUM_WP, 3))\n for i in range(NUM_WP):\n TARGET_POS[i, :] = R*np.cos((i/NUM_WP)*(2*np.pi)+np.pi/2)+INIT_XYZS[0, 0], R*np.sin((i/NUM_WP)*(2*np.pi)+np.pi/2)-R+INIT_XYZS[0, 1], INIT_XYZS[0, 2]\n wp_counters = np.array([int((i*NUM_WP/6)%NUM_WP) for i in range(ARGS.num_drones)])\n\n #### Initialize the logger #################################\n logger = Logger(logging_freq_hz=int(ARGS.simulation_freq_hz/AGGR_PHY_STEPS),\n num_drones=ARGS.num_drones\n )\n\n #### Initialize the controllers ############################\n ctrl = [DSLPIDControl(drone_model=ARGS.drone) for i in range(ARGS.num_drones)]\n\n #### Run the simulation ####################################\n CTRL_EVERY_N_STEPS= int(np.floor(env.SIM_FREQ/ARGS.control_freq_hz))\n action = {str(i): np.array([0, 0, 0, 0]) for i in range(ARGS.num_drones)}\n START = time.time()\n for i in range(0, int(ARGS.duration_sec*env.SIM_FREQ), AGGR_PHY_STEPS):\n\n #### Step the simulation ###################################\n obs, reward, done, info = env.step(action)\n\n #### Compute control at the desired frequency @@@@@#########\n if i%CTRL_EVERY_N_STEPS == 0:\n\n #### Compute control for the current way point #############\n for j in range(ARGS.num_drones):\n action[str(j)], _, _ = ctrl[j].computeControlFromState(control_timestep=CTRL_EVERY_N_STEPS*env.TIMESTEP,\n state=obs[str(j)][\"state\"],\n target_pos=np.hstack([TARGET_POS[wp_counters[j], 0:2], H+j*H_STEP])\n )\n\n #### Go to the next way point and loop #####################\n for j in range(ARGS.num_drones):\n wp_counters[j] = wp_counters[j] + 1 if wp_counters[j] < (NUM_WP-1) else 0\n\n #### Log the simulation ####################################\n for j in range(ARGS.num_drones):\n logger.log(drone=j,\n timestamp=i/env.SIM_FREQ,\n state= obs[str(j)][\"state\"],\n control=np.hstack([TARGET_POS[wp_counters[j], 0:2], H+j*H_STEP, np.zeros(9)])\n )\n\n #### Printout ##############################################\n if i%env.SIM_FREQ == 0:\n env.render()\n #### Print matrices with the images captured by each drone #\n if ARGS.vision:\n for j in range(ARGS.num_drones): \n print(obs[str(j)][\"rgb\"].shape, np.average(obs[str(j)][\"rgb\"]),\n obs[str(j)][\"dep\"].shape, np.average(obs[str(j)][\"dep\"]),\n obs[str(j)][\"seg\"].shape, np.average(obs[str(j)][\"seg\"])\n )\n\n #### Sync the simulation ###################################\n if ARGS.gui:\n sync(i, START, env.TIMESTEP)\n\n #### Close the environment #################################\n env.close()\n\n #### Save the simulation results ###########################\n logger.save()\n\n #### Plot the simulation results ###########################\n if ARGS.plot:\n logger.plot()\n" ]
[ [ "numpy.zeros", "numpy.floor", "numpy.cos", "numpy.hstack", "numpy.array", "numpy.sin" ] ]
AbhilashReddyM/curvpack
[ "74351624ec9ec50ec4445c7be85a48a4eabb029a" ]
[ "curvpack/CurvatureISF.py" ]
[ "\r\n\"\"\"\r\nAbhilash Reddy Malipeddi. January 2017\r\nCalculate the mean and gaussian curvature at a vertex in a tri mesh using\r\nusing an iterative fitting method similar to what is given in [Garimella and Swartz],\r\n[Yazdani and Bagchi], etc.\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom numpy.linalg import lstsq\r\nfrom .utils import triangle_neighbors,GetVertexNormals,get_surf_neighbors,fastcross,normr\r\n\r\n\r\ndef CurvatureISF1(vertices,faces):\r\n '''\r\n This uses a two-ring neighborhood around a point. \r\n '''\r\n tol=1e-10\r\n \r\n npt=vertices.shape[0]\r\n neighbor_tri=triangle_neighbors(faces,npt)\r\n\r\n neighbor_verts= np.array([get_surf_neighbors(faces,neighbor_tri, k)\r\n for k in range(npt)])\r\n\r\n e0=vertices[faces[:,2]]-vertices[faces[:,1]]\r\n e1=vertices[faces[:,0]]-vertices[faces[:,2]]\r\n e2=vertices[faces[:,1]]-vertices[faces[:,0]]\r\n\r\n e0_norm=normr(e0)\r\n e1_norm=normr(e1)\r\n e2_norm=normr(e2)\r\n\r\n FaceNormals=0.5*fastcross(e0,e1)\r\n VN=GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2)\r\n up = np.zeros(vertices.shape)\r\n #Calculate initial coordinate system\r\n up[faces[:,0]]=e2_norm\r\n up[faces[:,1]]=e0_norm\r\n up[faces[:,2]]=e1_norm\r\n\r\n #Calculate initial vertex coordinate system\r\n up=fastcross(VN,up)\r\n up=normr(up)\r\n vp=fastcross(up,VN)\r\n vp=normr(vp)\r\n\r\n qj=np.zeros([30,3])\r\n A =np.zeros([36,5])\r\n B =np.zeros([36,1])\r\n\r\n H=np.zeros(npt)\r\n K=np.zeros(npt)\r\n\r\n\r\n for i in range(npt):\r\n n1=up[i]\r\n n2=vp[i]\r\n n3=VN[i]\r\n\r\n nbrs=np.unique(np.hstack(neighbor_verts[neighbor_verts[i]].flat))\r\n nbrs=np.setdiff1d(nbrs,i)\r\n\r\n for _ in range(30):\r\n for j,pj in enumerate(vertices[nbrs]):\r\n qj[j]=np.array([np.dot(pj-vertices[i],n1),\r\n np.dot(pj-vertices[i],n2),\r\n np.dot(pj-vertices[i],n3)])\r\n j=0\r\n k=0\r\n for (x,y,z) in qj:\r\n k+=1\r\n if k==len(nbrs):\r\n break\r\n scale = 2/(x**2+y**2)\r\n A[j] = scale*np.array([ x**2, x*y, y**2, x, y])\r\n B[j] = scale*z\r\n j+=1\r\n\r\n X=lstsq(A[:len(nbrs),:],B[:len(nbrs)],rcond=None)\r\n\r\n a,b,c,d,e=X[0]\r\n\r\n factor=1.0/np.sqrt(1.0+d[0]**2+e[0]**2)\r\n oldn3=n3.copy()\r\n n3=factor*np.array([-d[0],-e[0],1.0])\r\n\r\n n3=np.c_[n1,n2,oldn3].dot(n3)#new normal in local coordinates\r\n VN[i]=n3 #new normal in global coordinates. up,vp,VN system is not orthogonal anymore, but that is okay as it is not used again\r\n n2=np.cross(n1,n3)\r\n n2=n2/np.linalg.norm(n2)\r\n n1=np.cross(n3,n2)\r\n n1=n1/np.linalg.norm(n1)\r\n\r\n H[i]=factor**3*(a+c+a*e**2+c*d**2-b*d*e)\r\n K[i]=factor**4*(4*a*c-b**2)\r\n if np.linalg.norm(n3-oldn3) <tol:\r\n break\r\n return K,-H,VN\r\n\r\ndef CurvatureISF2(vertices,faces):\r\n '''\r\n This is a slight modification of the previous. Here we only use the one ring\r\n but we include the vertex normals in the fitting procedure. This indirectly has\r\n two ring support because the vertex normals themselves are calculated\r\n as a weighted average of the face normals. 
Sidenote: I wonder what happens if we include both\r\n vertex and face normals in the fitting procedure....\r\n '''\r\n tol=1e-10\r\n npt=vertices.shape[0]\r\n neighbor_tri=triangle_neighbors(faces,npt)\r\n\r\n neighbor_verts= np.array([get_surf_neighbors(faces,neighbor_tri, k)\r\n for k in range(npt)])\r\n\r\n e0=vertices[faces[:,2]]-vertices[faces[:,1]]\r\n e1=vertices[faces[:,0]]-vertices[faces[:,2]]\r\n e2=vertices[faces[:,1]]-vertices[faces[:,0]]\r\n\r\n e0_norm=normr(e0)\r\n e1_norm=normr(e1)\r\n e2_norm=normr(e2)\r\n\r\n FaceNormals=0.5*fastcross(e0,e1)\r\n VN=GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2)\r\n up = np.zeros(vertices.shape)\r\n #Calculate initial coordinate system\r\n up[faces[:,0]]=e2_norm\r\n up[faces[:,1]]=e0_norm\r\n up[faces[:,2]]=e1_norm\r\n\r\n #Calculate initial vertex coordinate system\r\n up=fastcross(VN,up)\r\n up=normr(up)\r\n vp=fastcross(up,VN)\r\n vp=normr(vp)\r\n\r\n qj=np.zeros([12,5])\r\n A =np.zeros([36,5])\r\n B =np.zeros([36,1])\r\n\r\n H=np.zeros(npt)\r\n K=np.zeros(npt)\r\n VNnew=np.zeros_like(VN)\r\n\r\n for i in range(npt):\r\n n1=up[i]\r\n n2=vp[i]\r\n n3=VN[i]\r\n for iter in range(30):\r\n for j,(pj,nj) in enumerate(zip(vertices[neighbor_verts[i]],VN[neighbor_verts[i]])):\r\n qj[j]=np.array([np.dot(pj-vertices[i],n1),\r\n np.dot(pj-vertices[i],n2),\r\n np.dot(pj-vertices[i],n3),\r\n -np.dot(nj,n1)/np.dot(nj,n3),\r\n -np.dot(nj,n2)/np.dot(nj,n3)])\r\n j=0\r\n k=0\r\n for (x,y,z,nx,ny) in qj:\r\n k+=1\r\n if k==len(neighbor_verts[i]):\r\n break\r\n scale=2/(x**2+y**2)\r\n A[j] = scale*np.array([ x**2, x*y, y**2, x, y])\r\n A[j+1] = scale*np.array([ 2*x, y, 0, 1, 0])\r\n A[j+2] = scale*np.array([ 0, x, 2*y, 0, 1])\r\n B[j] = scale*z\r\n B[j+1] = scale*nx\r\n B[j+2] = scale*ny\r\n j+=3\r\n\r\n X=lstsq(A[:3*len(neighbor_verts[i]),:],B[:3*len(neighbor_verts[i])],rcond=None)\r\n a,b,c,d,e=X[0]\r\n factor=1.0/np.sqrt(1.0+d[0]**2+e[0]**2)\r\n H[i]=factor**3*(a+c+a*e**2+c*d**2-b*d*e)\r\n K[i]=factor**4*(4*a*c-b**2)\r\n\r\n oldn3=n3.copy()\r\n n3=factor*np.array([-d[0],-e[0],1.0])#new normal in local coordinates\r\n n3=np.c_[n1,n2,oldn3].dot(n3) #new normal in global coordinates\r\n n2=np.cross(n1,n3)\r\n n2=n2/np.linalg.norm(n2)\r\n n1=np.cross(n3,n2)\r\n n1=n1/np.linalg.norm(n1)\r\n\r\n\r\n if np.linalg.norm(n3-oldn3) <tol:\r\n up[i]=n1\r\n vp[i]=n2\r\n VN[i]=n3\r\n break\r\n return K,-H,VN\r\n\r\ndef CurvatureISF3(vertices,faces):\r\n '''\r\n Uses two ring vertices and normals.\r\n '''\r\n tol=1e-10\r\n npt=vertices.shape[0]\r\n neighbor_tri=triangle_neighbors(faces,npt)\r\n\r\n neighbor_verts= np.array([get_surf_neighbors(faces,neighbor_tri, k)\r\n for k in range(npt)])\r\n\r\n e0=vertices[faces[:,2]]-vertices[faces[:,1]]\r\n e1=vertices[faces[:,0]]-vertices[faces[:,2]]\r\n e2=vertices[faces[:,1]]-vertices[faces[:,0]]\r\n\r\n e0_norm=normr(e0)\r\n e1_norm=normr(e1)\r\n e2_norm=normr(e2)\r\n\r\n FaceNormals=0.5*fastcross(e0,e1)\r\n VN=GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2)\r\n up = np.zeros(vertices.shape)\r\n #Calculate initial coordinate system\r\n up[faces[:,0]]=e2_norm\r\n up[faces[:,1]]=e0_norm\r\n up[faces[:,2]]=e1_norm\r\n\r\n #Calculate initial vertex coordinate system\r\n up=fastcross(VN,up)\r\n up=normr(up)\r\n vp=fastcross(up,VN)\r\n vp=normr(vp)\r\n\r\n qj=np.zeros([100,5])\r\n A =np.zeros([200,5])\r\n B =np.zeros([200,1])\r\n\r\n H=np.zeros(npt)\r\n K=np.zeros(npt)\r\n VNnew=np.zeros_like(VN)\r\n\r\n for i in range(npt):\r\n n1=up[i]\r\n n2=vp[i]\r\n n3=VN[i]\r\n 
nbrs=np.unique(np.hstack(neighbor_verts[neighbor_verts[i]].flat))\r\n nbrs=np.setdiff1d(nbrs,i)\r\n\r\n for iter in range(30):\r\n for j,(pj,nj) in enumerate(zip(vertices[nbrs],VN[nbrs])):\r\n qj[j]=np.array([np.dot(pj-vertices[i],n1),\r\n np.dot(pj-vertices[i],n2),\r\n np.dot(pj-vertices[i],n3),\r\n -np.dot(nj,n1)/np.dot(nj,n3),\r\n -np.dot(nj,n2)/np.dot(nj,n3)])\r\n j=0\r\n k=0\r\n for (x,y,z,nx,ny) in qj:\r\n k+=1\r\n if k==len(nbrs):\r\n break\r\n scale=2/(x**2+y**2)\r\n A[j] = scale*np.array([ x**2, x*y, y**2, x, y])\r\n A[j+1] = scale*np.array([ 2*x, y, 0, 1, 0])\r\n A[j+2] = scale*np.array([ 0, x, 2*y, 0, 1])\r\n B[j] = scale*z\r\n B[j+1] = scale*nx\r\n B[j+2] = scale*ny\r\n j+=3\r\n\r\n X=lstsq(A[:3*len(nbrs),:],B[:3*len(nbrs)],rcond=None)\r\n a,b,c,d,e=X[0]\r\n factor=1.0/np.sqrt(1.0+d[0]**2+e[0]**2)\r\n H[i]=factor**3*(a+c+a*e**2+c*d**2-b*d*e)\r\n K[i]=factor**4*(4*a*c-b**2)\r\n\r\n oldn3=n3.copy()\r\n n3=factor*np.array([-d[0],-e[0],1.0])#new normal in local coordinates\r\n n3=np.c_[n1,n2,oldn3].dot(n3) #new normal in global coordinates\r\n n2=np.cross(n1,n3)\r\n n2=n2/np.linalg.norm(n2)\r\n n1=np.cross(n3,n2)\r\n n1=n1/np.linalg.norm(n1)\r\n\r\n if np.linalg.norm(n3-oldn3) <tol:\r\n up[i]=n1\r\n vp[i]=n2\r\n VN[i]=n3\r\n break\r\n return K,-H,VN\r\n" ]
[ [ "numpy.zeros_like", "numpy.linalg.norm", "numpy.zeros", "numpy.setdiff1d", "numpy.cross", "numpy.hstack", "numpy.sqrt", "numpy.dot", "numpy.array" ] ]
Khushbu0610/allennlp
[ "60deece9fca2da6b66bfcde44484384bdefa3fe7" ]
[ "allennlp/modules/token_embedders/pretrained_transformer_embedder.py" ]
[ "import math\nfrom typing import Optional, Tuple\n\nfrom overrides import overrides\n\nimport torch\nimport torch.nn.functional as F\nfrom transformers import XLNetConfig\n\nfrom allennlp.data.tokenizers import PretrainedTransformerTokenizer\nfrom allennlp.modules.token_embedders.token_embedder import TokenEmbedder\nfrom allennlp.nn.util import batched_index_select\n\n\[email protected](\"pretrained_transformer\")\nclass PretrainedTransformerEmbedder(TokenEmbedder):\n \"\"\"\n Uses a pretrained model from `transformers` as a `TokenEmbedder`.\n\n Registered as a `TokenEmbedder` with name \"pretrained_transformer\".\n\n # Parameters\n\n model_name : `str`\n The name of the `transformers` model to use. Should be the same as the corresponding\n `PretrainedTransformerIndexer`.\n max_length : `int`, optional (default = `None`)\n If positive, folds input token IDs into multiple segments of this length, pass them\n through the transformer model independently, and concatenate the final representations.\n Should be set to the same value as the `max_length` option on the\n `PretrainedTransformerIndexer`.\n sub_module: `str`, optional (default = `None`)\n The name of a submodule of the transformer to be used as the embedder. Some transformers naturally act\n as embedders such as BERT. However, other models consist of encoder and decoder, in which case we just\n want to use the encoder.\n train_parameters: `bool`, optional (default = `True`)\n If this is `True`, the transformer weights get updated during training.\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n *,\n max_length: int = None,\n sub_module: str = None,\n train_parameters: bool = True,\n override_weights_file: Optional[str] = None,\n override_weights_strip_prefix: Optional[str] = None\n ) -> None:\n super().__init__()\n from allennlp.common import cached_transformers\n\n self.transformer_model = cached_transformers.get(\n model_name, True, override_weights_file, override_weights_strip_prefix\n )\n self.config = self.transformer_model.config\n if sub_module:\n assert hasattr(self.transformer_model, sub_module)\n self.transformer_model = getattr(self.transformer_model, sub_module)\n self._max_length = max_length\n # I'm not sure if this works for all models; open an issue on github if you find a case\n # where it doesn't work.\n self.output_dim = self.config.hidden_size\n\n tokenizer = PretrainedTransformerTokenizer(model_name)\n self._num_added_start_tokens = len(tokenizer.single_sequence_start_tokens)\n self._num_added_end_tokens = len(tokenizer.single_sequence_end_tokens)\n self._num_added_tokens = self._num_added_start_tokens + self._num_added_end_tokens\n\n if not train_parameters:\n for param in self.transformer_model.parameters():\n param.requires_grad = False\n\n @overrides\n def get_output_dim(self):\n return self.output_dim\n\n def _number_of_token_type_embeddings(self):\n if isinstance(self.config, XLNetConfig):\n return 3 # XLNet has 3 type ids\n elif hasattr(self.config, \"type_vocab_size\"):\n return self.config.type_vocab_size\n else:\n return 0\n\n @overrides\n def forward(\n self,\n token_ids: torch.LongTensor,\n mask: torch.BoolTensor,\n type_ids: Optional[torch.LongTensor] = None,\n segment_concat_mask: Optional[torch.BoolTensor] = None,\n ) -> torch.Tensor: # type: ignore\n \"\"\"\n # Parameters\n\n token_ids: `torch.LongTensor`\n Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.\n num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the\n 
middle, e.g. the length of: \"[CLS] A B C [SEP] [CLS] D E F [SEP]\" (see indexer logic).\n mask: `torch.BoolTensor`\n Shape: [batch_size, num_wordpieces].\n type_ids: `Optional[torch.LongTensor]`\n Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.\n segment_concat_mask: `Optional[torch.BoolTensor]`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n\n # Returns\n\n `torch.Tensor`\n Shape: `[batch_size, num_wordpieces, embedding_size]`.\n\n \"\"\"\n # Some of the huggingface transformers don't support type ids at all and crash when you supply\n # them. For others, you can supply a tensor of zeros, and if you don't, they act as if you did.\n # There is no practical difference to the caller, so here we pretend that one case is the same\n # as another case.\n if type_ids is not None:\n max_type_id = type_ids.max()\n if max_type_id == 0:\n type_ids = None\n else:\n if max_type_id >= self._number_of_token_type_embeddings():\n raise ValueError(\"Found type ids too large for the chosen transformer model.\")\n assert token_ids.shape == type_ids.shape\n\n fold_long_sequences = self._max_length is not None and token_ids.size(1) > self._max_length\n if fold_long_sequences:\n batch_size, num_segment_concat_wordpieces = token_ids.size()\n token_ids, segment_concat_mask, type_ids = self._fold_long_sequences(\n token_ids, segment_concat_mask, type_ids\n )\n\n transformer_mask = segment_concat_mask if self._max_length is not None else mask\n # Shape: [batch_size, num_wordpieces, embedding_size],\n # or if self._max_length is not None:\n # [batch_size * num_segments, self._max_length, embedding_size]\n\n # We call this with kwargs because some of the huggingface models don't have the\n # token_type_ids parameter and fail even when it's given as None.\n # Also, as of transformers v2.5.1, they are taking FloatTensor masks.\n parameters = {\"input_ids\": token_ids, \"attention_mask\": transformer_mask.float()}\n if type_ids is not None:\n parameters[\"token_type_ids\"] = type_ids\n embeddings = self.transformer_model(**parameters)[0]\n\n if fold_long_sequences:\n embeddings = self._unfold_long_sequences(\n embeddings, segment_concat_mask, batch_size, num_segment_concat_wordpieces\n )\n\n return embeddings\n\n def _fold_long_sequences(\n self,\n token_ids: torch.LongTensor,\n mask: torch.BoolTensor,\n type_ids: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.LongTensor, torch.LongTensor, Optional[torch.LongTensor]]:\n \"\"\"\n We fold 1D sequences (for each element in batch), returned by `PretrainedTransformerIndexer`\n that are in reality multiple segments concatenated together, to 2D tensors, e.g.\n\n [ [CLS] A B C [SEP] [CLS] D E [SEP] ]\n -> [ [ [CLS] A B C [SEP] ], [ [CLS] D E [SEP] [PAD] ] ]\n The [PAD] positions can be found in the returned `mask`.\n\n # Parameters\n\n token_ids: `torch.LongTensor`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the\n middle, i.e. the length of: \"[CLS] A B C [SEP] [CLS] D E F [SEP]\" (see indexer logic).\n mask: `torch.BoolTensor`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n The mask for the concatenated segments of wordpieces. 
The same as `segment_concat_mask`\n in `forward()`.\n type_ids: `Optional[torch.LongTensor]`\n Shape: [batch_size, num_segment_concat_wordpieces].\n\n # Returns:\n\n token_ids: `torch.LongTensor`\n Shape: [batch_size * num_segments, self._max_length].\n mask: `torch.BoolTensor`\n Shape: [batch_size * num_segments, self._max_length].\n \"\"\"\n num_segment_concat_wordpieces = token_ids.size(1)\n num_segments = math.ceil(num_segment_concat_wordpieces / self._max_length)\n padded_length = num_segments * self._max_length\n length_to_pad = padded_length - num_segment_concat_wordpieces\n\n def fold(tensor): # Shape: [batch_size, num_segment_concat_wordpieces]\n # Shape: [batch_size, num_segments * self._max_length]\n tensor = F.pad(tensor, [0, length_to_pad], value=0)\n # Shape: [batch_size * num_segments, self._max_length]\n return tensor.reshape(-1, self._max_length)\n\n return fold(token_ids), fold(mask), fold(type_ids) if type_ids is not None else None\n\n def _unfold_long_sequences(\n self,\n embeddings: torch.FloatTensor,\n mask: torch.BoolTensor,\n batch_size: int,\n num_segment_concat_wordpieces: int,\n ) -> torch.FloatTensor:\n \"\"\"\n We take 2D segments of a long sequence and flatten them out to get the whole sequence\n representation while remove unnecessary special tokens.\n\n [ [ [CLS]_emb A_emb B_emb C_emb [SEP]_emb ], [ [CLS]_emb D_emb E_emb [SEP]_emb [PAD]_emb ] ]\n -> [ [CLS]_emb A_emb B_emb C_emb D_emb E_emb [SEP]_emb ]\n\n We truncate the start and end tokens for all segments, recombine the segments,\n and manually add back the start and end tokens.\n\n # Parameters\n\n embeddings: `torch.FloatTensor`\n Shape: [batch_size * num_segments, self._max_length, embedding_size].\n mask: `torch.BoolTensor`\n Shape: [batch_size * num_segments, self._max_length].\n The mask for the concatenated segments of wordpieces. 
The same as `segment_concat_mask`\n in `forward()`.\n batch_size: `int`\n num_segment_concat_wordpieces: `int`\n The length of the original \"[ [CLS] A B C [SEP] [CLS] D E F [SEP] ]\", i.e.\n the original `token_ids.size(1)`.\n\n # Returns:\n\n embeddings: `torch.FloatTensor`\n Shape: [batch_size, self._num_wordpieces, embedding_size].\n \"\"\"\n\n def lengths_to_mask(lengths, max_len, device):\n return torch.arange(max_len, device=device).expand(\n lengths.size(0), max_len\n ) < lengths.unsqueeze(1)\n\n device = embeddings.device\n num_segments = int(embeddings.size(0) / batch_size)\n embedding_size = embeddings.size(2)\n\n # We want to remove all segment-level special tokens but maintain sequence-level ones\n num_wordpieces = num_segment_concat_wordpieces - (num_segments - 1) * self._num_added_tokens\n\n embeddings = embeddings.reshape(batch_size, num_segments * self._max_length, embedding_size)\n mask = mask.reshape(batch_size, num_segments * self._max_length)\n # We assume that all 1s in the mask precede all 0s, and add an assert for that.\n # Open an issue on GitHub if this breaks for you.\n # Shape: (batch_size,)\n seq_lengths = mask.sum(-1)\n if not (lengths_to_mask(seq_lengths, mask.size(1), device) == mask).all():\n raise ValueError(\n \"Long sequence splitting only supports masks with all 1s preceding all 0s.\"\n )\n # Shape: (batch_size, self._num_added_end_tokens); this is a broadcast op\n end_token_indices = (\n seq_lengths.unsqueeze(-1) - torch.arange(self._num_added_end_tokens, device=device) - 1\n )\n\n # Shape: (batch_size, self._num_added_start_tokens, embedding_size)\n start_token_embeddings = embeddings[:, : self._num_added_start_tokens, :]\n # Shape: (batch_size, self._num_added_end_tokens, embedding_size)\n end_token_embeddings = batched_index_select(embeddings, end_token_indices)\n\n embeddings = embeddings.reshape(batch_size, num_segments, self._max_length, embedding_size)\n embeddings = embeddings[\n :, :, self._num_added_start_tokens : -self._num_added_end_tokens, :\n ] # truncate segment-level start/end tokens\n embeddings = embeddings.reshape(batch_size, -1, embedding_size) # flatten\n\n # Now try to put end token embeddings back which is a little tricky.\n\n # The number of segment each sequence spans, excluding padding. Mimicking ceiling operation.\n # Shape: (batch_size,)\n num_effective_segments = (seq_lengths + self._max_length - 1) / self._max_length\n # The number of indices that end tokens should shift back.\n num_removed_non_end_tokens = (\n num_effective_segments * self._num_added_tokens - self._num_added_end_tokens\n )\n # Shape: (batch_size, self._num_added_end_tokens)\n end_token_indices -= num_removed_non_end_tokens.unsqueeze(-1)\n assert (end_token_indices >= self._num_added_start_tokens).all()\n # Add space for end embeddings\n embeddings = torch.cat([embeddings, torch.zeros_like(end_token_embeddings)], 1)\n # Add end token embeddings back\n embeddings.scatter_(\n 1, end_token_indices.unsqueeze(-1).expand_as(end_token_embeddings), end_token_embeddings\n )\n\n # Now put back start tokens. We can do this before putting back end tokens, but then\n # we need to change `num_removed_non_end_tokens` a little.\n embeddings = torch.cat([start_token_embeddings, embeddings], 1)\n\n # Truncate to original length\n embeddings = embeddings[:, :num_wordpieces, :]\n return embeddings\n" ]
[ [ "torch.nn.functional.pad", "torch.arange", "torch.cat", "torch.zeros_like" ] ]
Frandium/nni
[ "a8c12fb75af3c695b61f48a2525fd2d520860a99" ]
[ "examples/model_compress/pruning/v2/activation_pruning_torch.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n'''\nNNI example for supported ActivationAPoZRank and ActivationMeanRank pruning algorithms.\nIn this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.\nNote that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.\n\n'''\nimport argparse\nimport sys\n\nimport torch\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom nni.compression.pytorch import ModelSpeedup\nfrom nni.compression.pytorch.utils.counter import count_flops_params\nfrom nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ActivationAPoZRankPruner, ActivationMeanRankPruner\nfrom nni.algorithms.compression.v2.pytorch.utils import trace_parameters\n\nfrom pathlib import Path\nsys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))\nfrom cifar10.vgg import VGG\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nnormalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\ng_epoch = 0\n\ntrain_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ]), download=True),\n batch_size=128, shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=128, shuffle=False)\n\ndef trainer(model, optimizer, criterion):\n global g_epoch\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx and batch_idx % 100 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n g_epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.item()))\n g_epoch += 1\n\ndef evaluator(model):\n model.eval()\n correct = 0.0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n acc = 100 * correct / len(test_loader.dataset)\n print('Accuracy: {}%\\n'.format(acc))\n return acc\n\ndef optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4, total_epoch=160):\n optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay)\n scheduler = MultiStepLR(optimizer, milestones=[int(total_epoch * 0.5), int(total_epoch * 0.75)], gamma=0.1)\n return optimizer, scheduler\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch Example for model comporession')\n parser.add_argument('--pruner', type=str, default='apoz',\n choices=['apoz', 'mean'],\n help='pruner to use')\n parser.add_argument('--pretrain-epochs', type=int, default=20,\n help='number of epochs to pretrain the model')\n parser.add_argument('--fine-tune-epochs', type=int, default=20,\n help='number of epochs to fine tune the model')\n args = parser.parse_args()\n\n print('\\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50)\n model = VGG().to(device)\n optimizer, scheduler = optimizer_scheduler_generator(model, total_epoch=args.pretrain_epochs)\n criterion = torch.nn.CrossEntropyLoss()\n pre_best_acc = 0.0\n best_state_dict = None\n\n for i in range(args.pretrain_epochs):\n trainer(model, optimizer, criterion)\n scheduler.step()\n acc = evaluator(model)\n if acc > pre_best_acc:\n pre_best_acc = acc\n best_state_dict = model.state_dict()\n print(\"Best accuracy: {}\".format(pre_best_acc))\n model.load_state_dict(best_state_dict)\n pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))\n g_epoch = 0\n\n # Start to prune and speedup\n print('\\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)\n config_list = [{\n 'total_sparsity': 0.5,\n 'op_types': ['Conv2d'],\n }]\n\n # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize\n traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)\n if 'apoz' in args.pruner:\n pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)\n else:\n pruner = ActivationMeanRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)\n _, masks = pruner.compress()\n pruner.show_pruned_weights()\n pruner._unwrap_model()\n ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model()\n print('\\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50)\n evaluator(model)\n\n # Optimizer used in the pruner might be patched, so recommend to new an optimizer for fine-tuning stage.\n print('\\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50)\n optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01, total_epoch=args.fine_tune_epochs)\n\n best_acc = 0.0\n g_epoch = 0\n for i in range(args.fine_tune_epochs):\n trainer(model, optimizer, criterion)\n scheduler.step()\n best_acc = max(evaluator(model), best_acc)\n flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 
32]).to(device))\n print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%')\n print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%')\n" ]
[ [ "torch.randn", "torch.rand", "torch.no_grad", "torch.nn.CrossEntropyLoss", "torch.cuda.is_available" ] ]
taconite/MetaAvatar-release
[ "c9403a478ee82232633d25f65f108befd21d04e9" ]
[ "evaluation/lib/mesh_distance.py" ]
[ "\"\"\"\nOriginal Author: Garvita\nEdited by: Bharat\n\"\"\"\n\nimport torch\nimport kaolin as kal\nfrom kaolin.rep import Mesh\nimport kaolin.cuda.tri_distance as td\nimport numpy as np\nfrom kaolin.metrics.mesh import _compute_edge_dist, _compute_planar_dist, TriangleDistance, point_to_surface\nfrom kaolin.rep import SDF as sdf\nfrom kaolin.rep import TriangleMesh as tm\nfrom evaluation.lib.torch_functions import batch_gather\n\ndef point_to_surface_vec(points,mesh):\n \"\"\"Computes the minimum distances from a set of points to a mesh\n Args:\n points (torch.Tensor): set of points\n mesh (Mesh): mesh to calculate distance\n\n Returns:\n distance: distance between points and surface (not averaged like Kaolin point_to_surface)\n \"\"\"\n\n # extract triangle defs from mesh\n v1 = torch.index_select(mesh.vertices.clone(), 0, mesh.faces[:, 0])\n v2 = torch.index_select(mesh.vertices.clone(), 0, mesh.faces[:, 1])\n v3 = torch.index_select(mesh.vertices.clone(), 0, mesh.faces[:, 2])\n\n # if quad mesh the separate the triangles\n if mesh.faces.shape[-1] == 4:\n v4 = torch.index_select(mesh.vertices.clone(), 0, mesh.faces[:, 3])\n temp1 = v1.clone()\n temp2 = v2.clone()\n temp3 = v3.clone()\n v1 = torch.cat((v1, v1), dim=0)\n v2 = torch.cat((v2, v4), dim=0)\n v3 = torch.cat((v3, v3), dim=0)\n\n if points.is_cuda:\n\n tri_minimum_dist = TriangleDistance()\n # pass to cuda\n distance, indx, dist_type = tri_minimum_dist(points, v1, v2, v3)\n indx = indx.data.cpu().numpy()\n dist_type = torch.LongTensor(dist_type.data.cpu().numpy())\n # reconpute distances to define gradient\n grad_dist = _recompute_point_to_surface_vec(\n [v1, v2, v3], points, indx, dist_type)\n # sanity check\n # print(distance.mean(), grad_dist)\n else:\n raise NotImplementedError\n\n return grad_dist\n\n\ndef _recompute_point_to_surface_vec(verts, p, indecies, dist_type):\n # recompute surface based the calcualted correct assignments of points and triangles\n # and the type of distacne, type 1 to 3 idicates which edge to calcualte to,\n # type 4 indicates the distance is from a point on the triangle not an edge\n v1, v2, v3 = verts\n v1 = v1[indecies]\n v2 = v2[indecies]\n v3 = v3[indecies]\n\n type_1 = (dist_type == 0)\n type_2 = (dist_type == 1)\n type_3 = (dist_type == 2)\n type_4 = (dist_type == 3)\n\n v21 = v2 - v1\n v32 = v3 - v2\n v13 = v1 - v3\n\n p1 = p - v1\n p2 = p - v2\n p3 = p - v3\n\n dists = []\n dists.append(_compute_edge_dist(v21[type_1], p1[type_1]).view(-1))\n dists.append(_compute_edge_dist(v32[type_2], p2[type_2]).view(-1))\n dists.append(_compute_edge_dist(v13[type_3], p3[type_3]).view(-1))\n\n if len(np.where(type_4)[0]) > 0:\n nor = torch.cross(v21[type_4], v13[type_4])\n dists.append(_compute_planar_dist(nor, p1[type_4]))\n\n distances = torch.cat(dists)\n return distances\n\ndef normal_consistency_face(pred_trimesh, gt_Mesh, gt_trimesh):\n \"\"\"\n :param pred: predicted trimesh\n :param gt_Mesh: GT mesh in psbody.mesh.Mesh\n :param gt trimesh: GT mesh trimesh\n \"\"\"\n pred_vertices = np.array(pred_trimesh.vertices)\n pred_normals = np.array(pred_trimesh.vertex_normals)\n closest_face, _ = gt_Mesh.closest_faces_and_points(pred_vertices)\n gt_normals = np.array(gt_trimesh.face_normals[closest_face.ravel()])\n consistency = np.linalg.norm(pred_normals - gt_normals, axis=-1).mean()\n\n return consistency\n\ndef normal_consistency_vertex(pred_trimesh, gt_trimesh, part_mask):\n \"\"\"\n :param pred: predicted trimesh\n :param gt trimesh: GT mesh trimesh\n \"\"\"\n pred_vertices = 
np.array(pred_trimesh.vertices)\n pred_normals = np.array(pred_trimesh.vertex_normals)\n\n gt_vertices = np.array(gt_trimesh.vertices)[part_mask, ...].copy()\n gt_normals = np.array(gt_trimesh.vertex_normals)[part_mask, ...].copy()\n\n from scipy.spatial import cKDTree as KDTree\n # kdtree = KDTree(gt_vertices)\n # _, ind = kdtree.query(pred_vertices)\n\n # gt_normals = gt_normals[ind, :]\n\n kdtree = KDTree(pred_vertices)\n _, ind = kdtree.query(gt_vertices)\n\n pred_normals = pred_normals[ind, :]\n\n consistency = 1 - np.linalg.norm(pred_normals - gt_normals, axis=-1).mean()\n\n return consistency\n\ndef chamfer_l1_distance(s1, s2, w1=1., w2=1.):\n \"\"\"\n :param s1: B x N x 3\n :param s2: B x M x 3\n :param w1: weight for distance from s1 to s2\n :param w2: weight for distance from s2 to s1\n \"\"\"\n from kaolin.metrics.point import SidedDistance\n\n assert s1.is_cuda and s2.is_cuda\n sided_minimum_dist = SidedDistance()\n closest_index_in_s2 = sided_minimum_dist(s1, s2)\n closest_index_in_s1 = sided_minimum_dist(s2, s1)\n closest_s2 = batch_gather(s2, closest_index_in_s2)\n closest_s1 = batch_gather(s1, closest_index_in_s1)\n\n dist_to_s2 = torch.abs(s1 - closest_s2).sum(-1).mean() * w1\n dist_to_s1 = torch.abs(s2 - closest_s1).sum(-1).mean() * w2\n\n return dist_to_s2 + dist_to_s1\n\ndef chamfer_l2_distance(s1, s2, w1=1., w2=1.):\n \"\"\"\n :param s1: B x N x 3\n :param s2: B x M x 3\n :param w1: weight for distance from s1 to s2\n :param w2: weight for distance from s2 to s1\n \"\"\"\n from kaolin.metrics.point import SidedDistance\n\n assert s1.is_cuda and s2.is_cuda\n sided_minimum_dist = SidedDistance()\n closest_index_in_s2 = sided_minimum_dist(s1, s2)\n closest_index_in_s1 = sided_minimum_dist(s2, s1)\n closest_s2 = batch_gather(s2, closest_index_in_s2)\n closest_s1 = batch_gather(s1, closest_index_in_s1)\n\n dist_to_s2 = torch.norm(s1 - closest_s2, dim=-1)\n dist_to_s1 = torch.norm(s2 - closest_s1, dim=-1)\n\n return dist_to_s2.mean() * w1 + dist_to_s1.mean() * w2, dist_to_s2, dist_to_s1, closest_index_in_s2, closest_index_in_s1\n\ndef chamfer_distance(s1, s2, w1=1., w2=1.):\n \"\"\"\n :param s1: B x N x 3\n :param s2: B x M x 3\n :param w1: weight for distance from s1 to s2\n :param w2: weight for distance from s2 to s1\n \"\"\"\n from kaolin.metrics.point import SidedDistance\n\n assert s1.is_cuda and s2.is_cuda\n sided_minimum_dist = SidedDistance()\n closest_index_in_s2 = sided_minimum_dist(s1, s2)\n closest_index_in_s1 = sided_minimum_dist(s2, s1)\n closest_s2 = batch_gather(s2, closest_index_in_s2)\n closest_s1 = batch_gather(s1, closest_index_in_s1)\n\n dist_to_s2 = (((s1 - closest_s2) ** 2).sum(dim=-1)).mean() * w1\n dist_to_s1 = (((s2 - closest_s1) ** 2).sum(dim=-1)).mean() * w2\n\n return dist_to_s2 + dist_to_s1\n\ndef batch_point_to_surface(points, meshes):\n \"\"\"\n Naive implementation. Just loops over the set of points and meshes.\n This is a bit tricky to batch-ify because number of points and\n mesh structure could be different for each entry in the batch.\n \"\"\"\n distance = [point_to_surface(p, m) for p, m in zip(points, meshes)]\n return torch.stack(distance)\n\ndef batch_point_to_surface_vec(points, meshes):\n distance = [point_to_surface_vec(p, m) for p, m in zip(points, meshes)]\n return torch.stack(distance)\n\ndef batch_point_to_surface_vec_signed(meshes, points):\n prelu = torch.nn.PReLU(init=25. 
*25.).cuda()\n dist = []\n for m, p in zip(meshes, points):\n dist_val = point_to_surface_vec(p, m)\n sign_val = torch.ones_like(dist_val)\n sign_bool = sdf.check_sign(m,p)[0] == 0\n sign_val[sign_bool] = -1.\n signed_dist = prelu(sign_val*dist_val)\n dist.append(torch.mean(signed_dist*signed_dist))\n\n return torch.stack(dist)\n" ]
[ [ "torch.ones_like", "torch.stack", "numpy.linalg.norm", "torch.nn.PReLU", "torch.norm", "scipy.spatial.cKDTree", "torch.abs", "torch.cross", "numpy.array", "numpy.where", "torch.cat", "torch.mean" ] ]
ddrake/convex_m
[ "6e506133c03bb1e0cf38143a907ac595082d524c" ]
[ "admm/check_output.py" ]
[ "from matplotlib import pyplot as plt\nfrom mat_util import *\n\ndatadir = 'from_coeus/data'\nnxs = load('nxs', datadir)\nfs = load('fs', datadir)\n\nplt.plot(fs)\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Objective Function Values\")\nplt.title(\"Convergence of Objective Function\")\nplt.show()\n\ninput(\"press a key\")\n\nplt.plot(nxs)\nplt.xlabel(\"Steps\")\nplt.ylabel(\"1-Norm of x Values\")\nplt.title(\"Convergence of x in 1-Norm\")\nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
jgori-ouistiti/CoopIHC-zoo
[ "5efb1e1f9e773d793b4634b4671124d43aae3e7d" ]
[ "coopihczoo/eye/tests/test_PlIE.py" ]
[ "from coopihc.space.Space import Space\nfrom coopihc.space.State import State\nfrom coopihc.space.StateElement import StateElement\nfrom coopihc.space.utils import autospace\n\nfrom coopihczoo.eye.utils import ProvideLikelihoodInferenceEngine\n\nimport numpy\n\ngame_state = State(\n game_info=State(\n turn_index=StateElement(\n numpy.array([0]), autospace([0, 1, 2, 3]), out_of_bounds_mode=\"raw\"\n ),\n round_index=StateElement(\n numpy.array([0]), autospace([0, 1]), out_of_bounds_mode=\"raw\"\n ),\n ),\n task_state=State(\n target=StateElement(\n numpy.array([[-0.30282614]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"clip\",\n ),\n fixation=StateElement(\n numpy.array([[0.0]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"clip\",\n ),\n ),\n user_state=State(\n **{\n \"belief-mu\": StateElement(\n numpy.array([[0.0]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"warning\",\n ),\n \"belief-sigma\": StateElement(\n numpy.array([[1000.0]]),\n autospace([[[-numpy.inf]], [[numpy.inf]]]),\n out_of_bounds_mode=\"warning\",\n ),\n \"y\": StateElement(\n numpy.array([[0.0]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"warning\",\n ),\n \"Sigma_0\": StateElement(\n numpy.array([[1000.0]]),\n autospace([[[-numpy.inf]], [[numpy.inf]]]),\n out_of_bounds_mode=\"warning\",\n ),\n }\n ),\n assistant_state=State(),\n user_action=State(\n action=StateElement(\n numpy.array([[0.15020657]]),\n autospace([[[-1.0]], [[1.0]]]),\n out_of_bounds_mode=\"warning\",\n )\n ),\n assistant_action=State(\n action=StateElement(\n numpy.array([1]), autospace([0, 1]), out_of_bounds_mode=\"warning\"\n )\n ),\n)\n\nprint(game_state)\n\n\nclass Test(ProvideLikelihoodInferenceEngine):\n def __init__(self, noise_level, observation, *args, **kwargs):\n class Host:\n pass\n\n super().__init__(noise_level, *args, **kwargs)\n self.host = Host()\n self.host.role = \"user\"\n self.buffer = [observation]\n\n\ninference_engine = Test(0.5, game_state)\nstate, reward = inference_engine.infer()\nprint(state)\n" ]
[ [ "numpy.array" ] ]
RPGroup-PBoC/mwc_activation
[ "6ef3f02a53ecd80877082006ecc4b8fe4204c1d6" ]
[ "act/_fit_bivariate_normal_AstroML.py" ]
[ "\"\"\"\nNotes:\n These functions were used with modification from the astroML python\n function `fit_bivariate_normal` which can be found [here]\n (http://www.astroml.org/book_figures/chapter3/fig_robust_pca.html). In\n this modified version, the percentiles are computed using the numpy\n function `nanpercentile`.\nLicense:\n These functions were borrowed from the AstroML library in agreement with\n their 3-clause BSD license. These functions retain this 3-clause BSD\n licensing and are the copyright of the owners.\n\"\"\"\nimport numpy as np\nfrom scipy import stats\n\n#from scipy.special import erfinv\n#sigmaG_factor = 1. / (2 * np.sqrt(2) * erfinv(0.5))\nsigmaG_factor = 0.74130110925280102\n\n\ndef mean_sigma(a, axis=None, dtype=None, ddof=0, keepdims=False):\n \"\"\"\n Compute mean and standard deviation for an array\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose mean is desired. If `a` is not an\n array, a conversion is attempted.\n axis : int, optional\n Axis along which the means are computed. The default is to compute\n the mean of the flattened array.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it is\n the same as the array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n mu : ndarray, see dtype parameter above\n array containing the mean values\n sigma : ndarray, see dtype parameter above.\n array containing the standard deviation\n\n See Also\n --------\n median_sigmaG : robust rank-based version of this calculation.\n Notes\n\n -----\n This routine simply calls ``np.mean`` and ``np.std``, passing the\n keyword arguments to them. It is provided for ease of comparison\n with the function median_sigmaG()\n \"\"\"\n mu = np.mean(a, axis=axis, dtype=dtype)\n sigma = np.std(a, axis=axis, dtype=dtype, ddof=ddof)\n\n if keepdims:\n if axis is None:\n newshape = a.ndim * (1,)\n else:\n newshape = np.asarray(a.shape)\n newshape[axis] = 1\n\n mu = mu.reshape(newshape)\n sigma = sigma.reshape(newshape)\n\n return mu, sigma\n\n\ndef median_sigmaG(a, axis=None, overwrite_input=False, keepdims=False):\n \"\"\"\n Compute median and rank-based estimate of the standard deviation\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose mean is desired. If `a` is not an\n array, a conversion is attempted.\n axis : int, optional\n Axis along which the means are computed. The default is to compute\n the mean of the flattened array.\n overwrite_input : bool, optional\n If True, then allow use of memory of input array `a` for\n calculations. The input array will be modified by the call to\n median. This will save memory when you do not need to preserve\n the contents of the input array. Treat the input as undefined,\n but it will probably be fully or partially sorted.\n Default is False. Note that, if `overwrite_input` is True and the\n input is not already an array, an error will be raised.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. 
With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n median : ndarray, see dtype parameter above\n array containing the median values\n sigmaG : ndarray, see dtype parameter above.\n array containing the robust estimator of the standard deviation\n\n See Also\n --------\n mean_sigma : non-robust version of this calculation\n sigmaG : robust rank-based estimate of standard deviation\n\n Notes\n -----\n This routine uses a single call to ``np.nanpercentile`` to find the\n quartiles along the given axis, and uses these to compute the\n median and sigmaG:\n median = q50\n sigmaG = (q75 - q25) * 0.7413\n where 0.7413 ~ 1 / (2 sqrt(2) erf^-1(0.5))\n \"\"\"\n q25, median, q75 = np.nanpercentile(a, [25, 50, 75],\n axis=axis,\n overwrite_input=overwrite_input)\n sigmaG = sigmaG_factor * (q75 - q25)\n\n if keepdims:\n if axis is None:\n newshape = a.ndim * (1,)\n else:\n newshape = np.asarray(a.shape)\n newshape[axis] = 1\n\n median = median.reshape(newshape)\n sigmaG = sigmaG.reshape(newshape)\n\n return median, sigmaG\n\n\ndef sigmaG(a, axis=None, overwrite_input=False, keepdims=False):\n \"\"\"\n Compute the rank-based estimate of the standard deviation\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose mean is desired. If `a` is not an\n array, a conversion is attempted.\n axis : int, optional\n Axis along which the means are computed. The default is to compute\n the mean of the flattened array.\n overwrite_input : bool, optional\n If True, then allow use of memory of input array `a` for\n calculations. The input array will be modified by the call to\n median. This will save memory when you do not need to preserve\n the contents of the input array. Treat the input as undefined,\n but it will probably be fully or partially sorted.\n Default is False. Note that, if `overwrite_input` is True and the\n input is not already an array, an error will be raised.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. 
With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n median : ndarray, see dtype parameter above\n array containing the median values\n sigmaG : ndarray, see dtype parameter above.\n array containing the robust estimator of the standard deviation\n\n See Also\n --------\n median_sigmaG : robust rank-based estimate of mean and standard deviation\n\n Notes\n -----\n This routine uses a single call to ``np.nanpercentile`` to find the\n quartiles along the given axis, and uses these to compute the\n sigmaG, a robust estimate of the standard deviation sigma:\n sigmaG = 0.7413 * (q75 - q25)\n where 0.7413 ~ 1 / (2 sqrt(2) erf^-1(0.5))\n \"\"\"\n q25, q75 = np.nanpercentile(a, [25, 75],\n axis=axis,\n overwrite_input=overwrite_input)\n sigmaG = sigmaG_factor * (q75 - q25)\n\n if keepdims:\n if axis is None:\n newshape = a.ndim * (1,)\n else:\n newshape = np.asarray(a.shape)\n newshape[axis] = 1\n\n sigmaG = sigmaG.reshape(newshape)\n\n return sigmaG\n\n\ndef fit_bivariate_normal(x, y, robust=False):\n \"\"\"\n Fit bivariate normal parameters to a 2D distribution of points\n\n Parameters\n ----------\n x, y : array_like\n The x, y coordinates of the points\n robust : boolean (optional, default=False)\n If True, then use rank-based statistics which are robust to outliers\n Otherwise, use mean/std statistics which are not robust\n\n Returns\n -------\n mu : tuple\n (x, y) location of the best-fit bivariate normal\n sigma_1, sigma_2 : float\n The best-fit gaussian widths in the uncorrelated frame\n alpha : float\n The rotation angle in radians of the uncorrelated frame\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n\n assert x.shape == y.shape\n\n if robust:\n # use quartiles to compute center and spread\n med_x, sigmaG_x = median_sigmaG(x)\n med_y, sigmaG_y = median_sigmaG(y)\n\n # define the principal variables from Shevlyakov & Smirnov (2011)\n sx = 2 * sigmaG_x\n sy = 2 * sigmaG_y\n\n u = (x / sx + y / sy) / np.sqrt(2)\n v = (x / sx - y / sy) / np.sqrt(2)\n\n med_u, sigmaG_u = median_sigmaG(u)\n med_v, sigmaG_v = median_sigmaG(v)\n\n r_xy = ((sigmaG_u ** 2 - sigmaG_v ** 2) /\n (sigmaG_u ** 2 + sigmaG_v ** 2))\n\n # rename estimators\n mu_x, mu_y = med_x, med_y\n sigma_x, sigma_y = sigmaG_x, sigmaG_y\n else:\n mu_x = np.mean(x)\n sigma_x = np.std(x)\n\n mu_y = np.mean(y)\n sigma_y = np.std(y)\n\n r_xy = stats.pearsonr(x, y)[0]\n\n # We need to use the full (-180, 180) version of arctan: this is\n # np.arctan2(x, y) = np.arctan(x / y), modulo 180 degrees\n sigma_xy = r_xy * sigma_x * sigma_y\n alpha = 0.5 * np.arctan2(2 * sigma_xy, sigma_x ** 2 - sigma_y ** 2)\n\n sigma1 = np.sqrt((0.5 * (sigma_x ** 2 + sigma_y ** 2) +\n np.sqrt(0.25 * (sigma_x ** 2 - sigma_y ** 2) ** 2 +\n sigma_xy ** 2)))\n sigma2 = np.sqrt((0.5 * (sigma_x ** 2 + sigma_y ** 2) -\n np.sqrt(0.25 * (sigma_x ** 2 - sigma_y ** 2) ** 2 +\n sigma_xy ** 2)))\n\n return [mu_x, mu_y], sigma1, sigma2, alpha\n" ]
[ [ "numpy.arctan2", "scipy.stats.pearsonr", "numpy.asarray", "numpy.nanpercentile", "numpy.sqrt", "numpy.std", "numpy.mean" ] ]
syuqings/Fashion-MMT
[ "809c88cade3328176b202db543e686bce99ef76a" ]
[ "readers/data.py" ]
[ "from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport json\nimport numpy as np\nimport random\nimport pdb\nimport math\n\nfrom cytoolz import partition_all\nimport torch.utils.data\nfrom torch.utils.data import Sampler\n\nUNK, PAD, BOS, EOS, MASK = 0, 1, 2, 3, 4\n\n\nclass MMTDataset(torch.utils.data.Dataset):\n def __init__(self, config, split, img_max=10, src_max=36, tgt_max=72, task='mmt', _logger=None):\n super(MMTDataset, self).__init__()\n\n if _logger is None:\n self.print_fn = print\n else:\n self.print_fn = _logger.info\n\n self.names = np.load(config.name_file[split])\n self.anno = json.load(open(config.anno_file))\n self.src = open(config.src_txt[split], 'r', encoding='utf-8').readlines()\n self.trg = open(config.tgt_txt[split], 'r', encoding='utf-8').readlines()\n self.num_text = len(self.src)\n self.print_fn('text size %d' % self.num_text)\n\n self.lens = []\n if task == 'xmlm':\n for i in range(len(self.trg)):\n self.lens.append((len(self.trg[i].strip().split())+len(self.src[i].strip().split())+2)/2)\n elif task == 'mmt':\n for i in range(len(self.trg)):\n self.lens.append(len(self.trg[i].strip().split())+2)\n elif task in ['attp', 'itm']:\n for i in range(len(self.src)):\n self.lens.append(len(self.src[i].strip().split())+2)\n \n self.sim_img = json.load(open(config.sim_img[split]))\n self.stoi = json.load(open(config.word2int_file))\n self.itos = json.load(open(config.int2word_file))\n self.atoi = json.load(open(config.attr2int_file))\n self.ft_root = config.ft_root\n self.img_max = img_max\n self.src_max = src_max\n self.tgt_max = tgt_max\n self.is_train = True if split == 'trn' else False\n self.task = task\n\n def mask_and_pad_sent(self, x, id=None, lang='src'):\n max_len = self.src_max if lang == 'src' else self.tgt_max\n\n # masking input sequence\n if self.task == 'xmlm' or (self.task == 'mmt' and lang == 'trg'): # cross-lingual masking or adapt to MMT\n x, output_label = self.mask_sent(x[:max_len-1])\n elif self.task == 'attp':\n x, output_label = self.get_attr(x[:max_len-1], id)\n else:\n output_label = [PAD] * (max_len-1)\n\n # padding input sequence\n prob = random.random()\n if self.task == 'mmt' and lang == 'trg' and prob < 0.12:\n padded = [BOS] + x[:max_len-1] + [MASK] + [PAD] * max(0, max_len - len(x) - 2)\n output_label = [PAD] + output_label + [EOS] + [PAD] * max(0, max_len - len(x) - 2)\n elif self.task == 'attp':\n padded = [BOS] + x[:max_len-1] + [EOS] + [PAD] * max(0, max_len - len(x) - 2)\n else:\n padded = [BOS] + x[:max_len-1] + [EOS] + [PAD] * max(0, max_len - len(x) - 2)\n output_label = [PAD] + output_label + [PAD] + [PAD] * max(0, max_len - len(x) - 2)\n\n # truncate with the max length\n length = min(len(x)+2, max_len)\n padded = padded[:max_len]\n if self.task != 'attp':\n output_label = output_label[:max_len]\n return np.array(padded), np.array(output_label), length\n\n def random_mask(self, x, i, prob):\n # 80% randomly change token to mask token\n if prob < 0.8:\n x[i] = MASK\n # 10% randomly change token to random token\n elif prob < 0.9:\n x[i] = random.choice(list(range(len(self.stoi))))\n # -> rest 10% randomly keep current token\n return x\n\n def mask_sent(self, x):\n output_label = []\n for i, token in enumerate(x):\n prob = random.random()\n # mask normal token with 15% probability\n if prob < 0.15:\n prob /= 0.15\n x = self.random_mask(x, i, prob)\n output_label.append(token)\n else:\n # no masking token (will be ignored by loss function later)\n output_label.append(PAD)\n return x, 
output_label\n\n def get_attr(self, x, id):\n attrs = []\n output_label = [0.] * len(self.atoi)\n for attr in self.anno[id]['attr']:\n try:\n output_label[self.atoi[attr]] = 1.\n prob = random.random()\n if self.stoi[attr] in x:\n x = self.random_mask(x, x.index(self.stoi[attr]), prob)\n elif self.stoi[attr+'s'] in x:\n x = self.random_mask(x, x.index(self.stoi[attr+'s']), prob)\n elif self.stoi[attr+'es'] in x:\n x = self.random_mask(x, x.index(self.stoi[attr+'es']), prob)\n except:\n pass\n return x, output_label\n\n def sent2int(self, str_sent):\n int_sent = [self.stoi.get(w, UNK) for w in str_sent.split()]\n return int_sent\n\n def int2sent(self, batch):\n with torch.cuda.device_of(batch):\n batch = batch.tolist()\n batch = [[self.itos.get(str(ind), '<unk>') for ind in ex] for ex in batch] # denumericalize\n \n def trim(s, t):\n sentence = []\n for w in s:\n if w == t:\n break\n sentence.append(w)\n return sentence\n batch = [trim(ex, '<eos>') for ex in batch] # trim past frst eos\n\n def filter_special(tok):\n return tok not in ('<sos>', '<pad>', '<mask>')\n batch = [\" \".join(filter(filter_special, ex)).replace(\"@@ \", \"\") for ex in batch]\n return batch\n\n def __len__(self):\n return self.num_text\n\n def __getitem__(self, idx):\n outs = {}\n name = self.names[idx]\n img_ft = np.zeros(shape=[self.img_max, 2048], dtype=np.float32)\n for i, img in enumerate(self.anno[name]['images']):\n if i >= self.img_max:\n break\n img_ft[i] = np.load(os.path.join(self.ft_root, img+\".npy\"))[0]\n ft_len = min(self.img_max, len(self.anno[name]['images']))\n\n if self.task in ['xmlm', 'mmt']:\n src_id, src_label, src_len = self.mask_and_pad_sent(self.sent2int(self.src[idx].strip()), id=name, lang='src')\n trg_id, trg_label, trg_len = self.mask_and_pad_sent(self.sent2int(self.trg[idx].strip()), id=name, lang='trg')\n elif self.task == 'attp':\n src_id, src_label, src_len = self.mask_and_pad_sent(self.sent2int(self.src[idx].strip()), id=name, lang='src')\n trg_id = np.array([BOS])\n trg_len = 1\n elif self.task == 'itm':\n rep_prob = random.random()\n if rep_prob < 0.5:\n old_idx = idx\n idx = random.choice(self.sim_img[self.anno[name]['category']])\n if old_idx == idx:\n align_label = 1\n else:\n align_label = 0\n else:\n align_label = 1\n src_id, src_label, src_len = self.mask_and_pad_sent(self.sent2int(self.src[idx].strip()), id=name, lang='src')\n trg_id = np.array([BOS])\n trg_len = 1\n\n outs['ft_len'] = ft_len\n outs['img_ft'] = img_ft\n outs['src_ids'] = src_id\n outs['src_lens'] = src_len\n outs['trg_ids'] = trg_id\n outs['trg_lens'] = trg_len\n outs['ref_sents'] = self.trg[idx].strip()\n if self.task == 'itm':\n outs['align_label'] = align_label\n elif self.task == 'attp':\n outs['attr_label'] = src_label\n else:\n outs['output_label'] = np.concatenate([src_label, trg_label], axis=0)\n return outs\n\n\nclass TokenBucketSampler(Sampler):\n def __init__(self, lens, bucket_size, batch_size, droplast=False, size_multiple=8):\n self._lens = lens\n self._max_tok = batch_size\n self._bucket_size = bucket_size\n self._droplast = droplast\n self._size_mul = size_multiple\n\n def _create_ids(self):\n return list(range(len(self._lens)))\n\n def _sort_fn(self, i):\n return self._lens[i]\n\n def __iter__(self):\n ids = self._create_ids()\n random.shuffle(ids)\n buckets = [sorted(ids[i:i+self._bucket_size], key=self._sort_fn, reverse=True)\n for i in range(0, len(ids), self._bucket_size)]\n # fill batches until max_token (include padding)\n batches = []\n for bucket in buckets:\n max_len = 0\n 
batch_indices = []\n for indices in partition_all(self._size_mul, bucket):\n max_len = max(max_len, max(self._lens[i] for i in indices))\n if (max_len * (len(batch_indices) + self._size_mul)\n > self._max_tok):\n if not batch_indices:\n raise ValueError(\"max_tokens too small / max_seq_len too long\")\n assert len(batch_indices) % self._size_mul == 0\n batches.append(batch_indices)\n batch_indices = list(indices)\n max_len = max(self._lens[i] for i in indices)\n else:\n batch_indices.extend(indices)\n if not self._droplast and batch_indices:\n batches.append(batch_indices)\n random.shuffle(batches)\n return iter(batches)\n\n def __len__(self):\n raise ValueError(\"NOT supported. \")\n\n\nclass MetaLoader(object):\n \"\"\" wraps multiple data loaders \"\"\"\n def __init__(self, loaders, accum_steps=1):\n assert isinstance(loaders, dict)\n self.name2loader = {}\n self.name2iter = {}\n self.sampling_pools = []\n for n, l in loaders.items():\n if isinstance(l, tuple):\n l, r = l\n elif isinstance(l, torch.utils.data.DataLoader):\n r = 1\n else:\n raise ValueError()\n self.name2loader[n] = l\n self.name2iter[n] = iter(l)\n self.sampling_pools.extend([n]*r)\n self.accum_steps = accum_steps\n self.step = 0\n\n def __iter__(self):\n \"\"\" this iterator will run indefinitely \"\"\"\n task = self.sampling_pools[0]\n while True:\n if self.step % self.accum_steps == 0:\n task = random.choice(self.sampling_pools)\n self.step += 1\n iter_ = self.name2iter[task]\n try:\n batch = next(iter_)\n except StopIteration:\n iter_ = iter(self.name2loader[task])\n batch = next(iter_)\n self.name2iter[task] = iter_\n\n yield task, batch\n \n\n\n" ]
[ [ "numpy.array", "numpy.load", "numpy.concatenate", "numpy.zeros" ] ]
workingyifei/display-pattern-generator
[ "9444dce96954c546333d5aecc92a06c3bfd19aa5" ]
[ "VENV/lib/python3.6/site-packages/pandas/tests/indexes/common.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom pandas import compat\nfrom pandas.compat import PY3\n\nimport numpy as np\n\nfrom pandas import (Series, Index, Float64Index, Int64Index, UInt64Index,\n RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex,\n TimedeltaIndex, PeriodIndex, IntervalIndex, isna)\nfrom pandas.core.indexes.base import InvalidIndexError\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\nfrom pandas.core.dtypes.common import needs_i8_conversion\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas._libs.tslib import iNaT\n\nimport pandas.util.testing as tm\n\nimport pandas as pd\n\n\nclass Base(object):\n \"\"\" base class for index sub-class tests \"\"\"\n _holder = None\n _compat_props = ['shape', 'ndim', 'size', 'nbytes']\n\n def setup_indices(self):\n for name, idx in self.indices.items():\n setattr(self, name, idx)\n\n def verify_pickle(self, indices):\n unpickled = tm.round_trip_pickle(indices)\n assert indices.equals(unpickled)\n\n def test_pickle_compat_construction(self):\n # this is testing for pickle compat\n if self._holder is None:\n return\n\n # need an object to create with\n pytest.raises(TypeError, self._holder)\n\n def test_to_series(self):\n # assert that we are creating a copy of the index\n\n idx = self.create_index()\n s = idx.to_series()\n assert s.values is not idx.values\n assert s.index is not idx\n assert s.name == idx.name\n\n def test_to_series_with_arguments(self):\n # GH18699\n\n # index kwarg\n idx = self.create_index()\n s = idx.to_series(index=idx)\n\n assert s.values is not idx.values\n assert s.index is idx\n assert s.name == idx.name\n\n # name kwarg\n idx = self.create_index()\n s = idx.to_series(name='__test')\n\n assert s.values is not idx.values\n assert s.index is not idx\n assert s.name != idx.name\n\n def test_to_frame(self):\n # see gh-15230\n idx = self.create_index()\n name = idx.name or 0\n\n df = idx.to_frame()\n\n assert df.index is idx\n assert len(df.columns) == 1\n assert df.columns[0] == name\n assert df[name].values is not idx.values\n\n df = idx.to_frame(index=False)\n assert df.index is not idx\n\n def test_shift(self):\n\n # GH8083 test the base class for shift\n idx = self.create_index()\n pytest.raises(NotImplementedError, idx.shift, 1)\n pytest.raises(NotImplementedError, idx.shift, 1, 2)\n\n def test_create_index_existing_name(self):\n\n # GH11193, when an existing index is passed, and a new name is not\n # specified, the new index should inherit the previous object name\n expected = self.create_index()\n if not isinstance(expected, MultiIndex):\n expected.name = 'foo'\n result = pd.Index(expected)\n tm.assert_index_equal(result, expected)\n\n result = pd.Index(expected, name='bar')\n expected.name = 'bar'\n tm.assert_index_equal(result, expected)\n else:\n expected.names = ['foo', 'bar']\n result = pd.Index(expected)\n tm.assert_index_equal(\n result, Index(Index([('foo', 'one'), ('foo', 'two'),\n ('bar', 'one'), ('baz', 'two'),\n ('qux', 'one'), ('qux', 'two')],\n dtype='object'),\n names=['foo', 'bar']))\n\n result = pd.Index(expected, names=['A', 'B'])\n tm.assert_index_equal(\n result,\n Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),\n ('baz', 'two'), ('qux', 'one'), ('qux', 'two')],\n dtype='object'), names=['A', 'B']))\n\n def test_numeric_compat(self):\n\n idx = self.create_index()\n tm.assert_raises_regex(TypeError, \"cannot perform __mul__\",\n lambda: idx * 1)\n tm.assert_raises_regex(TypeError, \"cannot perform __rmul__\",\n lambda: 1 * 
idx)\n\n div_err = \"cannot perform __truediv__\" if PY3 \\\n else \"cannot perform __div__\"\n tm.assert_raises_regex(TypeError, div_err, lambda: idx / 1)\n div_err = div_err.replace(' __', ' __r')\n tm.assert_raises_regex(TypeError, div_err, lambda: 1 / idx)\n tm.assert_raises_regex(TypeError, \"cannot perform __floordiv__\",\n lambda: idx // 1)\n tm.assert_raises_regex(TypeError, \"cannot perform __rfloordiv__\",\n lambda: 1 // idx)\n\n def test_logical_compat(self):\n idx = self.create_index()\n tm.assert_raises_regex(TypeError, 'cannot perform all',\n lambda: idx.all())\n tm.assert_raises_regex(TypeError, 'cannot perform any',\n lambda: idx.any())\n\n def test_boolean_context_compat(self):\n\n # boolean context compat\n idx = self.create_index()\n\n def f():\n if idx:\n pass\n\n tm.assert_raises_regex(ValueError, 'The truth value of a', f)\n\n def test_reindex_base(self):\n idx = self.create_index()\n expected = np.arange(idx.size, dtype=np.intp)\n\n actual = idx.get_indexer(idx)\n tm.assert_numpy_array_equal(expected, actual)\n\n with tm.assert_raises_regex(ValueError, 'Invalid fill method'):\n idx.get_indexer(idx, method='invalid')\n\n def test_get_indexer_consistency(self):\n # See GH 16819\n for name, index in self.indices.items():\n if isinstance(index, IntervalIndex):\n continue\n\n if index.is_unique or isinstance(index, CategoricalIndex):\n indexer = index.get_indexer(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n else:\n e = \"Reindexing only valid with uniquely valued Index objects\"\n with tm.assert_raises_regex(InvalidIndexError, e):\n indexer = index.get_indexer(index[0:2])\n\n indexer, _ = index.get_indexer_non_unique(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n\n def test_ndarray_compat_properties(self):\n idx = self.create_index()\n assert idx.T.equals(idx)\n assert idx.transpose().equals(idx)\n\n values = idx.values\n for prop in self._compat_props:\n assert getattr(idx, prop) == getattr(values, prop)\n\n # test for validity\n idx.nbytes\n idx.values.nbytes\n\n def test_repr_roundtrip(self):\n\n idx = self.create_index()\n tm.assert_index_equal(eval(repr(idx)), idx)\n\n def test_str(self):\n\n # test the string repr\n idx = self.create_index()\n idx.name = 'foo'\n assert \"'foo'\" in str(idx)\n assert idx.__class__.__name__ in str(idx)\n\n def test_dtype_str(self, indices):\n dtype = indices.dtype_str\n assert isinstance(dtype, compat.string_types)\n assert dtype == str(indices.dtype)\n\n def test_repr_max_seq_item_setting(self):\n # GH10182\n idx = self.create_index()\n idx = idx.repeat(50)\n with pd.option_context(\"display.max_seq_items\", None):\n repr(idx)\n assert '...' 
not in str(idx)\n\n def test_wrong_number_names(self, indices):\n def testit(ind):\n ind.names = [\"apple\", \"banana\", \"carrot\"]\n tm.assert_raises_regex(ValueError, \"^Length\", testit, indices)\n\n def test_set_name_methods(self, indices):\n new_name = \"This is the new name for this index\"\n\n # don't tests a MultiIndex here (as its tested separated)\n if isinstance(indices, MultiIndex):\n return\n original_name = indices.name\n new_ind = indices.set_names([new_name])\n assert new_ind.name == new_name\n assert indices.name == original_name\n res = indices.rename(new_name, inplace=True)\n\n # should return None\n assert res is None\n assert indices.name == new_name\n assert indices.names == [new_name]\n # with tm.assert_raises_regex(TypeError, \"list-like\"):\n # # should still fail even if it would be the right length\n # ind.set_names(\"a\")\n with tm.assert_raises_regex(ValueError, \"Level must be None\"):\n indices.set_names(\"a\", level=0)\n\n # rename in place just leaves tuples and other containers alone\n name = ('A', 'B')\n indices.rename(name, inplace=True)\n assert indices.name == name\n assert indices.names == [name]\n\n def test_hash_error(self, indices):\n index = indices\n tm.assert_raises_regex(TypeError, \"unhashable type: %r\" %\n type(index).__name__, hash, indices)\n\n def test_copy_name(self):\n # gh-12309: Check that the \"name\" argument\n # passed at initialization is honored.\n\n for name, index in compat.iteritems(self.indices):\n if isinstance(index, MultiIndex):\n continue\n\n first = index.__class__(index, copy=True, name='mario')\n second = first.__class__(first, copy=False)\n\n # Even though \"copy=False\", we want a new object.\n assert first is not second\n\n # Not using tm.assert_index_equal() since names differ.\n assert index.equals(first)\n\n assert first.name == 'mario'\n assert second.name == 'mario'\n\n s1 = Series(2, index=first)\n s2 = Series(3, index=second[:-1])\n\n if not isinstance(index, CategoricalIndex):\n # See gh-13365\n s3 = s1 * s2\n assert s3.index.name == 'mario'\n\n def test_ensure_copied_data(self):\n # Check the \"copy\" argument of each Index.__new__ is honoured\n # GH12309\n for name, index in compat.iteritems(self.indices):\n init_kwargs = {}\n if isinstance(index, PeriodIndex):\n # Needs \"freq\" specification:\n init_kwargs['freq'] = index.freq\n elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):\n # RangeIndex cannot be initialized from data\n # MultiIndex and CategoricalIndex are tested separately\n continue\n\n index_type = index.__class__\n result = index_type(index.values, copy=True, **init_kwargs)\n tm.assert_index_equal(index, result)\n tm.assert_numpy_array_equal(index.values, result.values,\n check_same='copy')\n\n if isinstance(index, PeriodIndex):\n # .values an object array of Period, thus copied\n result = index_type(ordinal=index.asi8, copy=False,\n **init_kwargs)\n tm.assert_numpy_array_equal(index._ndarray_values,\n result._ndarray_values,\n check_same='same')\n elif isinstance(index, IntervalIndex):\n # checked in test_interval.py\n pass\n else:\n result = index_type(index.values, copy=False, **init_kwargs)\n tm.assert_numpy_array_equal(index.values, result.values,\n check_same='same')\n tm.assert_numpy_array_equal(index._ndarray_values,\n result._ndarray_values,\n check_same='same')\n\n def test_copy_and_deepcopy(self, indices):\n from copy import copy, deepcopy\n\n if isinstance(indices, MultiIndex):\n return\n for func in (copy, deepcopy):\n idx_copy = func(indices)\n assert idx_copy 
is not indices\n assert idx_copy.equals(indices)\n\n new_copy = indices.copy(deep=True, name=\"banana\")\n assert new_copy.name == \"banana\"\n\n def test_duplicates(self, indices):\n if type(indices) is not self._holder:\n return\n if not len(indices) or isinstance(indices, MultiIndex):\n return\n idx = self._holder([indices[0]] * 5)\n assert not idx.is_unique\n assert idx.has_duplicates\n\n def test_unique(self, indices):\n # don't test a MultiIndex here (as its tested separated)\n # don't test a CategoricalIndex because categories change (GH 18291)\n if isinstance(indices, (MultiIndex, CategoricalIndex)):\n return\n\n # GH 17896\n expected = indices.drop_duplicates()\n for level in 0, indices.name, None:\n result = indices.unique(level=level)\n tm.assert_index_equal(result, expected)\n\n for level in 3, 'wrong':\n pytest.raises((IndexError, KeyError), indices.unique, level=level)\n\n def test_unique_na(self):\n idx = pd.Index([2, np.nan, 2, 1], name='my_index')\n expected = pd.Index([2, np.nan, 1], name='my_index')\n result = idx.unique()\n tm.assert_index_equal(result, expected)\n\n def test_get_unique_index(self, indices):\n # MultiIndex tested separately\n if not len(indices) or isinstance(indices, MultiIndex):\n return\n\n idx = indices[[0] * 5]\n idx_unique = indices[[0]]\n\n # We test against `idx_unique`, so first we make sure it's unique\n # and doesn't contain nans.\n assert idx_unique.is_unique\n try:\n assert not idx_unique.hasnans\n except NotImplementedError:\n pass\n\n for dropna in [False, True]:\n result = idx._get_unique_index(dropna=dropna)\n tm.assert_index_equal(result, idx_unique)\n\n # nans:\n if not indices._can_hold_na:\n return\n\n if needs_i8_conversion(indices):\n vals = indices.asi8[[0] * 5]\n vals[0] = iNaT\n else:\n vals = indices.values[[0] * 5]\n vals[0] = np.nan\n\n vals_unique = vals[:2]\n idx_nan = indices._shallow_copy(vals)\n idx_unique_nan = indices._shallow_copy(vals_unique)\n assert idx_unique_nan.is_unique\n\n assert idx_nan.dtype == indices.dtype\n assert idx_unique_nan.dtype == indices.dtype\n\n for dropna, expected in zip([False, True],\n [idx_unique_nan,\n idx_unique]):\n for i in [idx_nan, idx_unique_nan]:\n result = i._get_unique_index(dropna=dropna)\n tm.assert_index_equal(result, expected)\n\n def test_sort(self, indices):\n pytest.raises(TypeError, indices.sort)\n\n def test_mutability(self, indices):\n if not len(indices):\n return\n pytest.raises(TypeError, indices.__setitem__, 0, indices[0])\n\n def test_view(self, indices):\n assert indices.view().name == indices.name\n\n def test_compat(self, indices):\n assert indices.tolist() == list(indices)\n\n def test_memory_usage(self):\n for name, index in compat.iteritems(self.indices):\n result = index.memory_usage()\n if len(index):\n index.get_loc(index[0])\n result2 = index.memory_usage()\n result3 = index.memory_usage(deep=True)\n\n # RangeIndex, IntervalIndex\n # don't have engines\n if not isinstance(index, (RangeIndex, IntervalIndex)):\n assert result2 > result\n\n if index.inferred_type == 'object':\n assert result3 > result2\n\n else:\n\n # we report 0 for no-length\n assert result == 0\n\n def test_argsort(self):\n for k, ind in self.indices.items():\n\n # separately tested\n if k in ['catIndex']:\n continue\n\n result = ind.argsort()\n expected = np.array(ind).argsort()\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n def test_numpy_argsort(self):\n for k, ind in self.indices.items():\n result = np.argsort(ind)\n expected = ind.argsort()\n 
tm.assert_numpy_array_equal(result, expected)\n\n # these are the only two types that perform\n # pandas compatibility input validation - the\n # rest already perform separate (or no) such\n # validation via their 'values' attribute as\n # defined in pandas.core.indexes/base.py - they\n # cannot be changed at the moment due to\n # backwards compatibility concerns\n if isinstance(type(ind), (CategoricalIndex, RangeIndex)):\n msg = \"the 'axis' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg,\n np.argsort, ind, axis=1)\n\n msg = \"the 'kind' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.argsort,\n ind, kind='mergesort')\n\n msg = \"the 'order' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.argsort,\n ind, order=('a', 'b'))\n\n def test_pickle(self, indices):\n self.verify_pickle(indices)\n original_name, indices.name = indices.name, 'foo'\n self.verify_pickle(indices)\n indices.name = original_name\n\n def test_take(self):\n indexer = [4, 3, 0, 2]\n for k, ind in self.indices.items():\n\n # separate\n if k in ['boolIndex', 'tuples', 'empty']:\n continue\n\n result = ind.take(indexer)\n expected = ind[indexer]\n assert result.equals(expected)\n\n if not isinstance(ind,\n (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n # GH 10791\n with pytest.raises(AttributeError):\n ind.freq\n\n def test_take_invalid_kwargs(self):\n idx = self.create_index()\n indices = [1, 2]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n tm.assert_raises_regex(TypeError, msg, idx.take,\n indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, mode='clip')\n\n def test_repeat(self):\n rep = 2\n i = self.create_index()\n expected = pd.Index(i.values.repeat(rep), name=i.name)\n tm.assert_index_equal(i.repeat(rep), expected)\n\n i = self.create_index()\n rep = np.arange(len(i))\n expected = pd.Index(i.values.repeat(rep), name=i.name)\n tm.assert_index_equal(i.repeat(rep), expected)\n\n def test_numpy_repeat(self):\n rep = 2\n i = self.create_index()\n expected = i.repeat(rep)\n tm.assert_index_equal(np.repeat(i, rep), expected)\n\n msg = \"the 'axis' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, np.repeat,\n i, rep, axis=0)\n\n @pytest.mark.parametrize('klass', [list, tuple, np.array, Series])\n def test_where(self, klass):\n i = self.create_index()\n\n cond = [True] * len(i)\n result = i.where(klass(cond))\n expected = i\n tm.assert_index_equal(result, expected)\n\n cond = [False] + [True] * len(i[1:])\n expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)\n result = i.where(klass(cond))\n tm.assert_index_equal(result, expected)\n\n def test_setops_errorcases(self):\n for name, idx in compat.iteritems(self.indices):\n # # non-iterable input\n cases = [0.5, 'xxx']\n methods = [idx.intersection, idx.union, idx.difference,\n idx.symmetric_difference]\n\n for method in methods:\n for case in cases:\n tm.assert_raises_regex(TypeError,\n \"Input must be Index \"\n \"or array-like\",\n method, case)\n\n def test_intersection_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[:5]\n second = idx[:3]\n intersect = first.intersection(second)\n\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n assert tm.equalContents(intersect, second)\n\n # GH 10149\n cases = 
[klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assert_raises_regex(ValueError, msg):\n result = first.intersection(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.intersection(case)\n assert tm.equalContents(result, second)\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assert_raises_regex(TypeError, msg):\n result = first.intersection([1, 2, 3])\n\n def test_union_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[3:]\n second = idx[:5]\n everything = idx\n union = first.union(second)\n assert tm.equalContents(union, everything)\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assert_raises_regex(ValueError, msg):\n result = first.union(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.union(case)\n assert tm.equalContents(result, everything)\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assert_raises_regex(TypeError, msg):\n result = first.union([1, 2, 3])\n\n def test_difference_base(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[2:]\n second = idx[:4]\n answer = idx[4:]\n result = first.difference(second)\n\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n assert tm.equalContents(result, answer)\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assert_raises_regex(ValueError, msg):\n result = first.difference(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):\n assert result.__class__ == answer.__class__\n tm.assert_numpy_array_equal(result.sort_values().asi8,\n answer.sort_values().asi8)\n else:\n result = first.difference(case)\n assert tm.equalContents(result, answer)\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assert_raises_regex(TypeError, msg):\n result = first.difference([1, 2, 3])\n\n def test_symmetric_difference(self):\n for name, idx in compat.iteritems(self.indices):\n first = idx[1:]\n second = idx[:-1]\n if isinstance(idx, CategoricalIndex):\n pass\n else:\n answer = idx[[0, -1]]\n result = first.symmetric_difference(second)\n assert tm.equalContents(result, answer)\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(idx, PeriodIndex):\n msg = \"can only call with other PeriodIndex-ed objects\"\n with tm.assert_raises_regex(ValueError, msg):\n result = first.symmetric_difference(case)\n elif isinstance(idx, CategoricalIndex):\n pass\n else:\n result = first.symmetric_difference(case)\n assert tm.equalContents(result, answer)\n\n if isinstance(idx, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with tm.assert_raises_regex(TypeError, msg):\n first.symmetric_difference([1, 2, 3])\n\n def test_insert_base(self):\n\n for name, idx in compat.iteritems(self.indices):\n result = idx[1:4]\n\n if not len(idx):\n continue\n\n # test 0th element\n assert 
idx[0:4].equals(result.insert(0, idx[0]))\n\n def test_delete_base(self):\n\n for name, idx in compat.iteritems(self.indices):\n\n if not len(idx):\n continue\n\n if isinstance(idx, RangeIndex):\n # tested in class\n continue\n\n expected = idx[1:]\n result = idx.delete(0)\n assert result.equals(expected)\n assert result.name == expected.name\n\n expected = idx[:-1]\n result = idx.delete(-1)\n assert result.equals(expected)\n assert result.name == expected.name\n\n with pytest.raises((IndexError, ValueError)):\n # either depending on numpy version\n result = idx.delete(len(idx))\n\n def test_equals(self):\n\n for name, idx in compat.iteritems(self.indices):\n assert idx.equals(idx)\n assert idx.equals(idx.copy())\n assert idx.equals(idx.astype(object))\n\n assert not idx.equals(list(idx))\n assert not idx.equals(np.array(idx))\n\n # Cannot pass in non-int64 dtype to RangeIndex\n if not isinstance(idx, RangeIndex):\n same_values = Index(idx, dtype=object)\n assert idx.equals(same_values)\n assert same_values.equals(idx)\n\n if idx.nlevels == 1:\n # do not test MultiIndex\n assert not idx.equals(pd.Series(idx))\n\n def test_equals_op(self):\n # GH9947, GH10637\n index_a = self.create_index()\n if isinstance(index_a, PeriodIndex):\n return\n\n n = len(index_a)\n index_b = index_a[0:-1]\n index_c = index_a[0:-1].append(index_a[-2:-1])\n index_d = index_a[0:1]\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == index_b\n expected1 = np.array([True] * n)\n expected2 = np.array([True] * (n - 1) + [False])\n tm.assert_numpy_array_equal(index_a == index_a, expected1)\n tm.assert_numpy_array_equal(index_a == index_c, expected2)\n\n # test comparisons with numpy arrays\n array_a = np.array(index_a)\n array_b = np.array(index_a[0:-1])\n array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))\n array_d = np.array(index_a[0:1])\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == array_b\n tm.assert_numpy_array_equal(index_a == array_a, expected1)\n tm.assert_numpy_array_equal(index_a == array_c, expected2)\n\n # test comparisons with Series\n series_a = Series(array_a)\n series_b = Series(array_b)\n series_c = Series(array_c)\n series_d = Series(array_d)\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == series_b\n\n tm.assert_numpy_array_equal(index_a == series_a, expected1)\n tm.assert_numpy_array_equal(index_a == series_c, expected2)\n\n # cases where length is 1 for one of them\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == index_d\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == series_d\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n index_a == array_d\n msg = \"Can only compare identically-labeled Series objects\"\n with tm.assert_raises_regex(ValueError, msg):\n series_a == series_d\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n series_a == array_d\n\n # comparing with a scalar should broadcast; note that we are excluding\n # MultiIndex because in this case each item in the index is a tuple of\n # length 2, and therefore is considered an array of length 2 in the\n # comparison instead of a scalar\n if not isinstance(index_a, MultiIndex):\n expected3 = np.array([False] * (len(index_a) - 2) + [True, False])\n # assuming the 2nd to last item is unique in the data\n item = index_a[-2]\n tm.assert_numpy_array_equal(index_a == item, expected3)\n tm.assert_series_equal(series_a == item, Series(expected3))\n\n def 
test_numpy_ufuncs(self):\n # test ufuncs of numpy 1.9.2. see:\n # http://docs.scipy.org/doc/numpy/reference/ufuncs.html\n\n # some functions are skipped because it may return different result\n # for unicode input depending on numpy version\n\n for name, idx in compat.iteritems(self.indices):\n for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,\n np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,\n np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,\n np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,\n np.rad2deg]:\n if isinstance(idx, DatetimeIndexOpsMixin):\n # raise TypeError or ValueError (PeriodIndex)\n # PeriodIndex behavior should be changed in future version\n with pytest.raises(Exception):\n with np.errstate(all='ignore'):\n func(idx)\n elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):\n # coerces to float (e.g. np.sin)\n with np.errstate(all='ignore'):\n result = func(idx)\n exp = Index(func(idx.values), name=idx.name)\n\n tm.assert_index_equal(result, exp)\n assert isinstance(result, pd.Float64Index)\n else:\n # raise AttributeError or TypeError\n if len(idx) == 0:\n continue\n else:\n with pytest.raises(Exception):\n with np.errstate(all='ignore'):\n func(idx)\n\n for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:\n if isinstance(idx, DatetimeIndexOpsMixin):\n # raise TypeError or ValueError (PeriodIndex)\n with pytest.raises(Exception):\n func(idx)\n elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):\n # Results in bool array\n result = func(idx)\n assert isinstance(result, np.ndarray)\n assert not isinstance(result, Index)\n else:\n if len(idx) == 0:\n continue\n else:\n with pytest.raises(Exception):\n func(idx)\n\n def test_hasnans_isnans(self):\n # GH 11343, added tests for hasnans / isnans\n for name, index in self.indices.items():\n if isinstance(index, MultiIndex):\n pass\n else:\n idx = index.copy()\n\n # cases in indices doesn't include NaN\n expected = np.array([False] * len(idx), dtype=bool)\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert not idx.hasnans\n\n idx = index.copy()\n values = idx.values\n\n if len(index) == 0:\n continue\n elif isinstance(index, DatetimeIndexOpsMixin):\n values[1] = iNaT\n elif isinstance(index, (Int64Index, UInt64Index)):\n continue\n else:\n values[1] = np.nan\n\n if isinstance(index, PeriodIndex):\n idx = index.__class__(values, freq=index.freq)\n else:\n idx = index.__class__(values)\n\n expected = np.array([False] * len(idx), dtype=bool)\n expected[1] = True\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans\n\n def test_fillna(self):\n # GH 11343\n for name, index in self.indices.items():\n if len(index) == 0:\n pass\n elif isinstance(index, MultiIndex):\n idx = index.copy()\n msg = \"isna is not defined for MultiIndex\"\n with tm.assert_raises_regex(NotImplementedError, msg):\n idx.fillna(idx[0])\n else:\n idx = index.copy()\n result = idx.fillna(idx[0])\n tm.assert_index_equal(result, idx)\n assert result is not idx\n\n msg = \"'value' must be a scalar, passed: \"\n with tm.assert_raises_regex(TypeError, msg):\n idx.fillna([idx[0]])\n\n idx = index.copy()\n values = idx.values\n\n if isinstance(index, DatetimeIndexOpsMixin):\n values[1] = iNaT\n elif isinstance(index, (Int64Index, UInt64Index)):\n continue\n else:\n values[1] = np.nan\n\n if isinstance(index, PeriodIndex):\n idx = index.__class__(values, freq=index.freq)\n else:\n idx = index.__class__(values)\n\n expected = np.array([False] * len(idx), dtype=bool)\n expected[1] = True\n 
tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans\n\n def test_nulls(self):\n # this is really a smoke test for the methods\n # as these are adequately tested for function elsewhere\n\n for name, index in self.indices.items():\n if len(index) == 0:\n tm.assert_numpy_array_equal(\n index.isna(), np.array([], dtype=bool))\n elif isinstance(index, MultiIndex):\n idx = index.copy()\n msg = \"isna is not defined for MultiIndex\"\n with tm.assert_raises_regex(NotImplementedError, msg):\n idx.isna()\n else:\n\n if not index.hasnans:\n tm.assert_numpy_array_equal(\n index.isna(), np.zeros(len(index), dtype=bool))\n tm.assert_numpy_array_equal(\n index.notna(), np.ones(len(index), dtype=bool))\n else:\n result = isna(index)\n tm.assert_numpy_array_equal(index.isna(), result)\n tm.assert_numpy_array_equal(index.notna(), ~result)\n\n def test_empty(self):\n # GH 15270\n index = self.create_index()\n assert not index.empty\n assert index[:0].empty\n\n def test_join_self_unique(self, join_type):\n index = self.create_index()\n if index.is_unique:\n joined = index.join(index, how=join_type)\n assert (index == joined).all()\n\n def test_searchsorted_monotonic(self, indices):\n # GH17271\n # not implemented for tuple searches in MultiIndex\n # or Intervals searches in IntervalIndex\n if isinstance(indices, (MultiIndex, IntervalIndex)):\n return\n\n # nothing to test if the index is empty\n if indices.empty:\n return\n value = indices[0]\n\n # determine the expected results (handle dupes for 'right')\n expected_left, expected_right = 0, (indices == value).argmin()\n if expected_right == 0:\n # all values are the same, expected_right should be length\n expected_right = len(indices)\n\n # test _searchsorted_monotonic in all cases\n # test searchsorted only for increasing\n if indices.is_monotonic_increasing:\n ssm_left = indices._searchsorted_monotonic(value, side='left')\n assert expected_left == ssm_left\n\n ssm_right = indices._searchsorted_monotonic(value, side='right')\n assert expected_right == ssm_right\n\n ss_left = indices.searchsorted(value, side='left')\n assert expected_left == ss_left\n\n ss_right = indices.searchsorted(value, side='right')\n assert expected_right == ss_right\n\n elif indices.is_monotonic_decreasing:\n ssm_left = indices._searchsorted_monotonic(value, side='left')\n assert expected_left == ssm_left\n\n ssm_right = indices._searchsorted_monotonic(value, side='right')\n assert expected_right == ssm_right\n\n else:\n # non-monotonic should raise.\n with pytest.raises(ValueError):\n indices._searchsorted_monotonic(value, side='left')\n\n def test_map(self):\n # callable\n index = self.create_index()\n\n # we don't infer UInt64\n if isinstance(index, pd.UInt64Index):\n expected = index.astype('int64')\n else:\n expected = index\n\n result = index.map(lambda x: x)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"mapper\",\n [\n lambda values, index: {i: e for e, i in zip(values, index)},\n lambda values, index: pd.Series(values, index)])\n def test_map_dictlike(self, mapper):\n\n index = self.create_index()\n if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):\n pytest.skip(\"skipping tests for {}\".format(type(index)))\n\n identity = mapper(index.values, index)\n\n # we don't infer to UInt64 for a dict\n if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):\n expected = index.astype('int64')\n else:\n expected = index\n\n result = index.map(identity)\n tm.assert_index_equal(result, expected)\n\n # empty 
mappable\n expected = pd.Index([np.nan] * len(index))\n result = index.map(mapper(expected, index))\n tm.assert_index_equal(result, expected)\n\n def test_putmask_with_wrong_mask(self):\n # GH18368\n index = self.create_index()\n\n with pytest.raises(ValueError):\n index.putmask(np.ones(len(index) + 1, np.bool), 1)\n\n with pytest.raises(ValueError):\n index.putmask(np.ones(len(index) - 1, np.bool), 1)\n\n with pytest.raises(ValueError):\n index.putmask('foo', 1)\n\n @pytest.mark.parametrize('copy', [True, False])\n @pytest.mark.parametrize('name', [None, 'foo'])\n @pytest.mark.parametrize('ordered', [True, False])\n def test_astype_category(self, copy, name, ordered):\n # GH 18630\n index = self.create_index()\n if name:\n index = index.rename(name)\n\n # standard categories\n dtype = CategoricalDtype(ordered=ordered)\n result = index.astype(dtype, copy=copy)\n expected = CategoricalIndex(index.values, name=name, ordered=ordered)\n tm.assert_index_equal(result, expected)\n\n # non-standard categories\n dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered)\n result = index.astype(dtype, copy=copy)\n expected = CategoricalIndex(index.values, name=name, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n if ordered is False:\n # dtype='category' defaults to ordered=False, so only test once\n result = index.astype('category', copy=copy)\n expected = CategoricalIndex(index.values, name=name)\n tm.assert_index_equal(result, expected)\n" ]
[ [ "pandas.compat.iteritems", "pandas.util.testing.assert_raises_regex", "pandas.util.testing.assert_numpy_array_equal", "pandas.Series", "pandas.util.testing.round_trip_pickle", "pandas.CategoricalIndex", "pandas.core.dtypes.dtypes.CategoricalDtype", "numpy.argsort", "numpy.repeat", "numpy.errstate", "numpy.arange", "pandas.util.testing.assert_index_equal", "pandas.core.dtypes.common.needs_i8_conversion", "numpy.array", "pandas.util.testing.equalContents", "pandas.Index", "pandas.isna", "pandas.option_context" ] ]
hyunynim/im2avatar
[ "db7f01b79f7edf200815351b6d5821044605c0fd" ]
[ "train_color_human.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport os\nimport sys\nsys.path.append('./utils')\nsys.path.append('./models')\n\nimport dataset_human as dataset\nimport model_color as model\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('train_dir', './train_color_human',\n \"\"\"Directory where to write summaries and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('base_dir', './data/human_im2avatar', \n \"\"\"The path containing all the samples.\"\"\")\ntf.app.flags.DEFINE_string('data_list_path', './data_list', \n \"\"\"The path containing data lists.\"\"\")\n\ntf.app.flags.DEFINE_integer('train_epochs', 501, \"\"\"\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 55, \"\"\"\"\"\")\ntf.app.flags.DEFINE_integer('gpu', 1, \"\"\"\"\"\")\ntf.app.flags.DEFINE_float('learning_rate', 0.0003, \"\"\"\"\"\")\ntf.app.flags.DEFINE_float('wd', 0.00001, \"\"\"\"\"\")\ntf.app.flags.DEFINE_integer('epochs_to_save',20, \"\"\"\"\"\")\ntf.app.flags.DEFINE_integer('decay_step',20000, \"\"\"for lr\"\"\")\ntf.app.flags.DEFINE_float('decay_rate', 0.7, \"\"\"for lr\"\"\")\n\nIM_DIM = 128 \nVOL_DIM = 64 \n\nBATCH_SIZE = FLAGS.batch_size\nTRAIN_EPOCHS = FLAGS.train_epochs\nGPU_INDEX = FLAGS.gpu\nBASE_LEARNING_RATE = FLAGS.learning_rate\nDECAY_STEP = FLAGS.decay_step\nDECAY_RATE = FLAGS.decay_rate\n\nBN_INIT_DECAY = 0.5\nBN_DECAY_DECAY_RATE = 0.5\nBN_DECAY_DECAY_STEP = float(DECAY_STEP)\nBN_DECAY_CLIP = 0.99\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(GPU_INDEX)\n\nTRAIN_DIR = FLAGS.train_dir\nif not os.path.exists(TRAIN_DIR): \n os.makedirs(TRAIN_DIR)\nLOG_FOUT = open(os.path.join(TRAIN_DIR, 'log_train.txt'), 'w')\nLOG_FOUT.write(str(tf.flags._global_parser.parse_args())+'\\n')\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n\ndef get_learning_rate(batch):\n learning_rate = tf.train.exponential_decay(\n BASE_LEARNING_RATE, # Base learning rate.\n batch * BATCH_SIZE, # Current index into the dataset.\n DECAY_STEP, # Decay step.\n DECAY_RATE, # Decay rate.\n staircase=True)\n learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!\n return learning_rate \n\ndef get_bn_decay(batch):\n bn_momentum = tf.train.exponential_decay(\n BN_INIT_DECAY,\n batch*BATCH_SIZE,\n BN_DECAY_DECAY_STEP,\n BN_DECAY_DECAY_RATE,\n staircase=True)\n bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)\n return bn_decay \n\n\ndef train(dataset_):\n with tf.Graph().as_default():\n with tf.device('/gpu:'+str(GPU_INDEX)):\n is_train_pl = tf.placeholder(tf.bool)\n img_pl, vol_clr_pl, vol_flow_pl = model.placeholder_inputs(BATCH_SIZE, IM_DIM, VOL_DIM)\n\n global_step = tf.Variable(0)\n bn_decay = get_bn_decay(global_step)\n \n pred_reg_clr, pred_conf, pred_flow, pred_blended_clr = model.get_model(img_pl, is_train_pl, weight_decay=FLAGS.wd, bn_decay=bn_decay)\n loss = model.get_loss(pred_reg_clr, pred_blended_clr, vol_clr_pl, pred_flow, vol_flow_pl)\n \n learning_rate = get_learning_rate(global_step)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n saver = tf.train.Saver()\n\n config = tf.ConfigProto()\n config.gpu_options.allocator_type = 'BFC'\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n\n with tf.Session(config=config) as sess:\n model_path = os.path.join(TRAIN_DIR, \"trained_models\")\n if tf.gfile.Exists(os.path.join(model_path, \"checkpoint\")):\n ckpt = tf.train.get_checkpoint_state(model_path)\n restorer = tf.train.Saver()\n restorer.restore(sess, 
ckpt.model_checkpoint_path)\n print (\"Load parameters from checkpoint.\")\n else:\n sess.run(tf.global_variables_initializer())\n\n train_sample_size = dataset_.getTrainSampleSize()\n train_batches = train_sample_size // BATCH_SIZE\n\n for epoch in range(TRAIN_EPOCHS):\n dataset_.shuffleTrainNames()\n\n for batch_idx in range(train_batches):\n imgs, vols_flow, vols_clr = dataset_.next_flow_batch(batch_idx * BATCH_SIZE, BATCH_SIZE, vol_dim=VOL_DIM) \n feed_dict = {img_pl: imgs, vol_clr_pl: vols_clr, vol_flow_pl: vols_flow, is_train_pl: True}\n\n step = sess.run(global_step)\n _, loss_val = sess.run([train_op, loss], feed_dict=feed_dict)\n\n log_string(\"<TRAIN> Epoch {} - Batch {}: loss: {}.\".format(epoch, batch_idx, loss_val))\n\n if epoch % FLAGS.epochs_to_save == 0:\n checkpoint_path = os.path.join(model_path, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=epoch)\n\ndef main():\n train_dataset = dataset.Dataset(base_path=FLAGS.base_dir, \n data_list_path=FLAGS.data_list_path)\n train(train_dataset)\n\nif __name__ == '__main__':\n main()\n\n\n\n\n" ]
[ [ "tensorflow.placeholder", "tensorflow.minimum", "tensorflow.app.flags.DEFINE_string", "tensorflow.global_variables_initializer", "tensorflow.train.get_checkpoint_state", "tensorflow.train.AdamOptimizer", "tensorflow.Graph", "tensorflow.train.exponential_decay", "tensorflow.ConfigProto", "tensorflow.flags._global_parser.parse_args", "tensorflow.app.flags.DEFINE_float", "tensorflow.Variable", "tensorflow.train.Saver", "tensorflow.Session", "tensorflow.maximum", "tensorflow.app.flags.DEFINE_integer" ] ]
vsobolmaven/python-statlib
[ "f0dc8c1a93c5536c3c4a32fa425ddd081349dccd" ]
[ "statlib/pstat.py" ]
[ "# Copyright (c) 1999-2007 Gary Strangman; All Rights Reserved.\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n# \r\n# The above copyright notice and this permission notice shall be included in\r\n# all copies or substantial portions of the Software.\r\n# \r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\n# THE SOFTWARE.\r\n#\r\n# Comments and/or additions are welcome (send e-mail to:\r\n# [email protected]).\r\n# \r\n\"\"\"\r\npstat.py module\r\n\r\n#################################################\r\n####### Written by: Gary Strangman ###########\r\n####### Last modified: Dec 18, 2007 ###########\r\n#################################################\r\n\r\nThis module provides some useful list and array manipulation routines\r\nmodeled after those found in the |Stat package by Gary Perlman, plus a\r\nnumber of other useful list/file manipulation functions. The list-based\r\nfunctions include:\r\n\r\n abut (source,*args)\r\n simpleabut (source, addon)\r\n colex (listoflists,cnums)\r\n collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)\r\n dm (listoflists,criterion)\r\n flat (l)\r\n linexand (listoflists,columnlist,valuelist)\r\n linexor (listoflists,columnlist,valuelist)\r\n linedelimited (inlist,delimiter)\r\n lineincols (inlist,colsize) \r\n lineincustcols (inlist,colsizes)\r\n list2string (inlist)\r\n makelol(inlist)\r\n makestr(x)\r\n printcc (lst,extra=2)\r\n printincols (listoflists,colsize)\r\n pl (listoflists)\r\n printl(listoflists)\r\n replace (lst,oldval,newval)\r\n recode (inlist,listmap,cols='all')\r\n remap (listoflists,criterion)\r\n roundlist (inlist,num_digits_to_round_floats_to)\r\n sortby(listoflists,sortcols)\r\n unique (inlist)\r\n duplicates(inlist)\r\n writedelimited (listoflists, delimiter, file, writetype='w')\r\n\r\nSome of these functions have alternate versions which are defined only if\r\nNumeric (NumPy) can be imported. These functions are generally named as\r\nabove, with an 'a' prefix.\r\n\r\n aabut (source, *args)\r\n acolex (a,indices,axis=1)\r\n acollapse (a,keepcols,collapsecols,sterr=0,ns=0)\r\n adm (a,criterion)\r\n alinexand (a,columnlist,valuelist)\r\n alinexor (a,columnlist,valuelist)\r\n areplace (a,oldval,newval)\r\n arecode (a,listmap,col='all')\r\n arowcompare (row1, row2)\r\n arowsame (row1, row2)\r\n asortrows(a,axis=0)\r\n aunique(inarray)\r\n aduplicates(inarray)\r\n\r\nCurrently, the code is all but completely un-optimized. In many cases, the\r\narray versions of functions amount simply to aliases to built-in array\r\nfunctions/methods. Their inclusion here is for function name consistency.\r\n\"\"\"\r\n\r\n## CHANGE LOG:\r\n## ==========\r\n## 07-11-26 ... 
edited to work with numpy\r\n## 01-11-15 ... changed list2string() to accept a delimiter\r\n## 01-06-29 ... converted exec()'s to eval()'s to make compatible with Py2.1\r\n## 01-05-31 ... added duplicates() and aduplicates() functions\r\n## 00-12-28 ... license made GPL, docstring and import requirements\r\n## 99-11-01 ... changed version to 0.3\r\n## 99-08-30 ... removed get, getstrings, put, aget, aput (into io.py)\r\n## 03/27/99 ... added areplace function, made replace fcn recursive\r\n## 12/31/98 ... added writefc function for ouput to fixed column sizes\r\n## 12/07/98 ... fixed import problem (failed on collapse() fcn)\r\n## added __version__ variable (now 0.2)\r\n## 12/05/98 ... updated doc-strings\r\n## added features to collapse() function\r\n## added flat() function for lists\r\n## fixed a broken asortrows() \r\n## 11/16/98 ... fixed minor bug in aput for 1D arrays\r\n##\r\n## 11/08/98 ... fixed aput to output large arrays correctly\r\n\r\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom . import stats # required 3rd party module\r\nimport string, copy\r\nfrom types import *\r\nfrom six.moves import map\nfrom six.moves import range\n\r\n__version__ = 0.4\r\n\r\n###=========================== LIST FUNCTIONS ==========================\r\n###\r\n### Here are the list functions, DEFINED FOR ALL SYSTEMS.\r\n### Array functions (for NumPy-enabled computers) appear below.\r\n###\r\n\r\ndef abut (source,*args):\r\n \"\"\"\r\nLike the |Stat abut command. It concatenates two lists side-by-side\r\nand returns the result. '2D' lists are also accomodated for either argument\r\n(source or addon). CAUTION: If one list is shorter, it will be repeated\r\nuntil it is as long as the longest list. If this behavior is not desired,\r\nuse pstat.simpleabut().\r\n\r\nUsage: abut(source, args) where args=any # of lists\r\nReturns: a list of lists as long as the LONGEST list past, source on the\r\n 'left', lists in <args> attached consecutively on the 'right'\r\n\"\"\"\r\n\r\n if type(source) not in [list,tuple]:\r\n source = [source]\r\n for addon in args:\r\n if type(addon) not in [list,tuple]:\r\n addon = [addon]\r\n if len(addon) < len(source): # is source list longer?\r\n if len(source) % len(addon) == 0: # are they integer multiples?\r\n repeats = len(source)/len(addon) # repeat addon n times\r\n origadd = copy.deepcopy(addon)\r\n for i in range(repeats-1):\r\n addon = addon + origadd\r\n else:\r\n repeats = len(source)/len(addon)+1 # repeat addon x times,\r\n origadd = copy.deepcopy(addon) # x is NOT an integer\r\n for i in range(repeats-1):\r\n addon = addon + origadd\r\n addon = addon[0:len(source)]\r\n elif len(source) < len(addon): # is addon list longer?\r\n if len(addon) % len(source) == 0: # are they integer multiples?\r\n repeats = len(addon)/len(source) # repeat source n times\r\n origsour = copy.deepcopy(source)\r\n for i in range(repeats-1):\r\n source = source + origsour\r\n else:\r\n repeats = len(addon)/len(source)+1 # repeat source x times,\r\n origsour = copy.deepcopy(source) # x is NOT an integer\r\n for i in range(repeats-1):\r\n source = source + origsour\r\n source = source[0:len(addon)]\r\n\r\n source = simpleabut(source,addon)\r\n return source\r\n\r\n\r\ndef simpleabut (source, addon):\r\n \"\"\"\r\nConcatenates two lists as columns and returns the result. '2D' lists\r\nare also accomodated for either argument (source or addon). This DOES NOT\r\nrepeat either list to make the 2 lists of equal length. 
Beware of list pairs\r\nwith different lengths ... the resulting list will be the length of the\r\nFIRST list passed.\r\n\r\nUsage: simpleabut(source,addon) where source, addon=list (or list-of-lists)\r\nReturns: a list of lists as long as source, with source on the 'left' and\r\n addon on the 'right'\r\n\"\"\"\r\n if type(source) not in [list,tuple]:\r\n source = [source]\r\n if type(addon) not in [list,tuple]:\r\n addon = [addon]\r\n minlen = min(len(source),len(addon))\r\n list_ = copy.deepcopy(source) # start abut process\r\n if type(source[0]) not in [list_,tuple]:\r\n if type(addon[0]) not in [list_,tuple]:\r\n for i in range(minlen):\r\n list_[i] = [source[i]] + [addon[i]] # source/addon = column\r\n else:\r\n for i in range(minlen):\r\n list_[i] = [source[i]] + addon[i] # addon=list_-of-list_s\r\n else:\r\n if type(addon[0]) not in [list_,tuple]:\r\n for i in range(minlen):\r\n list_[i] = source[i] + [addon[i]] # source=list_-of-list_s\r\n else:\r\n for i in range(minlen):\r\n list_[i] = source[i] + addon[i] # source/addon = list_-of-list_s\r\n source = list_\r\n return source\r\n\r\n\r\ndef colex (listoflists,cnums):\r\n \"\"\"\r\nExtracts from listoflists the columns specified in the list 'cnums'\r\n(cnums can be an integer, a sequence of integers, or a string-expression that\r\ncorresponds to a slice operation on the variable x ... e.g., 'x[3:]' will colex\r\ncolumns 3 onward from the listoflists).\r\n\r\nUsage: colex (listoflists,cnums)\r\nReturns: a list-of-lists corresponding to the columns from listoflists\r\n specified by cnums, in the order the column numbers appear in cnums\r\n\"\"\"\r\n global index\r\n column = 0\r\n if type(cnums) in [list,tuple]: # if multiple columns to get\r\n index = cnums[0]\r\n column = [x[index] for x in listoflists]\r\n for col in cnums[1:]:\r\n index = col\r\n column = abut(column,[x[index] for x in listoflists])\r\n elif type(cnums) == str: # if an 'x[3:]' type expr.\r\n evalstring = 'map(lambda x: x'+cnums+', listoflists)'\r\n column = eval(evalstring)\r\n else: # else it's just 1 col to get\r\n index = cnums\r\n column = [x[index] for x in listoflists]\r\n return column\r\n\r\n\r\ndef collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):\r\n \"\"\"\r\nAverages data in collapsecol, keeping all unique items in keepcols\r\n(using unique, which keeps unique LISTS of column numbers), retaining the\r\nunique sets of values in keepcols, the mean for each. 
Setting fcn1\r\nand/or fcn2 to point to a function rather than None (e.g., stats.sterr, len)\r\nwill append those results (e.g., the sterr, N) after each calculated mean.\r\ncfcn is the collapse function to apply (defaults to mean, defined here in the\r\npstat module to avoid circular imports with stats.py, but harmonicmean or\r\nothers could be passed).\r\n\r\nUsage: collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)\r\nReturns: a list of lists with all unique permutations of entries appearing in\r\n columns (\"conditions\") specified by keepcols, abutted with the result of\r\n cfcn (if cfcn=None, defaults to the mean) of each column specified by\r\n collapsecols.\r\n\"\"\"\r\n def collmean (inlist):\r\n s = 0\r\n for item in inlist:\r\n s = s + item\r\n return s/float(len(inlist))\r\n\r\n if type(keepcols) not in [list,tuple]:\r\n keepcols = [keepcols]\r\n if type(collapsecols) not in [list,tuple]:\r\n collapsecols = [collapsecols]\r\n if cfcn == None:\r\n cfcn = collmean\r\n if keepcols == []:\r\n means = [0]*len(collapsecols)\r\n for i in range(len(collapsecols)):\r\n avgcol = colex(listoflists,collapsecols[i])\r\n means[i] = cfcn(avgcol)\r\n if fcn1:\r\n try:\r\n test = fcn1(avgcol)\r\n except:\r\n test = 'N/A'\r\n means[i] = [means[i], test]\r\n if fcn2:\r\n try:\r\n test = fcn2(avgcol)\r\n except:\r\n test = 'N/A'\r\n try:\r\n means[i] = means[i] + [len(avgcol)]\r\n except TypeError:\r\n means[i] = [means[i],len(avgcol)]\r\n return means\r\n else:\r\n values = colex(listoflists,keepcols)\r\n uniques = unique(values)\r\n uniques.sort()\r\n newlist = []\r\n if type(keepcols) not in [list,tuple]: keepcols = [keepcols]\r\n for item in uniques:\r\n if type(item) not in [list,tuple]: item =[item]\r\n tmprows = linexand(listoflists,keepcols,item)\r\n for col in collapsecols:\r\n avgcol = colex(tmprows,col)\r\n item.append(cfcn(avgcol))\r\n if fcn1 != None:\r\n try:\r\n test = fcn1(avgcol)\r\n except:\r\n test = 'N/A'\r\n item.append(test)\r\n if fcn2 != None:\r\n try:\r\n test = fcn2(avgcol)\r\n except:\r\n test = 'N/A'\r\n item.append(test)\r\n newlist.append(item)\r\n return newlist\r\n\r\n\r\ndef dm (listoflists,criterion):\r\n \"\"\"\r\nReturns rows from the passed list of lists that meet the criteria in\r\nthe passed criterion expression (a string as a function of x; e.g., 'x[3]>=9'\r\nwill return all rows where the 4th column>=9 and \"x[2]=='N'\" will return rows\r\nwith column 2 equal to the string 'N').\r\n\r\nUsage: dm (listoflists, criterion)\r\nReturns: rows from listoflists that meet the specified criterion.\r\n\"\"\"\r\n function = 'filter(lambda x: '+criterion+',listoflists)'\r\n lines = eval(function)\r\n return lines\r\n\r\n\r\ndef flat(l):\r\n \"\"\"\r\nReturns the flattened version of a '2D' list. 
List-correlate to the a.ravel()()\r\nmethod of NumPy arrays.\r\n\r\nUsage: flat(l)\r\n\"\"\"\r\n newl = []\r\n for i in range(len(l)):\r\n for j in range(len(l[i])):\r\n newl.append(l[i][j])\r\n return newl\r\n\r\n\r\ndef linexand (listoflists,columnlist,valuelist):\r\n \"\"\"\r\nReturns the rows of a list of lists where col (from columnlist) = val\r\n(from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).\r\nlen(columnlist) must equal len(valuelist).\r\n\r\nUsage: linexand (listoflists,columnlist,valuelist)\r\nReturns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i\r\n\"\"\"\r\n if type(columnlist) not in [list,tuple]:\r\n columnlist = [columnlist]\r\n if type(valuelist) not in [list,tuple]:\r\n valuelist = [valuelist]\r\n criterion = ''\r\n for i in range(len(columnlist)):\r\n if type(valuelist[i])==str:\r\n critval = '\\'' + valuelist[i] + '\\''\r\n else:\r\n critval = str(valuelist[i])\r\n criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'\r\n criterion = criterion[0:-3] # remove the \"and\" after the last crit\r\n function = 'filter(lambda x: '+criterion+',listoflists)'\r\n lines = eval(function)\r\n return lines\r\n\r\n\r\ndef linexor (listoflists,columnlist,valuelist):\r\n \"\"\"\r\nReturns the rows of a list of lists where col (from columnlist) = val\r\n(from valuelist) for ANY pair of values (colunmlist[i],valuelist[i[).\r\nOne value is required for each column in columnlist. If only one value\r\nexists for columnlist but multiple values appear in valuelist, the\r\nvaluelist values are all assumed to pertain to the same column.\r\n\r\nUsage: linexor (listoflists,columnlist,valuelist)\r\nReturns: the rows of listoflists where columnlist[i]=valuelist[i] for ANY i\r\n\"\"\"\r\n if type(columnlist) not in [list,tuple]:\r\n columnlist = [columnlist]\r\n if type(valuelist) not in [list,tuple]:\r\n valuelist = [valuelist]\r\n criterion = ''\r\n if len(columnlist) == 1 and len(valuelist) > 1:\r\n columnlist = columnlist*len(valuelist)\r\n for i in range(len(columnlist)): # build an exec string\r\n if type(valuelist[i])==str:\r\n critval = '\\'' + valuelist[i] + '\\''\r\n else:\r\n critval = str(valuelist[i])\r\n criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'\r\n criterion = criterion[0:-2] # remove the \"or\" after the last crit\r\n function = 'filter(lambda x: '+criterion+',listoflists)'\r\n lines = eval(function)\r\n return lines\r\n\r\n\r\ndef linedelimited (inlist,delimiter):\r\n \"\"\"\r\nReturns a string composed of elements in inlist, with each element\r\nseparated by 'delimiter.' Used by function writedelimited. 
Use '\\t'\r\nfor tab-delimiting.\r\n\r\nUsage: linedelimited (inlist,delimiter)\r\n\"\"\"\r\n outstr = ''\r\n for item in inlist:\r\n if type(item) != str:\r\n item = str(item)\r\n outstr = outstr + item + delimiter\r\n outstr = outstr[0:-1]\r\n return outstr\r\n\r\n\r\ndef lineincols (inlist,colsize):\r\n \"\"\"\r\nReturns a string composed of elements in inlist, with each element\r\nright-aligned in columns of (fixed) colsize.\r\n\r\nUsage: lineincols (inlist,colsize) where colsize is an integer\r\n\"\"\"\r\n outstr = ''\r\n for item in inlist:\r\n if type(item) != str:\r\n item = str(item)\r\n size = len(item)\r\n if size <= colsize:\r\n for i in range(colsize-size):\r\n outstr = outstr + ' '\r\n outstr = outstr + item\r\n else:\r\n outstr = outstr + item[0:colsize+1]\r\n return outstr\r\n\r\n\r\ndef lineincustcols (inlist,colsizes):\r\n \"\"\"\r\nReturns a string composed of elements in inlist, with each element\r\nright-aligned in a column of width specified by a sequence colsizes. The\r\nlength of colsizes must be greater than or equal to the number of columns\r\nin inlist.\r\n\r\nUsage: lineincustcols (inlist,colsizes)\r\nReturns: formatted string created from inlist\r\n\"\"\"\r\n outstr = ''\r\n for i in range(len(inlist)):\r\n if type(inlist[i]) != str:\r\n item = str(inlist[i])\r\n else:\r\n item = inlist[i]\r\n size = len(item)\r\n if size <= colsizes[i]:\r\n for j in range(colsizes[i]-size):\r\n outstr = outstr + ' '\r\n outstr = outstr + item\r\n else:\r\n outstr = outstr + item[0:colsizes[i]+1]\r\n return outstr\r\n\r\n\r\ndef list2string (inlist,delimit=' '):\r\n \"\"\"\r\nConverts a 1D list to a single long string for file output, using\r\nthe string.join function.\r\n\r\nUsage: list2string (inlist,delimit=' ')\r\nReturns: the string created from inlist\r\n\"\"\"\r\n stringlist = list(map(makestr,inlist))\r\n return string.join(stringlist,delimit)\r\n\r\n\r\ndef makelol(inlist):\r\n \"\"\"\r\nConverts a 1D list to a 2D list (i.e., a list-of-lists). 
Useful when you\r\nwant to use put() to write a 1D list one item per line in the file.\r\n\r\nUsage: makelol(inlist)\r\nReturns: if l = [1,2,'hi'] then returns [[1],[2],['hi']] etc.\r\n\"\"\"\r\n x = []\r\n for item in inlist:\r\n x.append([item])\r\n return x\r\n\r\n\r\ndef makestr (x):\r\n if type(x) != str:\r\n x = str(x)\r\n return x\r\n\r\n\r\ndef printcc (lst,extra=2):\r\n \"\"\"\r\nPrints a list of lists in columns, customized by the max size of items\r\nwithin the columns (max size of items in col, plus 'extra' number of spaces).\r\nUse 'dashes' or '\\\\n' in the list-of-lists to print dashes or blank lines,\r\nrespectively.\r\n\r\nUsage: printcc (lst,extra=2)\r\nReturns: None\r\n\"\"\"\r\n if type(lst[0]) not in [list,tuple]:\r\n lst = [lst]\r\n rowstokill = []\r\n list2print = copy.deepcopy(lst)\r\n for i in range(len(lst)):\r\n if lst[i] == ['\\n'] or lst[i]=='\\n' or lst[i]=='dashes' or lst[i]=='' or lst[i]==['']:\r\n rowstokill = rowstokill + [i]\r\n rowstokill.reverse() # delete blank rows from the end\r\n for row in rowstokill:\r\n del list2print[row]\r\n maxsize = [0]*len(list2print[0])\r\n for col in range(len(list2print[0])):\r\n items = colex(list2print,col)\r\n items = list(map(makestr,items))\r\n maxsize[col] = max(list(map(len,items))) + extra\r\n for row in lst:\r\n if row == ['\\n'] or row == '\\n' or row == '' or row == ['']:\r\n print()\r\n elif row == ['dashes'] or row == 'dashes':\r\n dashes = [0]*len(maxsize)\r\n for j in range(len(maxsize)):\r\n dashes[j] = '-'*(maxsize[j]-2)\r\n print(lineincustcols(dashes,maxsize))\r\n else:\r\n print(lineincustcols(row,maxsize))\r\n return None\r\n\r\n\r\ndef printincols (listoflists,colsize):\r\n \"\"\"\r\nPrints a list of lists in columns of (fixed) colsize width, where\r\ncolsize is an integer.\r\n\r\nUsage: printincols (listoflists,colsize)\r\nReturns: None\r\n\"\"\"\r\n for row in listoflists:\r\n print(lineincols(row,colsize))\r\n return None\r\n\r\n\r\ndef pl (listoflists):\r\n \"\"\"\r\nPrints a list of lists, 1 list (row) at a time.\r\n\r\nUsage: pl(listoflists)\r\nReturns: None\r\n\"\"\"\r\n for row in listoflists:\r\n if row[-1] == '\\n':\r\n print(row, end=' ')\r\n else:\r\n print(row)\r\n return None\r\n\r\n\r\ndef printl(listoflists):\r\n \"\"\"Alias for pl.\"\"\"\r\n pl(listoflists)\r\n return\r\n\r\n\r\ndef replace (inlst,oldval,newval):\r\n \"\"\"\r\nReplaces all occurrences of 'oldval' with 'newval', recursively.\r\n\r\nUsage: replace (inlst,oldval,newval)\r\n\"\"\"\r\n lst = inlst*1\r\n for i in range(len(lst)):\r\n if type(lst[i]) not in [list,tuple]:\r\n if lst[i]==oldval: lst[i]=newval\r\n else:\r\n lst[i] = replace(lst[i],oldval,newval)\r\n return lst\r\n\r\n\r\ndef recode (inlist,listmap,cols=None):\r\n \"\"\"\r\nChanges the values in a list to a new set of values (useful when\r\nyou need to recode data from (e.g.) strings to numbers. 
cols defaults\r\nto None (meaning all columns are recoded).\r\n\r\nUsage: recode (inlist,listmap,cols=None) cols=recode cols, listmap=2D list\r\nReturns: inlist with the appropriate values replaced with new ones\r\n\"\"\"\r\n lst = copy.deepcopy(inlist)\r\n if cols != None:\r\n if type(cols) not in [list,tuple]:\r\n cols = [cols]\r\n for col in cols:\r\n for row in range(len(lst)):\r\n try:\r\n idx = colex(listmap,0).index(lst[row][col])\r\n lst[row][col] = listmap[idx][1]\r\n except ValueError:\r\n pass\r\n else:\r\n for row in range(len(lst)):\r\n for col in range(len(lst)):\r\n try:\r\n idx = colex(listmap,0).index(lst[row][col])\r\n lst[row][col] = listmap[idx][1]\r\n except ValueError:\r\n pass\r\n return lst\r\n\r\n\r\ndef remap (listoflists,criterion):\r\n \"\"\"\r\nRemaps values in a given column of a 2D list (listoflists). This requires\r\na criterion as a function of 'x' so that the result of the following is\r\nreturned ... map(lambda x: 'criterion',listoflists). \r\n\r\nUsage: remap(listoflists,criterion) criterion=string\r\nReturns: remapped version of listoflists\r\n\"\"\"\r\n function = 'map(lambda x: '+criterion+',listoflists)'\r\n lines = eval(function)\r\n return lines\r\n\r\n\r\ndef roundlist (inlist,digits):\r\n \"\"\"\r\nGoes through each element in a 1D or 2D inlist, and applies the following\r\nfunction to all elements of float ... round(element,digits).\r\n\r\nUsage: roundlist(inlist,digits)\r\nReturns: list with rounded floats\r\n\"\"\"\r\n if type(inlist[0]) in [int, float]:\r\n inlist = [inlist]\r\n l = inlist*1\r\n for i in range(len(l)):\r\n for j in range(len(l[i])):\r\n if type(l[i][j])==float:\r\n l[i][j] = round(l[i][j],digits)\r\n return l\r\n\r\n\r\ndef sortby(listoflists,sortcols):\r\n \"\"\"\r\nSorts a list of lists on the column(s) specified in the sequence\r\nsortcols.\r\n\r\nUsage: sortby(listoflists,sortcols)\r\nReturns: sorted list, unchanged column ordering\r\n\"\"\"\r\n newlist = abut(colex(listoflists,sortcols),listoflists)\r\n newlist.sort()\r\n try:\r\n numcols = len(sortcols)\r\n except TypeError:\r\n numcols = 1\r\n crit = '[' + str(numcols) + ':]'\r\n newlist = colex(newlist,crit)\r\n return newlist\r\n\r\n\r\ndef unique (inlist):\r\n \"\"\"\r\nReturns all unique items in the passed list. 
If the a list-of-lists\r\nis passed, unique LISTS are found (i.e., items in the first dimension are\r\ncompared).\r\n\r\nUsage: unique (inlist)\r\nReturns: the unique elements (or rows) in inlist\r\n\"\"\"\r\n uniques = []\r\n for item in inlist:\r\n if item not in uniques:\r\n uniques.append(item)\r\n return uniques\r\n\r\ndef duplicates(inlist):\r\n \"\"\"\r\nReturns duplicate items in the FIRST dimension of the passed list.\r\n\r\nUsage: duplicates (inlist)\r\n\"\"\"\r\n dups = []\r\n for i in range(len(inlist)):\r\n if inlist[i] in inlist[i+1:]:\r\n dups.append(inlist[i])\r\n return dups\r\n\r\n\r\ndef nonrepeats(inlist):\r\n \"\"\"\r\nReturns items that are NOT duplicated in the first dim of the passed list.\r\n\r\nUsage: nonrepeats (inlist)\r\n\"\"\"\r\n nonrepeats = []\r\n for i in range(len(inlist)):\r\n if inlist.count(inlist[i]) == 1:\r\n nonrepeats.append(inlist[i])\r\n return nonrepeats\r\n\r\n\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n#=================== PSTAT ARRAY FUNCTIONS =====================\r\n\r\ntry: # DEFINE THESE *ONLY* IF numpy IS AVAILABLE\r\n import numpy as N\r\n \r\n def aabut (source, *args):\r\n \"\"\"\r\n Like the |Stat abut command. It concatenates two arrays column-wise\r\n and returns the result. CAUTION: If one array is shorter, it will be\r\n repeated until it is as long as the other.\r\n \r\n Usage: aabut (source, args) where args=any # of arrays\r\n Returns: an array as long as the LONGEST array past, source appearing on the\r\n 'left', arrays in <args> attached on the 'right'.\r\n \"\"\"\r\n if len(source.shape)==1:\r\n width = 1\r\n source = N.resize(source,[source.shape[0],width])\r\n else:\r\n width = source.shape[1]\r\n for addon in args:\r\n if len(addon.shape)==1:\r\n width = 1\r\n addon = N.resize(addon,[source.shape[0],width])\r\n else:\r\n width = source.shape[1]\r\n if len(addon) < len(source):\r\n addon = N.resize(addon,[source.shape[0],addon.shape[1]])\r\n elif len(source) < len(addon):\r\n source = N.resize(source,[addon.shape[0],source.shape[1]])\r\n source = N.concatenate((source,addon),1)\r\n return source\r\n \r\n \r\n def acolex (a,indices,axis=1):\r\n \"\"\"\r\n Extracts specified indices (a list) from passed array, along passed\r\n axis (column extraction is default). 
BEWARE: A 1D array is presumed to be a\r\n column-array (and that the whole array will be returned as a column).\r\n \r\n Usage: acolex (a,indices,axis=1)\r\n Returns: the columns of a specified by indices\r\n \"\"\"\r\n if type(indices) not in [list,tuple,N.ndarray]:\r\n indices = [indices]\r\n if len(N.shape(a)) == 1:\r\n cols = N.resize(a,[a.shape[0],1])\r\n else:\r\n cols = N.take(a,indices,axis)\r\n return cols\r\n \r\n \r\n def acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):\r\n \"\"\"\r\n Averages data in collapsecol, keeping all unique items in keepcols\r\n (using unique, which keeps unique LISTS of column numbers), retaining\r\n the unique sets of values in keepcols, the mean for each. If stderror or\r\n N of the mean are desired, set either or both parameters to 1.\r\n \r\n Usage: acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)\r\n Returns: unique 'conditions' specified by the contents of columns specified\r\n by keepcols, abutted with the mean(s) of column(s) specified by\r\n collapsecols\r\n \"\"\"\r\n def acollmean (inarray):\r\n return N.sum(N.ravel(inarray))\r\n \r\n if type(keepcols) not in [list,tuple,N.ndarray]:\r\n keepcols = [keepcols]\r\n if type(collapsecols) not in [list,tuple,N.ndarray]:\r\n collapsecols = [collapsecols]\r\n \r\n if cfcn == None:\r\n cfcn = acollmean\r\n if keepcols == []:\r\n avgcol = acolex(a,collapsecols)\r\n means = N.sum(avgcol)/float(len(avgcol))\r\n if fcn1!=None:\r\n try:\r\n test = fcn1(avgcol)\r\n except:\r\n test = N.array(['N/A']*len(means))\r\n means = aabut(means,test)\r\n if fcn2!=None:\r\n try:\r\n test = fcn2(avgcol)\r\n except:\r\n test = N.array(['N/A']*len(means))\r\n means = aabut(means,test)\r\n return means\r\n else:\r\n if type(keepcols) not in [list,tuple,N.ndarray]:\r\n keepcols = [keepcols]\r\n values = colex(a,keepcols) # so that \"item\" can be appended (below)\r\n uniques = unique(values) # get a LIST, so .sort keeps rows intact\r\n uniques.sort()\r\n newlist = []\r\n for item in uniques:\r\n if type(item) not in [list,tuple,N.ndarray]:\r\n item =[item]\r\n tmprows = alinexand(a,keepcols,item)\r\n for col in collapsecols:\r\n avgcol = acolex(tmprows,col)\r\n item.append(acollmean(avgcol))\r\n if fcn1!=None:\r\n try:\r\n test = fcn1(avgcol)\r\n except:\r\n test = 'N/A'\r\n item.append(test)\r\n if fcn2!=None:\r\n try:\r\n test = fcn2(avgcol)\r\n except:\r\n test = 'N/A'\r\n item.append(test)\r\n newlist.append(item)\r\n try:\r\n new_a = N.array(newlist)\r\n except TypeError:\r\n new_a = N.array(newlist,'O')\r\n return new_a\r\n \r\n \r\n def adm (a,criterion):\r\n \"\"\"\r\n Returns rows from the passed list of lists that meet the criteria in\r\n the passed criterion expression (a string as a function of x).\r\n \r\n Usage: adm (a,criterion) where criterion is like 'x[2]==37'\r\n \"\"\"\r\n function = 'filter(lambda x: '+criterion+',a)'\r\n lines = eval(function)\r\n try:\r\n lines = N.array(lines)\r\n except:\r\n lines = N.array(lines,dtype='O')\r\n return lines\r\n \r\n \r\n def isstring(x):\r\n if type(x)==str:\r\n return 1\r\n else:\r\n return 0\r\n \r\n \r\n def alinexand (a,columnlist,valuelist):\r\n \"\"\"\r\n Returns the rows of an array where col (from columnlist) = val\r\n (from valuelist). 
One value is required for each column in columnlist.\r\n \r\n Usage: alinexand (a,columnlist,valuelist)\r\n Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i\r\n \"\"\"\r\n if type(columnlist) not in [list,tuple,N.ndarray]:\r\n columnlist = [columnlist]\r\n if type(valuelist) not in [list,tuple,N.ndarray]:\r\n valuelist = [valuelist]\r\n criterion = ''\r\n for i in range(len(columnlist)):\r\n if type(valuelist[i])==str:\r\n critval = '\\'' + valuelist[i] + '\\''\r\n else:\r\n critval = str(valuelist[i])\r\n criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'\r\n criterion = criterion[0:-3] # remove the \"and\" after the last crit\r\n return adm(a,criterion)\r\n \r\n \r\n def alinexor (a,columnlist,valuelist):\r\n \"\"\"\r\n Returns the rows of an array where col (from columnlist) = val (from\r\n valuelist). One value is required for each column in columnlist.\r\n The exception is if either columnlist or valuelist has only 1 value,\r\n in which case that item will be expanded to match the length of the\r\n other list.\r\n \r\n Usage: alinexor (a,columnlist,valuelist)\r\n Returns: the rows of a where columnlist[i]=valuelist[i] for ANY i\r\n \"\"\"\r\n if type(columnlist) not in [list,tuple,N.ndarray]:\r\n columnlist = [columnlist]\r\n if type(valuelist) not in [list,tuple,N.ndarray]:\r\n valuelist = [valuelist]\r\n criterion = ''\r\n if len(columnlist) == 1 and len(valuelist) > 1:\r\n columnlist = columnlist*len(valuelist)\r\n elif len(valuelist) == 1 and len(columnlist) > 1:\r\n valuelist = valuelist*len(columnlist)\r\n for i in range(len(columnlist)):\r\n if type(valuelist[i])==str:\r\n critval = '\\'' + valuelist[i] + '\\''\r\n else:\r\n critval = str(valuelist[i])\r\n criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'\r\n criterion = criterion[0:-2] # remove the \"or\" after the last crit\r\n return adm(a,criterion)\r\n \r\n \r\n def areplace (a,oldval,newval):\r\n \"\"\"\r\n Replaces all occurrences of oldval with newval in array a.\r\n \r\n Usage: areplace(a,oldval,newval)\r\n \"\"\"\r\n return N.where(a==oldval,newval,a)\r\n \r\n \r\n def arecode (a,listmap,col='all'):\r\n \"\"\"\r\n Remaps the values in an array to a new set of values (useful when\r\n you need to recode data from (e.g.) strings to numbers as most stats\r\n packages require. Can work on SINGLE columns, or 'all' columns at once.\r\n @@@BROKEN 2007-11-26\r\n \r\n Usage: arecode (a,listmap,col='all')\r\n Returns: a version of array a where listmap[i][0] = (instead) listmap[i][1]\r\n \"\"\"\r\n ashape = a.shape\r\n if col == 'all':\r\n work = a.ravel()\r\n else:\r\n work = acolex(a,col)\r\n work = work.ravel()\r\n for pair in listmap:\r\n if type(pair[1]) == str or work.dtype.char=='O' or a.dtype.char=='O':\r\n work = N.array(work,dtype='O')\r\n a = N.array(a,dtype='O')\r\n for i in range(len(work)):\r\n if work[i]==pair[0]:\r\n work[i] = pair[1]\r\n if col == 'all':\r\n return N.reshape(work,ashape)\r\n else:\r\n return N.concatenate([a[:,0:col],work[:,N.newaxis],a[:,col+1:]],1)\r\n else: # must be a non-Object type array and replacement\r\n work = N.where(work==pair[0],pair[1],work)\r\n return N.concatenate([a[:,0:col],work[:,N.newaxis],a[:,col+1:]],1)\r\n \r\n \r\n def arowcompare(row1, row2):\r\n \"\"\"\r\n Compares two rows from an array, regardless of whether it is an\r\n array of numbers or of python objects (which requires the cmp function).\r\n @@@PURPOSE? 
2007-11-26\r\n \r\n Usage: arowcompare(row1,row2)\r\n Returns: an array of equal length containing 1s where the two rows had\r\n identical elements and 0 otherwise\r\n \"\"\"\r\n return \r\n if row1.dtype.char=='O' or row2.dtype=='O':\r\n cmpvect = N.logical_not(abs(N.array(list(map(cmp,row1,row2))))) # cmp fcn gives -1,0,1\r\n else:\r\n cmpvect = N.equal(row1,row2)\r\n return cmpvect\r\n \r\n \r\n def arowsame(row1, row2):\r\n \"\"\"\r\n Compares two rows from an array, regardless of whether it is an\r\n array of numbers or of python objects (which requires the cmp function).\r\n \r\n Usage: arowsame(row1,row2)\r\n Returns: 1 if the two rows are identical, 0 otherwise.\r\n \"\"\"\r\n cmpval = N.alltrue(arowcompare(row1,row2))\r\n return cmpval\r\n \r\n \r\n def asortrows(a,axis=0):\r\n \"\"\"\r\n Sorts an array \"by rows\". This differs from the Numeric.sort() function,\r\n which sorts elements WITHIN the given axis. Instead, this function keeps\r\n the elements along the given axis intact, but shifts them 'up or down'\r\n relative to one another.\r\n \r\n Usage: asortrows(a,axis=0)\r\n Returns: sorted version of a\r\n \"\"\"\r\n return N.sort(a,axis=axis,kind='mergesort')\r\n \r\n \r\n def aunique(inarray):\r\n \"\"\"\r\n Returns unique items in the FIRST dimension of the passed array. Only\r\n works on arrays NOT including string items.\r\n \r\n Usage: aunique (inarray)\r\n \"\"\"\r\n uniques = N.array([inarray[0]])\r\n if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY\r\n for item in inarray[1:]:\r\n if N.add.reduce(N.equal(uniques,item).ravel()) == 0:\r\n try:\r\n uniques = N.concatenate([uniques,N.array[N.newaxis,:]])\r\n except TypeError:\r\n uniques = N.concatenate([uniques,N.array([item])])\r\n else: # IT MUST BE A 2+D ARRAY\r\n if inarray.dtype.char != 'O': # not an Object array\r\n for item in inarray[1:]:\r\n if not N.sum(N.alltrue(N.equal(uniques,item),1)):\r\n try:\r\n uniques = N.concatenate( [uniques,item[N.newaxis,:]] )\r\n except TypeError: # the item to add isn't a list\r\n uniques = N.concatenate([uniques,N.array([item])])\r\n else:\r\n pass # this item is already in the uniques array\r\n else: # must be an Object array, alltrue/equal functions don't work\r\n for item in inarray[1:]:\r\n newflag = 1\r\n for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=>\r\n test = N.sum(abs(N.array(list(map(cmp,item,unq)))))\r\n if test == 0: # if item identical to any 1 row in uniques\r\n newflag = 0 # then not a novel item to add\r\n break\r\n if newflag == 1:\r\n try:\r\n uniques = N.concatenate( [uniques,item[N.newaxis,:]] )\r\n except TypeError: # the item to add isn't a list\r\n uniques = N.concatenate([uniques,N.array([item])])\r\n return uniques\r\n \r\n \r\n def aduplicates(inarray):\r\n \"\"\"\r\n Returns duplicate items in the FIRST dimension of the passed array. Only\r\n works on arrays NOT including string items.\r\n \r\n Usage: aunique (inarray)\r\n \"\"\"\r\n inarray = N.array(inarray)\r\n if len(inarray.shape) == 1: # IF IT'S A 1D ARRAY\r\n dups = []\r\n inarray = inarray.tolist()\r\n for i in range(len(inarray)):\r\n if inarray[i] in inarray[i+1:]:\r\n dups.append(inarray[i])\r\n dups = aunique(dups)\r\n else: # IT MUST BE A 2+D ARRAY\r\n dups = []\r\n aslist = inarray.tolist()\r\n for i in range(len(aslist)):\r\n if aslist[i] in aslist[i+1:]:\r\n dups.append(aslist[i])\r\n dups = unique(dups)\r\n dups = N.array(dups)\r\n return dups\r\n \r\nexcept ImportError: # IF NUMERIC ISN'T AVAILABLE, SKIP ALL arrayfuncs\r\n pass\r\n" ]
[ [ "numpy.sum", "numpy.take", "numpy.equal", "numpy.reshape", "numpy.where", "numpy.ravel", "numpy.shape", "numpy.sort", "numpy.concatenate", "numpy.array", "numpy.resize" ] ]
nbl97/nni
[ "1530339d3e964a5ea95a0afde1775ec9167cdcc0" ]
[ "nni/retiarii/nn/pytorch/cell.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport copy\nimport warnings\nfrom typing import Callable, Dict, List, Union, Optional, Tuple, Sequence, cast\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal\n\nimport torch\nimport torch.nn as nn\n\nfrom .api import ChosenInputs, LayerChoice, InputChoice\nfrom .nn import ModuleList # pylint: disable=no-name-in-module\nfrom .mutation_utils import generate_new_label\n\n\nclass _ListIdentity(nn.Identity):\n # workaround for torchscript\n def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:\n return x\n\n\nclass _DefaultPostprocessor(nn.Module):\n # this is also a workaround for torchscript\n\n def forward(self, this_cell: torch.Tensor, prev_cell: List[torch.Tensor]) -> torch.Tensor:\n return this_cell\n\n\n_cell_op_factory_type = Callable[[int, int, Optional[int]], nn.Module]\n\n\nclass Cell(nn.Module):\n \"\"\"\n Cell structure that is popularly used in NAS literature.\n\n Find the details in:\n\n * `Neural Architecture Search with Reinforcement Learning <https://arxiv.org/abs/1611.01578>`__.\n * `Learning Transferable Architectures for Scalable Image Recognition <https://arxiv.org/abs/1707.07012>`__.\n * `DARTS: Differentiable Architecture Search <https://arxiv.org/abs/1806.09055>`__\n\n `On Network Design Spaces for Visual Recognition <https://arxiv.org/abs/1905.13214>`__\n is a good summary of how this structure works in practice.\n\n A cell consists of multiple \"nodes\". Each node is a sum of multiple operators. Each operator is chosen from\n ``op_candidates``, and takes one input from previous nodes and predecessors. Predecessor means the input of cell.\n The output of cell is the concatenation of some of the nodes in the cell (by default all the nodes).\n\n Two examples of searched cells are illustrated in the figure below.\n In these two cells, ``op_candidates`` are series of convolutions and pooling operations.\n ``num_nodes_per_node`` is set to 2. ``num_nodes`` is set to 5. ``merge_op`` is ``loose_end``.\n Assuming nodes are enumerated from bottom to top, left to right,\n ``output_node_indices`` for the normal cell is ``[2, 3, 4, 5, 6]``.\n For the reduction cell, it's ``[4, 5, 6]``.\n Please take a look at this\n `review article <https://sh-tsang.medium.com/review-nasnet-neural-architecture-search-network-image-classification-23139ea0425d>`__\n if you are interested in details.\n\n .. image:: ../../../img/nasnet_cell.png\n :width: 900\n :align: center\n\n Here is a glossary table, which could help better understand the terms used above:\n\n .. list-table::\n :widths: 25 75\n :header-rows: 1\n\n * - Name\n - Brief Description\n * - Cell\n - A cell consists of ``num_nodes`` nodes.\n * - Node\n - A node is the **sum** of ``num_ops_per_node`` operators.\n * - Operator\n - Each operator is independently chosen from a list of user-specified candidate operators.\n * - Operator's input\n - Each operator has one input, chosen from previous nodes as well as predecessors.\n * - Predecessors\n - Input of cell. A cell can have multiple predecessors. Predecessors are sent to *preprocessor* for preprocessing.\n * - Cell's output\n - Output of cell. Usually concatenation of some nodes (possibly all nodes) in the cell. Cell's output,\n along with predecessors, are sent to *postprocessor* for postprocessing.\n * - Preprocessor\n - Extra preprocessing to predecessors. 
Usually used in shape alignment (e.g., predecessors have different shapes).\n By default, do nothing.\n * - Postprocessor\n - Extra postprocessing for cell's output. Usually used to chain cells with multiple Predecessors\n (e.g., the next cell wants to have the outputs of both this cell and previous cell as its input).\n By default, directly use this cell's output.\n\n .. tip::\n\n It's highly recommended to make the candidate operators have an output of the same shape as input.\n This is because, there can be dynamic connections within cell. If there's shape change within operations,\n the input shape of the subsequent operation becomes unknown.\n In addition, the final concatenation could have shape mismatch issues.\n\n Parameters\n ----------\n op_candidates : list of module or function, or dict\n A list of modules to choose from, or a function that accepts current index and optionally its input index, and returns a module.\n For example, (2, 3, 0) means the 3rd op in the 2nd node, accepts the 0th node as input.\n The index are enumerated for all nodes including predecessors from 0.\n When first created, the input index is ``None``, meaning unknown.\n Note that in graph execution engine, support of function in ``op_candidates`` is limited.\n num_nodes : int\n Number of nodes in the cell.\n num_ops_per_node: int\n Number of operators in each node. The output of each node is the sum of all operators in the node. Default: 1.\n num_predecessors : int\n Number of inputs of the cell. The input to forward should be a list of tensors. Default: 1.\n merge_op : \"all\", or \"loose_end\"\n If \"all\", all the nodes (except predecessors) will be concatenated as the cell's output, in which case, ``output_node_indices``\n will be ``list(range(num_predecessors, num_predecessors + num_nodes))``.\n If \"loose_end\", only the nodes that have never been used as other nodes' inputs will be concatenated to the output.\n Predecessors are not considered when calculating unused nodes.\n Details can be found in `NDS paper <https://arxiv.org/abs/1905.13214>`__. Default: all.\n preprocessor : callable\n Override this if some extra transformation on cell's input is intended.\n It should be a callable (``nn.Module`` is also acceptable) that takes a list of tensors which are predecessors,\n and outputs a list of tensors, with the same length as input.\n By default, it does nothing to the input.\n postprocessor : callable\n Override this if customization on the output of the cell is intended.\n It should be a callable that takes the output of this cell, and a list which are predecessors.\n Its return type should be either one tensor, or a tuple of tensors.\n The return value of postprocessor is the return value of the cell's forward.\n By default, it returns only the output of the current cell.\n concat_dim : int\n The result will be a concatenation of several nodes on this dim. Default: 1.\n label : str\n Identifier of the cell. 
Cell sharing the same label will semantically share the same choice.\n\n Examples\n --------\n Choose between conv2d and maxpool2d.\n The cell have 4 nodes, 1 op per node, and 2 predecessors.\n\n >>> cell = nn.Cell([nn.Conv2d(32, 32, 3, padding=1), nn.MaxPool2d(3, padding=1)], 4, 1, 2)\n\n In forward:\n\n >>> cell([input1, input2])\n\n The \"list bracket\" can be omitted:\n\n >>> cell(only_input) # only one input\n >>> cell(tensor1, tensor2, tensor3) # multiple inputs\n\n Use ``merge_op`` to specify how to construct the output.\n The output will then have dynamic shape, depending on which input has been used in the cell.\n\n >>> cell = nn.Cell([nn.Conv2d(32, 32, 3), nn.MaxPool2d(3)], 4, 1, 2, merge_op='loose_end')\n >>> cell_out_channels = len(cell.output_node_indices) * 32\n\n The op candidates can be callable that accepts node index in cell, op index in node, and input index.\n\n >>> cell = nn.Cell([\n ... lambda node_index, op_index, input_index: nn.Conv2d(32, 32, 3, stride=2 if input_index < 1 else 1),\n ... ], 4, 1, 2)\n\n Predecessor example: ::\n\n class Preprocessor:\n def __init__(self):\n self.conv1 = nn.Conv2d(16, 32, 1)\n self.conv2 = nn.Conv2d(64, 32, 1)\n\n def forward(self, x):\n return [self.conv1(x[0]), self.conv2(x[1])]\n\n cell = nn.Cell([nn.Conv2d(32, 32, 3), nn.MaxPool2d(3)], 4, 1, 2, preprocessor=Preprocessor())\n cell([torch.randn(1, 16, 48, 48), torch.randn(1, 64, 48, 48)]) # the two inputs will be sent to conv1 and conv2 respectively\n\n Warnings\n --------\n :class:`Cell` is not supported in :ref:`graph-based execution engine <graph-based-execution-engine>`.\n\n Attributes\n ----------\n output_node_indices : list of int\n An attribute that contains indices of the nodes concatenated to the output (a list of integers).\n\n When the cell is first instantiated in the base model, or when ``merge_op`` is ``all``,\n ``output_node_indices`` must be ``range(num_predecessors, num_predecessors + num_nodes)``.\n\n When ``merge_op`` is ``loose_end``, ``output_node_indices`` is useful to compute the shape of this cell's output,\n because the output shape depends on the connection in the cell, and which nodes are \"loose ends\" depends on mutation.\n \"\"\"\n\n def __init__(self,\n op_candidates: Union[\n Callable[[], List[nn.Module]],\n List[nn.Module],\n List[_cell_op_factory_type],\n Dict[str, nn.Module],\n Dict[str, _cell_op_factory_type]\n ],\n num_nodes: int,\n num_ops_per_node: int = 1,\n num_predecessors: int = 1,\n merge_op: Literal['all', 'loose_end'] = 'all',\n preprocessor: Optional[Callable[[List[torch.Tensor]], List[torch.Tensor]]] = None,\n postprocessor: Optional[Callable[[torch.Tensor, List[torch.Tensor]],\n Union[Tuple[torch.Tensor, ...], torch.Tensor]]] = None,\n concat_dim: int = 1,\n *,\n label: Optional[str] = None):\n super().__init__()\n self._label = generate_new_label(label)\n\n # modules are created in \"natural\" order\n # first create preprocessor\n self.preprocessor = preprocessor or _ListIdentity()\n # then create intermediate ops\n self.ops = ModuleList()\n self.inputs = ModuleList()\n # finally postprocessor\n self.postprocessor = postprocessor or _DefaultPostprocessor()\n\n self.num_nodes = num_nodes\n self.num_ops_per_node = num_ops_per_node\n self.num_predecessors = num_predecessors\n assert merge_op in ['all', 'loose_end']\n self.merge_op = merge_op\n self.output_node_indices = list(range(num_predecessors, num_predecessors + num_nodes))\n\n self.concat_dim = concat_dim\n\n # fill-in the missing modules\n 
self._create_modules(op_candidates)\n\n def _create_modules(self, op_candidates):\n for i in range(self.num_predecessors, self.num_nodes + self.num_predecessors):\n self.ops.append(ModuleList())\n self.inputs.append(ModuleList())\n for k in range(self.num_ops_per_node):\n inp = InputChoice(i, 1, label=f'{self.label}/input_{i}_{k}')\n chosen = None\n\n if isinstance(inp, ChosenInputs):\n # now we are in the fixed mode\n # the length of chosen should be 1\n chosen = inp.chosen[0]\n if self.merge_op == 'loose_end' and chosen in self.output_node_indices:\n # remove it from concat indices\n self.output_node_indices.remove(chosen)\n\n # this is needed because op_candidates can be very complex\n # the type annoation and docs for details\n ops = self._convert_op_candidates(op_candidates, i, k, chosen)\n\n # though it's layer choice and input choice here, in fixed mode, the chosen module will be created.\n cast(ModuleList, self.ops[-1]).append(LayerChoice(ops, label=f'{self.label}/op_{i}_{k}'))\n cast(ModuleList, self.inputs[-1]).append(inp)\n\n @property\n def label(self):\n return self._label\n\n def forward(self, *inputs: Union[List[torch.Tensor], torch.Tensor]) -> Union[Tuple[torch.Tensor, ...], torch.Tensor]:\n \"\"\"Forward propagation of cell.\n\n Parameters\n ----------\n inputs\n Can be a list of tensors, or several tensors.\n The length should be equal to ``num_predecessors``.\n\n Returns\n -------\n Tuple[torch.Tensor] | torch.Tensor\n The return type depends on the output of ``postprocessor``.\n By default, it's the output of ``merge_op``, which is a contenation (on ``concat_dim``)\n of some of (possibly all) the nodes' outputs in the cell.\n \"\"\"\n processed_inputs: List[torch.Tensor]\n if len(inputs) == 1 and isinstance(inputs[0], list):\n processed_inputs = list(inputs[0]) # shallow copy\n else:\n processed_inputs = cast(List[torch.Tensor], list(inputs))\n assert len(processed_inputs) == self.num_predecessors, 'The number of inputs must be equal to `num_predecessors`.'\n states: List[torch.Tensor] = self.preprocessor(processed_inputs)\n for ops, inps in zip(\n cast(Sequence[Sequence[LayerChoice]], self.ops),\n cast(Sequence[Sequence[InputChoice]], self.inputs)\n ):\n current_state = []\n for op, inp in zip(ops, inps):\n current_state.append(op(inp(states)))\n current_state = torch.sum(torch.stack(current_state), 0)\n states.append(current_state)\n if self.merge_op == 'all':\n # a special case for graph engine\n this_cell = torch.cat(states[self.num_predecessors:], self.concat_dim)\n else:\n this_cell = torch.cat([states[k] for k in self.output_node_indices], self.concat_dim)\n return self.postprocessor(this_cell, processed_inputs)\n\n @staticmethod\n def _convert_op_candidates(op_candidates, node_index, op_index, chosen) -> Union[Dict[str, nn.Module], List[nn.Module]]:\n # convert the complex type into the type that is acceptable to LayerChoice\n def convert_single_op(op):\n if isinstance(op, nn.Module):\n return copy.deepcopy(op)\n elif callable(op):\n # FIXME: I don't know how to check whether we are in graph engine.\n return op(node_index, op_index, chosen)\n else:\n raise TypeError(f'Unrecognized type {type(op)} for op {op}')\n\n if isinstance(op_candidates, list):\n return [convert_single_op(op) for op in op_candidates]\n elif isinstance(op_candidates, dict):\n return {key: convert_single_op(op) for key, op in op_candidates.items()}\n elif callable(op_candidates):\n warnings.warn(f'Directly passing a callable into Cell is deprecated. 
Please consider migrating to list or dict.',\n DeprecationWarning)\n return op_candidates()\n else:\n raise TypeError(f'Unrecognized type {type(op_candidates)} for {op_candidates}')\n" ]
[ [ "torch.stack", "torch.cat" ] ]
LBJ-Wade/MultiLens
[ "0d88d734b07c178725f926b0a055c6084b91f6d7" ]
[ "MultiLens/MultiLens.py" ]
[ "#! /usr/bin/env python\n\n# Copyright (C) 2016 ETH Zurich, Institute for Astronomy\n\n# System imports\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\n\n# External modules\nimport numpy as np\nimport copy\n\n# MultiLens imports\nfrom MultiLens.analytic_lens import AnalyticLens\nfrom MultiLens.Cosmo.cosmo import CosmoProp\n\nclass MultiLens(object):\n \"\"\"\n this class aims to compute the lensing quantities of multi-plane lenses with full ray-tracing and approximation methods\n \"\"\"\n\n def __init__(self):\n self.analyticLens = AnalyticLens()\n self.cosmo = CosmoProp()\n\n def full_ray_tracing(self, lensAssembly, z_source, x_array, y_array, observer_frame=True):\n \"\"\"\n full ray-tracing routine (eqn 10,11 in Birrer in prep), implemented with equation 12 in a recursive way\n (!assuming flat cosmology!)\n :param object_list: list of sources with specified physical deflection angles (sorted by redshift)\n :param z_source: redshift of the source\n :param x_array: x-coords of the rays\n :param y_array: y-coords of the rays\n :return: deflections delta x_coords, delta y_coords such that x_source = x - delta x_source\n \"\"\"\n if observer_frame:\n self._full_ray_tracing_observer(lensAssembly)\n object_list = lensAssembly.object_array\n alpha_x_tot = copy.deepcopy(x_array)\n alpha_y_tot = copy.deepcopy(y_array)\n x_k = np.zeros_like(alpha_x_tot)\n y_k = np.zeros_like(alpha_x_tot)\n z_last = 0\n for lensObject in object_list:\n z = lensObject.redshift\n if z < z_source:\n T_k_last = self.cosmo.T_xy(z_last, z)\n x_k += alpha_x_tot*T_k_last\n y_k += alpha_y_tot*T_k_last\n x_k_phys, y_k_phys = x_k/(1+z), y_k/(1+z)\n alpha_x, alpha_y = lensObject.deflection(x_k_phys, y_k_phys)\n alpha_x_tot -= alpha_x\n alpha_y_tot -= alpha_y\n z_last = z\n else:\n pass\n T_k_last = self.cosmo.T_xy(z_last, z_source)\n D_s = self.cosmo.D_xy(0, z_source)\n x_k += alpha_x_tot*T_k_last\n y_k += alpha_y_tot*T_k_last\n x_s_phys, y_s_phys = x_k/(1+z_source), y_k/(1+z_source)\n beta_sx = x_s_phys / D_s\n beta_sy = y_s_phys / D_s\n return beta_sx, beta_sy\n\n def _full_ray_tracing_observer(self, lensAssembly):\n \"\"\"\n computes the real positions of the lens objects given the position in the observer frame\n :param lensAssembly:\n :return:\n \"\"\"\n object_list = lensAssembly.object_array\n alpha_x_tot, alpha_y_tot = lensAssembly.get_visible_positions()\n x_k = np.zeros_like(alpha_x_tot)\n y_k = np.zeros_like(alpha_y_tot)\n z_last = 0\n i = 0\n for lensObject in object_list:\n z = lensObject.redshift\n T_k_last = self.cosmo.T_xy(z_last, z)\n x_k += alpha_x_tot*T_k_last\n y_k += alpha_y_tot*T_k_last\n x_k_phys, y_k_phys = x_k/(1+z), y_k/(1+z)\n lensObject.update_position(x_k_phys[i], y_k_phys[i]) # update position of the i'th lens according to the deflection\n alpha_x, alpha_y = lensObject.deflection(x_k_phys, y_k_phys)\n alpha_x_tot -= alpha_x\n alpha_y_tot -= alpha_y\n z_last = z\n i += 1\n return 0\n\n def combined_ray_tracing(self, lensAssembly, z_source, x_array, y_array, observer_frame=True):\n \"\"\"\n ray-tracing routine with Born approximation for the objects specified (eqn 17 in Birrer in prep)\n :param object_list: list of sources with specified physical deflection angles (sorted by redshift)\n :param z_source: redshift of the source\n :param x_array: x-coords of the rays\n :param y_array: y-coords of the rays\n :return: deflections delta x_coords, delta y_coords such that x_source = x - delta x_source\n \"\"\"\n if observer_frame:\n 
self._combined_ray_tracing_observer(lensAssembly, z_source)\n else:\n lensAssembly.reset_observer_frame()\n object_list = lensAssembly.object_array\n mainLens = lensAssembly.main_deflector()\n z_d = mainLens.redshift\n beta_dx = x_array.copy()\n beta_dy = y_array.copy()\n beta_sx = x_array.copy()\n beta_sy = y_array.copy()\n alpha_x_foreground = np.zeros_like(x_array)\n alpha_y_foreground = np.zeros_like(y_array)\n alpha_dx, alpha_dy = 0, 0\n Ds = self.cosmo.D_xy(0, z_source)\n Dd = self.cosmo.D_xy(0, z_d)\n i = 0\n for lensObject in object_list:\n z = lensObject.redshift\n if z < z_d:\n D_k = self.cosmo.D_xy(0, z)\n D_ks = self.cosmo.D_xy(z, z_source)\n D_kd = self.cosmo.D_xy(z, z_d)\n alpha_x, alpha_y = lensObject.deflection(D_k*x_array, D_k*y_array)\n alpha_x_foreground += alpha_x\n alpha_y_foreground += alpha_y\n beta_sx -= D_ks/Ds*alpha_x\n beta_sy -= D_ks/Ds*alpha_y\n beta_dx -= D_kd/Dd*alpha_x\n beta_dy -= D_kd/Dd*alpha_y\n elif lensObject.main is True:\n D_ds = self.cosmo.D_xy(z_d, z_source)\n alpha_dx, alpha_dy = lensObject.deflection(Dd*beta_dx, Dd*beta_dy)\n beta_sx -= D_ds/Ds*alpha_dx\n beta_sy -= D_ds/Ds*alpha_dy\n elif z >= z_d:\n D_k = self.cosmo.D_xy(0, z)\n D_ks = self.cosmo.D_xy(z, z_source)\n D_kd = self.cosmo.D_xy(z_d, z)\n beta_x = beta_dx - D_kd/D_k*(alpha_dx + alpha_x_foreground) # equation 16 in Birrer in prep\n beta_y = beta_dy - D_kd/D_k*(alpha_dy + alpha_y_foreground) # equation 16 in Birrer in prep\n alpha_x, alpha_y = lensObject.deflection(D_k*beta_x, D_k*beta_y)\n beta_sx -= D_ks/Ds*alpha_x\n beta_sy -= D_ks/Ds*alpha_y\n i += 1\n return beta_sx, beta_sy\n\n def _combined_ray_tracing_observer(self, lensAssembly, z_source):\n \"\"\"\n computes the real position of the lensing objects given observer frame coordinates\n :param lensAssembly:\n :return:\n \"\"\"\n object_list = lensAssembly.object_array\n mainLens = lensAssembly.main_deflector()\n z_d = mainLens.redshift\n x_array, y_array = lensAssembly.get_visible_positions()\n beta_dx = x_array.copy()\n beta_dy = y_array.copy()\n beta_sx = x_array.copy()\n beta_sy = y_array.copy()\n alpha_x_foreground = 0\n alpha_y_foreground = 0\n alpha_dx, alpha_dy = 0, 0\n Ds = self.cosmo.D_xy(0, z_source)\n Dd = self.cosmo.D_xy(0, z_d)\n i = 0\n for lensObject in object_list:\n z = lensObject.redshift\n if z < z_d:\n D_k = self.cosmo.D_xy(0, z)\n D_ks = self.cosmo.D_xy(z, z_source)\n D_kd = self.cosmo.D_xy(z, z_d)\n lensObject.update_position(D_k*x_array[i], D_k*y_array[i])\n alpha_x, alpha_y = lensObject.deflection(D_k*x_array, D_k*y_array)\n alpha_x_foreground += alpha_x\n alpha_y_foreground += alpha_y\n beta_sx -= D_ks/Ds*alpha_x\n beta_sy -= D_ks/Ds*alpha_y\n beta_dx -= D_kd/Dd*alpha_x\n beta_dy -= D_kd/Dd*alpha_y\n elif lensObject.main is True:\n D_ds = self.cosmo.D_xy(z_d, z_source)\n lensObject.update_position(Dd*x_array[i], Dd*y_array[i])\n alpha_dx, alpha_dy = lensObject.deflection(Dd*beta_dx, Dd*beta_dy)\n alpha_dx *= D_ds/Ds\n alpha_dy *= D_ds/Ds\n beta_sx -= alpha_dx\n beta_sy -= alpha_dy\n elif z >= z_d:\n D_k = self.cosmo.D_xy(0, z)\n D_kd = self.cosmo.D_xy(z_d, z)\n beta_x = beta_dx - D_kd/D_k*(alpha_dx + alpha_x_foreground) # equation 16 in Birrer in prep\n beta_y = beta_dy - D_kd/D_k*(alpha_dy + alpha_y_foreground) # equation 16 in Birrer in prep\n lensObject.update_position(D_k*beta_x[i], D_k*beta_y[i])\n i += 1\n return 0\n\n def born_ray_tracing(self, lensAssembly, z_source, x_array, y_array):\n \"\"\"\n routine with Born approximation for all objects (eqn 14 in Birrer in prep)\n :param object_list: list 
of sources with specified physical deflection angles (sorted by redshift)\n :param z_source: redshift of the source\n :param x_array: x-coords of the rays\n :param y_array: y-coords of the rays\n :return: deflections delta x_coords, delta y_coords such that x_source = x - delta x_source\n \"\"\"\n lensAssembly.reset_observer_frame()\n object_list = lensAssembly.object_array\n beta_sx = copy.deepcopy(y_array)\n beta_sy = copy.deepcopy(x_array)\n Ds = self.cosmo.D_xy(0, z_source)\n for lensObject in object_list:\n z = lensObject.redshift\n if z < z_source:\n D_k = self.cosmo.D_xy(0, z)\n D_ks = self.cosmo.D_xy(z, z_source)\n delta_x, delta_y = lensObject.deflection(D_k*x_array, D_k*y_array)\n beta_sx -= delta_x*D_ks/Ds\n beta_sy -= delta_y*D_ks/Ds\n return beta_sx, beta_sy\n\n def analytic_mapping(self, lensAssembly, z_source, x_array, y_array, LOS_corrected=True, observer_frame=True):\n \"\"\"\n computes equation 29 in Birrer in prep with analytic terms for the LOS structure\n :param object_list:\n :param z_source:\n :param x_array:\n :param y_array:\n :return:\n \"\"\"\n if observer_frame:\n self._full_ray_tracing_observer(lensAssembly)\n else:\n lensAssembly.reset_observer_frame()\n object_list = lensAssembly.object_array\n mainLens = lensAssembly.main_deflector()\n z_d = mainLens.redshift\n D_ds = self.cosmo.D_xy(z_d, z_source)\n Ds = self.cosmo.D_xy(0, z_source)\n Dd = self.cosmo.D_xy(0, z_d)\n gamma_A = self.analyticLens.shear_lens(object_list, z_d)\n gamma_B = self.analyticLens.shear_foreground(object_list, z_lens=z_d, z_source=z_source)\n if LOS_corrected is True:\n gamma_C = self.analyticLens.shear_background_first_order(object_list, z_d, z_source)\n else:\n gamma_C = self.analyticLens.shear_background_zero(object_list, z_d, z_source)\n gamma_BC = gamma_B + gamma_C\n x_lens = gamma_A[0][0]*x_array + gamma_A[0][1]*y_array + x_array\n y_lens = gamma_A[1][0]*x_array + gamma_A[1][1]*y_array + y_array\n shear_x = gamma_BC[0][0]*x_array + gamma_BC[0][1]*y_array\n shear_y = gamma_BC[1][0]*x_array + gamma_BC[1][1]*y_array\n\n alpha_x, alpha_y = mainLens.deflection(Dd*x_lens, Dd*y_lens)\n beta_sx = x_array - D_ds/Ds * alpha_x + shear_x\n beta_sy = y_array - D_ds/Ds * alpha_y + shear_y\n return beta_sx, beta_sy\n\n def analytic_matrices(self, lensAssembly, z_source, LOS_corrected=True, observer_frame=True):\n \"\"\"\n computes equation 29 in Birrer in prep with analytic terms for the LOS structure\n :param object_list:\n :param z_source:\n :param x_array:\n :param y_array:\n :return:\n \"\"\"\n if observer_frame:\n self._full_ray_tracing_observer(lensAssembly)\n else:\n lensAssembly.reset_observer_frame()\n object_list = lensAssembly.object_array\n mainLens = lensAssembly.main_deflector()\n z_d = mainLens.redshift\n gamma_A = self.analyticLens.shear_lens(object_list, z_d)\n gamma_B = self.analyticLens.shear_foreground(object_list, z_lens=z_d, z_source=z_source)\n if LOS_corrected is True:\n gamma_C = self.analyticLens.shear_background_first_order(object_list, z_d, z_source)\n else:\n gamma_C = self.analyticLens.shear_background_zero(object_list, z_d, z_source)\n gamma_BC = gamma_B + gamma_C\n\n return gamma_A, gamma_BC" ]
[ [ "numpy.zeros_like" ] ]
kizzhang/langevinSimulation
[ "98e0c3f33e4c9d6ecb11972e5fbe9388626274f8" ]
[ "abp_modified.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 19 12:08:47 2021\r\n\r\n@author: Kaneki\r\n\"\"\"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 18 17:28:36 2021\r\n\r\n@author: Kaneki\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\n\r\ndef Periodicity(pos,l):\r\n if pos >= -l and pos <= l :\r\n return pos\r\n elif pos < -l:\r\n return pos + 2*l\r\n elif pos > l:\r\n return pos - 2*l\r\n\r\ndef ABP_move(t, dt, N, crit_coll_num, l):\r\n coll_num = np.zeros((N, int(t/dt)))\r\n for i in range(0, int(t/dt) - 1): # time evolution\r\n # Collision\r\n for p1 in range(0,N):\r\n for p2 in range(p1,N):\r\n if p1 == p2:\r\n continue\r\n \r\n # Collision criteria\r\n r = np.sqrt((x[p1,i] - x[p2,i]) ** 2 + (y[p1,i] - y[p2,i]) ** 2)\r\n if r > 2.1 * a:\r\n continue\r\n else:\r\n coll_num[p1,i] += 1\r\n coll_num[p2,i] += 1\r\n \r\n \r\n for dum in range(len(coll_num)):\r\n if coll_num[dum, i] >= crit_coll_num:\r\n theta[dum,i] = theta[dum,i] + np.random.uniform(0,2*np.pi) # a random angle to avoid coll \r\n dx = v * np.cos(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()\r\n dy = v * np.sin(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()\r\n \r\n \r\n x_new = x[dum,i] + dx\r\n y_new = y[dum,i] + dy\r\n \r\n theta[dum,i+1] = theta[dum,i] + np.sqrt(2*Dr*dt) * np.random.randn()\r\n\r\n # Periodic boundary condition on x \r\n x[dum,i+1] = Periodicity(x_new, l)\r\n \r\n # Periodic boundary condition on y \r\n y[dum,i+1] = Periodicity(y_new, l)\r\n \r\n # x position if there is no jump\r\n x_nojump[dum,i+1] = x_nojump[dum,i] + dx \r\n \r\n # y position if there is no jump\r\n y_nojump[dum,i+1] = y_nojump[dum,i] + dy\r\n \r\n else:\r\n dx = v * np.cos(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()\r\n dy = v * np.sin(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()\r\n \r\n x_new = x[dum,i] + dx\r\n y_new = y[dum,i] + dy\r\n \r\n theta[dum,i+1] = theta[dum,i] + np.sqrt(2*Dr*dt) * np.random.randn() \r\n \r\n # Periodic boundary condition on x \r\n x[dum,i+1] = Periodicity(x_new, l)\r\n \r\n # Periodic boundary condition on x \r\n y[dum,i+1] = Periodicity(y_new,l)\r\n \r\n # x position if there is no jump\r\n x_nojump[dum,i+1] = x_nojump[dum,i] + dx \r\n \r\n # y position if there is no jump\r\n y_nojump[dum,i+1] = y_nojump[dum,i] + dy\r\n \r\n print(\"Time Step: \", i)\r\n return x, y, theta, coll_num\r\n\r\n# CONSTANTS\r\n\r\nv = 3.12e-5 # swimming speed of B. Subtilis [m/s]\r\nk = 1.38e-23 # Boltzmann constant [m^2kg/s^2K]\r\nT = 293 # Room temperature [K]\r\neta = 1e-3 # viscosity of water [Pa s]\r\na = 2e-6 # spherical cell radius [m]\r\nDr = k*T/(8*np.pi*eta*a**3) # rotational diffusion coefficient of B. Subtilis\r\nDt = k*T/(6*np.pi*eta*a) # translation diffusion coefficient of B. 
Subtilis\r\n\r\n# ADJUSTABLE PARAMETERS\r\n\r\nt = 10 # time over which motion is observed [s]\r\ndt = 0.01 # time step between recorded positions\r\nN = 900 # number of cells \r\ncrit_coll_num = 1 # number of collisions a bacetrium will walk away\r\nl = 0.5 * 1e-4 # box width\r\npsi = N * np.pi * a**2 / (2*l)**2 # packing fraction\r\n\r\n\r\n# INITIAL CONDITIONS\r\n\r\ntheta = np.zeros((N,int(t/dt))) # initial swimming orientation [radians]\r\nx = np.zeros((N,int(t/dt))) # initial x position [m]\r\ny = np.zeros((N,int(t/dt))) # initial y position [m]\r\nx_nojump = np.zeros((N,int(t/dt))) # x position without jump\r\ny_nojump = np.zeros((N,int(t/dt))) # y position without jump\r\n\r\n# Initializing x y theta\r\nfor n in range(N): \r\n # x positions\r\n x[n,0] = np.random.uniform(-l,l)\r\n x_nojump[n,0] = x[n,0]\r\n \r\n # y positions\r\n y[n,0] = np.random.uniform(-l,l)\r\n y_nojump[n,0] = y[n,0]\r\n \r\n theta[n,0] = np.random.uniform(-2*np.pi, 2*np.pi)\r\n \r\n \r\nx,y,_,col_num = ABP_move(t,dt,N,crit_coll_num,l)\r\nprint(\"Packing Fraction = \", psi)\r\n\r\n'''\r\nimport pandas as pd\r\ndf_x = pd.DataFrame(x)\r\ndf_y = pd.DataFrame(y)\r\ndf_x_non_p = pd.DataFrame(x_nojump)\r\ndf_y_non_p = pd.DataFrame(y_nojump)\r\n\r\n\r\ndf_x.to_csv('x_p.dat')\r\ndf_y.to_csv('y_p.dat')\r\ndf_x_non_p.to_csv('x_nonp.dat')\r\ndf_y_non_p.to_csv('y_nonp.dat')\r\n'''\r\n\r\n# MAIN SCRIPT\r\n\r\nfig = plt.figure(dpi = 141)\r\nax = plt.axes(xlim=(-1*l, 1*l), ylim=(-1*l, 1*l))\r\nax.set_aspect(1)\r\nfig.canvas.draw()\r\n\r\ns = (ax.get_window_extent().width * 72./fig.dpi * a / l)**2\r\n\r\nscat = ax.scatter([], [], s)\r\nscat1 = ax.scatter([], [], s) \r\n\r\ndef animation(frame):\r\n data = np.hstack((x[:,frame, np.newaxis], y[:,frame, np.newaxis]))\r\n scat.set_offsets(data)\r\n return scat,\r\n \r\ndef animation_non_Periodic(frame):\r\n data1 = np.hstack((x_nojump[:,frame, np.newaxis], y_nojump[:,frame, np.newaxis]))\r\n scat1.set_offsets(data1)\r\n return scat1,\r\n\r\ndef animation_with_trajectory(frame):\r\n ax.cla()\r\n for i in range(N):\r\n ax.plot(x[i,:frame], y[i,:frame], linestyle = '-', color = 'blue')\r\n ax.plot(x[i,frame], y[i,frame], 'ro')\r\n \r\n ax.set_xlim(-l,l)\r\n ax.set_ylim(-l,l)\r\n\r\nani = FuncAnimation(fig, animation, frames=range(int(t/dt)),\\\r\n interval = 10, repeat=False)\r\n\r\nani.save(\"movie2.mp4\", fps = 40)\r\n" ]
[ [ "numpy.random.uniform", "matplotlib.pyplot.figure", "numpy.random.randn", "numpy.cos", "matplotlib.pyplot.axes", "numpy.hstack", "numpy.sqrt", "numpy.sin" ] ]
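The simulation embedded above advances each cell with the standard active-Brownian update: self-propulsion along the heading θ plus translational noise, rotational diffusion of θ, and periodic wrapping of the position. Below is a minimal single-particle sketch of that update rule using the same constants stated in the file; it is an illustration only, not code from the repository.

```python
import numpy as np

# Constants as stated in the file above (B. subtilis-like parameters).
v = 3.12e-5                              # swimming speed [m/s]
k, T, eta, a = 1.38e-23, 293, 1e-3, 2e-6
Dr = k * T / (8 * np.pi * eta * a**3)    # rotational diffusion coefficient
Dt = k * T / (6 * np.pi * eta * a)       # translational diffusion coefficient

def wrap(pos, l):
    """Periodic boundary condition on [-l, l], as in Periodicity()."""
    if pos < -l:
        return pos + 2 * l
    if pos > l:
        return pos - 2 * l
    return pos

def abp_step(x, y, theta, dt, l, rng):
    """One Euler-Maruyama step of the active-Brownian update used above."""
    dx = v * np.cos(theta) * dt + np.sqrt(2 * Dt * dt) * rng.standard_normal()
    dy = v * np.sin(theta) * dt + np.sqrt(2 * Dt * dt) * rng.standard_normal()
    theta_new = theta + np.sqrt(2 * Dr * dt) * rng.standard_normal()
    return wrap(x + dx, l), wrap(y + dy, l), theta_new

rng = np.random.default_rng(0)
x, y, theta = 0.0, 0.0, rng.uniform(-2 * np.pi, 2 * np.pi)
for _ in range(1000):
    x, y, theta = abp_step(x, y, theta, dt=0.01, l=0.5e-4, rng=rng)
print(x, y, theta)
```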
cathyzhyi/mlir-npcomp
[ "79a3f639bfb449ba1409ff6dff161badf5a11b44" ]
[ "frontends/pytorch/test/ivalue_import/list.py" ]
[ "# -*- Python -*-\n# This file is licensed under a pytorch-style license\n# See frontends/pytorch/LICENSE for license information.\n\nimport typing\n\nimport torch\nimport torch_mlir\n\n# RUN: %PYTHON %s | npcomp-opt | FileCheck %s\n\nmb = torch_mlir.ModuleBuilder()\n\nclass TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.l = [1, 2]\n# CHECK: torch.class_type @[[CLASSTYPE:.*]] {\n# TODO: Don't lose element type.\n# CHECK: torch.attr \"l\" : !basicpy.ListType\n# CHECK: }\n# CHECK: %[[N1:.*]] = basicpy.numeric_constant 1 : i64\n# CHECK: %[[N2:.*]] = basicpy.numeric_constant 2 : i64\n# CHECK: %[[LIST:.*]] = basicpy.build_list %[[N1]], %[[N2]] : (i64, i64) -> !basicpy.ListType\n# CHECK: torch.nn_module {\n# CHECK: torch.slot \"l\", %[[LIST]] : !basicpy.ListType\n# CHECK: } : !torch.nn.Module<\"[[CLASSTYPE]]\">\n\n\ntest_module = TestModule()\nrecursivescriptmodule = torch.jit.script(test_module)\n# TODO: Automatically handle unpacking Python class RecursiveScriptModule into the underlying ScriptModule.\nmb.import_module(recursivescriptmodule._c)\nmb.module.operation.print()\n" ]
[ [ "torch.jit.script" ] ]
LeoLugoF/TDSpectrum
[ "bec2a86294ab563db889e8a4b1f9c9d7c4599871" ]
[ "TDSpectrum.py" ]
[ "import matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport platform\r\nfrom math import exp\r\n\r\n#########################################################################\r\n# TDSpectrum #\r\n# Made by: Leonardo Israel Lugo Fuentes (LeoLugoF) # \r\n# Date: 30/May/2019 #\r\n# Requirements: Mathplotlib, numpy. #\r\n# #\r\n# Description: #\r\n# This program reads gaussian output files with .log or .out termation. #\r\n# It reads the oscilator forces and wave lenght for each excitation, #\r\n# and shows a UV-Vis spectrum, but the data can also be saved. #\r\n# Data is stored with name of the old file + \".txt\". #\r\n# How the calculations are done can be consulted at the gaussian page: #\r\n# https://gaussian.com/uvvisplot/ #\r\n# Note: Y values are in absorbance e(L mol-1 cm-1) #\r\n# #\r\n# Command line:[python3] [*.py] [*.log/*.out] [Sigma] [MinWave] ... #\r\n# [MaxWave] [NONE/-s] #\r\n# Arguments: #\r\n# [Sigma] = Value of sigma; Gaussian recommends 0.4 (eV). #\r\n# [MinWave] = Minimum Wavelenght (nm) #\r\n# [MaxWave] = Maximum Wavelenght (nm) #\r\n# [-s] = Saves only the data; doesn't show the graph #\r\n# [NONE] = Shows the graph only without saving data #\r\n# #\r\n# Examples: #\r\n# Example 1: python TDSpectrum.py YourFile.log 0.4 300 500 #\r\n# Example 2: python TDSpectrum.py YourFile.out 0.2 200 600 -s #\r\n# #\r\n# The first example will show only the UV-Vis plot. #\r\n# The second example will save only the data without showing the plot. #\r\n#########################################################################\r\n\r\nOSplit = \"\\\\\"\r\nif platform.system() == \"Linux\":\r\n OSplit = \"/\"\r\n\r\nclass Global:\r\n \"\"\"Global variables; Stores information.\"\"\"\r\n WaveLenghts = np.array([])\r\n Forces = np.array([])\r\n XValues = np.array([])\r\n YValues = np.array([])\r\n ShowPlot = True\r\n\r\ndef ReadFile(FilePath):\r\n \"\"\"Reads the output file and stores the information.\"\"\"\r\n fstream = open(FilePath,\"r\")\r\n lines = fstream.readlines()\r\n fstream.close()\r\n for line in lines:\r\n if \"Excited State\" in line and \"<S**2>=\" in line:\r\n i = 0\r\n for subsentence in line.split(\" \"):\r\n if(len(subsentence) > 1):\r\n if i == 6:\r\n # This element always corresponds to the Wavelenght (nm)\r\n Global.WaveLenghts = np.append(Global.WaveLenghts,float(subsentence))\r\n i += 1\r\n elif i == 8:\r\n # This element always corresponds to the oscilator force (F)\r\n Global.Forces = np.append(Global.Forces,float(subsentence.split(\"=\")[1]))\r\n break\r\n else:\r\n i += 1 \r\n return\r\n\r\ndef DoCalcs(Sigma,MinWave,MaxWave):\r\n \"\"\"Calculates the Y values from the MinWave and MaxWave giving with the sigma value.\"\"\"\r\n CMm1 = Sigma*(10**7)*0.000806556\r\n NMm1 = 0.000806556*Sigma\r\n Global.XValues = np.arange(MinWave,MaxWave,1)\r\n Global.YValues = np.zeros(len(Global.XValues))\r\n Matrix = np.zeros((len(Global.XValues),len(Global.WaveLenghts)))\r\n #Row number\r\n i = 0\r\n for row in Matrix:\r\n #Column Number\r\n j = 0\r\n for cell in row:\r\n Constant = 130629740*(Global.Forces[j]/CMm1)\r\n Matrix[i,j] = Constant*exp(-((((1/Global.XValues[i])-(1/Global.WaveLenghts[j]))/NMm1)**2)) \r\n j += 1 \r\n i += 1\r\n #Sum columns\r\n i = 0\r\n for Row in Matrix:\r\n Summatory = 0\r\n for Cell in Row:\r\n Summatory += Cell\r\n Global.YValues[i] = Summatory\r\n i += 1\r\n return\r\n\r\ndef ShowGraph():\r\n \"\"\"Shows the plot,\"\"\"\r\n fig, ax = plt.subplots()\r\n ax.plot(Global.XValues,Global.YValues)\r\n 
plt.xlabel(\"λ(nm)\")\r\n plt.ylabel(\"e(L mol-1 cm-1)\")\r\n plt.show()\r\n\r\ndef SaveFile():\r\n \"\"\"Stores the x and y data into a text file.\"\"\"\r\n SaveFilePath = FilePath.split(\".\")[0] + \".txt\"\r\n f = open(SaveFilePath,\"a\")\r\n i = 0\r\n for XValue in Global.XValues:\r\n f.write(str(XValue) + \"\\t\" + str(Global.YValues[i]) + \"\\n\")\r\n i += 1\r\n f.close()\r\n\r\n\r\nFilePath = \"\"\r\ni = 0\r\n#Reads the extra comment arguments giving\r\nfor arg in sys.argv:\r\n if \".out\" in arg or \".log\" in arg or \".OUT\" in arg or \".LOG\" in arg:\r\n FilePath = os.getcwd() + OSplit + arg\r\n elif \"-s\" in arg:\r\n Global.ShowPlot = False\r\n else:\r\n try:\r\n Number = float(arg)\r\n if i == 0:\r\n Sigma = Number\r\n if i == 1:\r\n MinWave = Number\r\n if i == 2:\r\n MaxWave = Number\r\n i += 1\r\n except:\r\n pass\r\n \r\n#If no comment arguments are giving it will ask for it.\r\nif FilePath == \"\":\r\n FilePath = input(\"Please Insert the file path: \")\r\n ReadFile(FilePath)\r\n Sigma = input(\"Sigma Value: \")\r\n MinWave = input(\"Min WaveLenght (nm): \")\r\n MaxWave = input(\"Max WaveLenght (nm): \")\r\n\r\nReadFile(FilePath)\r\nif(len(Global.WaveLenghts) == 0):\r\n print(\"No excited states found.\")\r\nelse:\r\n DoCalcs(float(Sigma),float(MinWave),float(MaxWave))\r\n if Global.ShowPlot is True:\r\n ShowGraph()\r\n else:\r\n SaveFile()\r\n" ]
[ [ "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
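The header comment of the script above describes how oscillator strengths are Gaussian-broadened into an absorption spectrum (following the formula documented at https://gaussian.com/uvvisplot/). The snippet below is a compact vectorized sketch of the same calculation in DoCalcs(), run on made-up excitation data; it mirrors the script's constants but is not part of the repository.

```python
import numpy as np

def uvvis_spectrum(wavelengths_nm, forces, sigma_ev, lam_grid_nm):
    """Broaden discrete excitations into epsilon(lambda), as in DoCalcs()."""
    sigma_cm1 = sigma_ev * 1e7 * 0.000806556     # sigma in cm^-1 (CMm1)
    sigma_nm1 = sigma_ev * 0.000806556           # sigma in nm^-1 (NMm1)
    lam = lam_grid_nm[:, None]                   # (n_points, 1)
    lam_i = np.asarray(wavelengths_nm)[None, :]  # (1, n_excitations)
    f_i = np.asarray(forces)[None, :]
    prefactor = 130629740.0 * f_i / sigma_cm1
    eps = prefactor * np.exp(-(((1.0 / lam) - (1.0 / lam_i)) / sigma_nm1) ** 2)
    return eps.sum(axis=1)

# Made-up example: two excitations at 320 nm (f=0.12) and 410 nm (f=0.05).
grid = np.arange(250, 550, 1.0)
spectrum = uvvis_spectrum([320.0, 410.0], [0.12, 0.05],
                          sigma_ev=0.4, lam_grid_nm=grid)
print(grid[np.argmax(spectrum)], spectrum.max())
```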
anyboby/ConstrainedMBPO
[ "036f4ffefc464e676a287c35c92cc5c0b8925fcf" ]
[ "softlearning/samplers/model_sampler.py" ]
[ "from collections import defaultdict\nfrom collections import deque, OrderedDict\nfrom itertools import islice\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nfrom softlearning.samplers.cpo_sampler import CpoSampler\nfrom softlearning.policies.safe_utils.logx import EpochLogger\nfrom softlearning.policies.safe_utils.mpi_tools import mpi_sum\n\nfrom .base_sampler import BaseSampler\nACTION_PROCESS_ENVS = [\n 'Safexp-PointGoal2',\n ]\nEPS = 1e-8\n\nclass ModelSampler(CpoSampler):\n def __init__(self,\n max_path_length,\n batch_size=1000,\n store_last_n_paths = 10,\n cares_about_cost = False,\n max_uncertainty_c = 3,\n max_uncertainty_r = 3,\n rollout_mode = False,\n logger = None):\n self._max_path_length = max_path_length\n self._path_length = np.zeros(batch_size)\n\n self.cares_about_cost = cares_about_cost\n self.rollout_mode = rollout_mode\n\n if logger:\n self.logger = logger\n else: \n self.logger = EpochLogger()\n\n self._store_last_n_paths = store_last_n_paths\n self._last_n_paths = deque(maxlen=store_last_n_paths)\n\n self._current_path = defaultdict(list)\n self._last_path_return = 0\n self._max_path_return = -np.inf\n self._current_observation = None\n self._last_action = None\n self._max_uncertainty_c = max_uncertainty_c\n self._max_uncertainty_rew = max_uncertainty_r,\n\n self._total_samples = 0\n self._n_episodes = 0\n self._total_Vs = 0\n self._total_CVs = 0\n self._total_rew = 0\n self._total_rew_var = 0\n self._total_cost = 0\n self._total_cost_var = 0\n self._total_dyn_ep_var = 0\n self._path_dyn_var = 0\n self._total_dkl = 0\n self._max_dkl = 0\n self._dyn_dkl_path = 0\n self._total_mean_var = 0\n\n self.env = None\n self.policy = None\n self.pool = None\n\n def initialize(self, env, policy, pool):\n self.env = env\n self.policy = policy\n self.pool = pool\n self.ensemble_size = env.num_networks\n\n def set_debug_buf(self, pool):\n self.pool_debug = pool\n\n def set_policy(self, policy):\n self.policy = policy\n\n def set_logger(self, logger):\n \"\"\"\n provide a logger (Sampler creates it's own logger by default, \n but you might want to share a logger between algo, samplers, etc.)\n \n automatically shares logger with agent\n Args: \n logger : instance of EpochLogger\n \"\"\" \n self.logger = logger \n\n def terminate(self):\n self.env.close()\n\n def get_diagnostics(self):\n diagnostics = OrderedDict({'pool-size': self.pool.size})\n mean_rollout_length = self._total_samples / (self.batch_size+EPS)\n\n ensemble_rew_var_perstep = self._total_rew_var/(self._total_samples+EPS)\n ensemble_cost_var_perstep = self._total_cost_var/(self._total_samples+EPS)\n ensemble_dyn_var_perstep = self._total_dyn_ep_var/(self._total_samples+EPS)\n\n if len(self._path_cost.shape)>1:\n cost_sum = np.sum(np.mean(self._path_cost, axis=0))\n else:\n cost_sum = np.sum(self._path_cost)\n \n if len(self._path_return.shape)>1:\n ret_sum = np.sum(np.mean(self._path_return, axis=0))\n else:\n ret_sum = np.sum(self._path_return)\n\n ensemble_cost_rate = cost_sum/(self._total_samples+EPS)\n ensemble_rew_rate = ret_sum/(self._total_samples+EPS)\n\n vals_mean = self._total_Vs / (self._total_samples+EPS)\n\n cval_mean = self._total_CVs / (self._total_samples+EPS)\n\n dyn_Dkl = self._total_dkl / (self._total_samples+EPS)\n mean_var = self._total_mean_var/ (self._total_samples+EPS)\n diagnostics.update({\n 'msampler/samples_added': self._total_samples,\n 'msampler/rollout_H_max': self._n_episodes,\n 'msampler/rollout_H_mean': mean_rollout_length,\n 'msampler/rew_var_perstep': 
ensemble_rew_var_perstep,\n 'msampler/cost_var_perstep' : ensemble_cost_var_perstep,\n 'msampler/dyn_var_perstep' : ensemble_dyn_var_perstep,\n 'msampler/cost_rate' : ensemble_cost_rate,\n 'msampler/rew_rate' : ensemble_rew_rate,\n 'msampler/v_mean':vals_mean,\n 'msampler/cv_mean':cval_mean,\n 'msampler/ens_DKL': dyn_Dkl,\n 'msampler/ens_mean_var': mean_var,\n 'msampler/max_path_return': self._max_path_return,\n 'msampler/max_dkl': self._max_dkl,\n })\n\n return diagnostics\n\n def __getstate__(self):\n state = {\n key: value for key, value in self.__dict__.items()\n if key not in ('env', 'policy', 'pool')\n }\n\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n self.env = None\n self.policy = None\n self.pool = None\n\n def clear_last_n_paths(self):\n self._last_n_paths.clear()\n\n def compute_dynamics_dkl(self, obs_batch, depth=1):\n for _ in range(depth):\n get_action_outs = self.policy.get_action_outs(obs_batch, factored=True, inc_var=True)\n a = get_action_outs['pi']\n next_obs, _, terminal, info = self.env.step(obs_batch, a)\n dyn_dkl_mean = info.get('ensemble_dkl_mean', 0)\n \n n_paths = next_obs.shape[0]\n self._total_dkl += dyn_dkl_mean*n_paths\n self._total_samples += n_paths\n\n obs_batch = next_obs[np.squeeze(~terminal)]\n\n dkl_mean = self.dyn_dkl\n dkl_path_mean = dkl_mean*depth\n\n return dkl_path_mean\n \n def set_rollout_dkl(self, dkl):\n self.dkl_lim = dkl\n\n def set_max_path_length(self, path_length):\n self._max_path_length = path_length\n\n def get_last_n_paths(self, n=None):\n if n is None:\n n = self._store_last_n_paths\n\n last_n_paths = tuple(islice(self._last_n_paths, None, n))\n\n return last_n_paths\n\n @property\n def dyn_dkl(self):\n return self._total_dkl / (self._total_samples+EPS)\n\n def batch_ready(self):\n return self.pool.size >= self.pool.max_size\n\n def _process_observations(self,\n observation,\n action,\n reward,\n cost,\n terminal,\n next_observation,\n info):\n\n processed_observation = {\n 'observations': observation,\n 'actions': action,\n 'rewards': reward,\n 'cost' : cost,\n 'terminals': terminal,\n 'next_observations': next_observation,\n 'infos': info,\n }\n\n return processed_observation\n\n def reset(self, observations):\n self.batch_size = observations.shape[0]\n\n self._starting_uncertainty = np.zeros(self.batch_size)\n\n if self.rollout_mode == 'iv_gae':\n self._current_observation = np.tile(observations[None], (self.ensemble_size, 1, 1))\n else:\n self._current_observation = observations\n\n self.policy.reset() #does nohing for cpo policy atm\n self.pool.reset(self.batch_size, self.env.dyn_target_var)\n\n self._path_length = np.zeros(self.batch_size)\n if self.rollout_mode=='iv_gae':\n self._path_return = np.zeros(shape=(self.ensemble_size, self.batch_size))\n self._path_cost = np.zeros(shape=(self.ensemble_size, self.batch_size))\n else:\n self._path_return = np.zeros(shape=(self.batch_size))\n self._path_cost = np.zeros(shape=(self.batch_size))\n\n self._path_return_var = np.zeros(self.batch_size)\n self._path_cost_var = np.zeros(self.batch_size)\n self._path_dyn_var = np.zeros(self.batch_size)\n self._dyn_dkl_path = np.zeros(self.batch_size)\n\n self.model_inds = np.random.randint(self.ensemble_size)\n self.v_inds = self.policy.random_v_inds(self.batch_size)\n self.vc_inds = self.policy.random_vc_inds(self.batch_size)\n self.batch_inds = np.arange(0, self.batch_size)\n\n self._total_samples = 0\n self._n_episodes = 0\n self._total_Vs = 0\n self._total_CVs = 0\n self._total_cost = 0\n 
self._total_cost_var = 0\n self._total_rew = 0\n self._total_rew_var = 0\n self._total_dyn_ep_var = 0\n self._total_dkl = 0\n self._max_dkl = 0\n self._total_mean_var = 0\n self._max_path_return = 0\n\n def sample(self, max_samples=None):\n assert self.pool.has_room #pool full! empty before sampling.\n assert self._current_observation is not None # reset before sampling !\n assert self.pool.alive_paths.any() # reset before sampling !\n\n self._n_episodes += 1\n alive_paths = self.pool.alive_paths\n current_obs = self._current_observation\n\n # Get outputs from policy\n get_action_outs = self.policy.get_action_outs(current_obs, factored=True, inc_var=True)\n \n a = get_action_outs['pi']\n logp_t = get_action_outs['logp_pi']\n pi_info_t = get_action_outs['pi_info']\n\n ##### @anyboby temporary\n ### unpack ensemble outputs, if gaussian\n if self.rollout_mode=='iv_gae':\n v_t = get_action_outs['v']\n vc_t = get_action_outs.get('vc', 0)\n\n v_var = get_action_outs.get('v_var', 0)\n vc_var = get_action_outs.get('vc_var', 0) \n else:\n v_t = get_action_outs['v'][self.v_inds,self.batch_inds]\n vc_t = get_action_outs['vc'][self.vc_inds,self.batch_inds]\n\n v_var = np.mean(get_action_outs.get('v_var', 0), axis=0)\n vc_var = np.mean(get_action_outs.get('vc_var', 0), axis=0)\n #####\n\n ## ____________________________________________ ##\n ## Step ##\n ## ____________________________________________ ##\n\n next_obs, reward, terminal, info = self.env.step(current_obs, a)\n\n reward = np.squeeze(reward, axis=-1)\n c = np.squeeze(info.get('cost', np.zeros(reward.shape)))\n terminal = np.squeeze(terminal, axis=-1)\n dyn_dkl_mean = info.get('ensemble_dkl_mean', 0)\n dyn_dkl_path = info.get('ensemble_dkl_path', 0)\n dyn_ep_var = info.get('ensemble_ep_var', np.zeros(shape=reward.shape[1:]))\n ens_mean_var = info.get('ensemble_mean_var', 0)\n\n if self._n_episodes == 1:\n self._starting_uncertainty = np.mean(dyn_ep_var, axis=-1)\n self._starting_uncertainty_dkl = dyn_dkl_path\n ## ____________________________________________ ##\n ## Check Uncertainty f. 
each Trajectory ##\n ## ____________________________________________ ##\n\n ### check if too uncertain before storing info of the taken step \n ### (so we don't take a \"bad step\" by appending values of next state)\n if self.rollout_mode=='uncertainty':\n next_dkl = self._dyn_dkl_path[self.pool.alive_paths]+dyn_dkl_path\n too_uncertain_paths = next_dkl>=self.dkl_lim\n else:\n too_uncertain_paths = np.zeros(shape=self.pool.alive_paths.sum(), dtype=np.bool)\n \n ### early terminate paths if max_samples is given\n if max_samples and not self.rollout_mode=='iv_gae':\n n = self._total_samples + alive_paths.sum() - too_uncertain_paths.sum()\n n = max(n-max_samples, 0)\n early_term = np.zeros_like(too_uncertain_paths[~too_uncertain_paths], dtype=np.bool)\n early_term[:n] = True\n too_uncertain_paths[~too_uncertain_paths] = early_term\n\n ### finish too uncertain paths before storing info of the taken step into buffer\n # remaining_paths refers to the paths we have finished and has the same shape \n # as our terminal mask (too_uncertain_mask)\n # alive_paths refers to all original paths and therefore has shape batch_size\n remaining_paths = self._finish_paths(too_uncertain_paths, append_vals=True, append_cvals=True)\n alive_paths = self.pool.alive_paths\n if not alive_paths.any():\n info['alive_ratio'] = 0\n return next_obs, reward, terminal, info\n\n ## ____________________________________________ ##\n ## Store Info of the remaining paths ##\n ## ____________________________________________ ##\n\n if self.rollout_mode=='iv_gae':\n current_obs = current_obs[:,remaining_paths]\n a = a[:,remaining_paths]\n next_obs = next_obs[:,remaining_paths]\n reward = reward[:,remaining_paths]\n v_t = v_t[:,remaining_paths]\n v_var = v_var[:,remaining_paths]\n\n c = c[:,remaining_paths]\n vc_t = vc_t[:, remaining_paths]\n vc_var = vc_var[:, remaining_paths]\n terminal = terminal[:,remaining_paths]\n\n logp_t = logp_t[:,remaining_paths]\n pi_info_t = {k:v[:,remaining_paths] for k,v in pi_info_t.items()}\n else:\n current_obs = current_obs[remaining_paths]\n a = a[remaining_paths]\n next_obs = next_obs[remaining_paths]\n reward = reward[remaining_paths]\n \n v_t = v_t[remaining_paths]\n v_var = v_var[remaining_paths]\n self.v_inds = self.v_inds[remaining_paths]\n\n c = c[remaining_paths]\n vc_t = vc_t[remaining_paths]\n vc_var = vc_var[remaining_paths]\n self.vc_inds = self.vc_inds[remaining_paths]\n self.batch_inds = np.arange(0,np.sum(remaining_paths))\n terminal = terminal[remaining_paths]\n dyn_dkl_path = dyn_dkl_path[remaining_paths]\n logp_t = logp_t[remaining_paths]\n pi_info_t = {k:v[remaining_paths] for k,v in pi_info_t.items()}\n\n dyn_ep_var = dyn_ep_var[remaining_paths]\n\n #### update some sampler infos\n self._total_samples += alive_paths.sum()\n\n if self.rollout_mode=='iv_gae':\n self._total_cost += c[self.model_inds].sum()\n self._total_rew += reward[self.model_inds].sum()\n self._path_return[:,alive_paths] += reward\n self._path_cost[:,alive_paths] += c\n\n else:\n self._total_cost += c.sum()\n self._total_rew += reward.sum()\n self._path_return[alive_paths] += reward\n self._path_cost[alive_paths] += c\n\n self._path_length[alive_paths] += 1\n self._path_dyn_var[alive_paths] += np.mean(dyn_ep_var, axis=-1)\n self._total_dyn_ep_var += dyn_ep_var.sum()\n\n if self.rollout_mode=='iv_gae':\n self._total_Vs += v_t[self.model_inds].sum()\n self._total_CVs += vc_t[self.model_inds].sum()\n else:\n self._total_Vs += v_t.sum()\n self._total_CVs += vc_t.sum()\n\n self._total_dkl += 
dyn_dkl_mean*alive_paths.sum()\n self._total_mean_var =+ ens_mean_var*alive_paths.sum()\n\n self._max_dkl = max(self._max_dkl, np.max(dyn_dkl_path))\n self._dyn_dkl_path[alive_paths] += dyn_dkl_path\n self._max_path_return = max(self._max_path_return, np.max(self._path_return))\n\n #### only store one trajectory in buffer \n self.pool.store_multiple(current_obs,\n a,\n next_obs,\n reward,\n v_t,\n v_var,\n c,\n vc_t,\n vc_var,\n np.mean(dyn_ep_var, axis=-1),\n logp_t,\n pi_info_t,\n terminal)\n\n #### terminate mature termination due to path length\n ## update obs before finishing paths (_finish_paths() uses current obs)\n self._current_observation = next_obs\n\n path_end_mask = (self._path_length >= self._max_path_length-1)[alive_paths] \n remaining_paths = self._finish_paths(term_mask=path_end_mask, append_vals=True, append_cvals=True)\n if not remaining_paths.any():\n info['alive_ratio'] = 0\n return next_obs, reward, terminal, info\n\n ## update remaining paths and obs\n if self.rollout_mode=='iv_gae':\n self._current_observation = self._current_observation[:,remaining_paths]\n prem_term_mask = np.any(terminal[:,remaining_paths], axis=0) ##@anyboby maybe check later, if terminal per model should be possible\n else:\n self._current_observation = self._current_observation[remaining_paths]\n self.v_inds = self.v_inds[remaining_paths]\n self.vc_inds = self.vc_inds[remaining_paths]\n self.batch_inds = np.arange(0,np.sum(remaining_paths))\n\n prem_term_mask = terminal\n \n #### terminate real termination due to env end\n remaining_paths = self._finish_paths(term_mask=prem_term_mask, append_vals=False, append_cvals=True)\n if not remaining_paths.any():\n info['alive_ratio'] = 0\n return next_obs, reward, terminal, info\n\n ### update alive paths\n alive_paths = self.pool.alive_paths\n if self.rollout_mode=='iv_gae':\n self._current_observation = self._current_observation[:,remaining_paths]\n else:\n self._current_observation = self._current_observation[remaining_paths]\n self.v_inds = self.v_inds[remaining_paths]\n self.vc_inds = self.vc_inds[remaining_paths]\n self.batch_inds = np.arange(0,np.sum(remaining_paths))\n\n alive_ratio = sum(alive_paths)/self.batch_size\n info['alive_ratio'] = alive_ratio\n\n return next_obs, reward, terminal, info\n\n def compute_td_losses(self, obs):\n # Get outputs from policy\n get_action_outs = self.policy.get_action_outs(obs, factored=True, inc_var=True)\n a = get_action_outs['pi']\n v = get_action_outs['v']\n vc = get_action_outs.get('vc', 0) # Agent may not use cost value func\n next_obs, reward, terminal, info = self.env.step(obs, a)\n reward = np.squeeze(reward)\n c = np.squeeze(info.get('cost', 0))\n\n nv = self.policy.get_v(next_obs, factored=True)\n nvc = self.policy.get_vc(next_obs, factored=True)\n \n td = np.repeat(reward[None], v.shape[0], axis=0) + self.policy.gamma*nv - v\n tdc = np.repeat(c[None], v.shape[0], axis=0) + self.policy.cost_gamma*nvc - vc\n \n ep_td = np.mean(np.var(td, axis=0))\n ep_tdc = np.mean(np.var(tdc, axis=0))\n return ep_td, ep_tdc\n\n\n def _finish_paths(self, term_mask, append_vals=False, append_cvals=False):\n \"\"\"\n terminates paths that are indicated in term_mask. Append_vals should be set to \n True/False to indicate, whether values of the current states of those paths should \n be appended (Note: Premature termination due to environment term should not \n include appended values, while Mature termination upon path length excertion should \n include appended values)\n\n Warning! 
throws error if trying to terminate an already terminated path. \n\n Args:\n term_mask: Mask with the shape of the currently alive paths that indicates which \n paths should be termianted\n append_vals: True/False whether values of the current state should be appended\n \n Returns: \n remaining_mask: A Mask that indicates the remaining alive paths. Has the same shape \n as the arg term_mask\n \"\"\"\n if not term_mask.any():\n return np.logical_not(term_mask)\n\n # We do not count env time out (mature termination) as true terminal state, append values\n if append_vals:\n if self.rollout_mode=='iv_gae':\n last_val = self.policy.get_v(self._current_observation[:,term_mask], factored=False)\n else:\n last_val = self.policy.get_v(self._current_observation[term_mask], factored=True)[self.v_inds[term_mask],np.arange(term_mask.sum())]\n else:\n # init final values\n if self.rollout_mode=='iv_gae':\n last_val = np.zeros(shape=(self.ensemble_size, term_mask.sum()))\n else:\n last_val = np.zeros(shape=(term_mask.sum()))\n\n if append_cvals:\n if self.rollout_mode=='iv_gae':\n last_cval = self.policy.get_vc(self._current_observation[:,term_mask], factored=False)\n else:\n last_cval = self.policy.get_vc(self._current_observation[term_mask], factored=True)[self.vc_inds[term_mask],np.arange(term_mask.sum())]\n else:\n # init final values\n if self.rollout_mode=='iv_gae':\n last_cval = np.zeros(shape=(self.ensemble_size, term_mask.sum()))\n else:\n last_cval = np.zeros(shape=(term_mask.sum()))\n\n self.pool.finish_path_multiple(term_mask, last_val, last_cval)\n\n remaining_path_mask = np.logical_not(term_mask)\n\n return remaining_path_mask\n \n def finish_all_paths(self):\n\n alive_paths=self.pool.alive_paths ##any paths that are still alive did not terminate by env\n # init final values and quantify according to termination type\n # Note: we do not count env time out as true terminal state\n if not alive_paths.any(): return self.get_diagnostics()\n\n if alive_paths.any():\n term_mask = np.ones(shape=alive_paths.sum(), dtype=np.bool)\n if self.policy.agent.reward_penalized:\n last_val = self.policy.get_v(self._current_observation, factored=True)[self.v_inds[term_mask],np.arange(term_mask.sum())]\n else:\n last_val = self.policy.get_v(self._current_observation, factored=True)[self.v_inds[term_mask],np.arange(term_mask.sum())]\n last_cval = self.policy.get_vc(self._current_observation, factored=True)[self.vc_inds[term_mask],np.arange(term_mask.sum())]\n\n self.pool.finish_path_multiple(term_mask, last_val, last_cval)\n \n alive_paths = self.pool.alive_paths\n assert alive_paths.sum()==0 ## something went wrong with finishing all paths\n \n return self.get_diagnostics()\n\n def set_max_uncertainty(self, max_uncertainty):\n self.max_uncertainty = max_uncertainty" ]
[ [ "numpy.sum", "numpy.tile", "numpy.zeros_like", "numpy.squeeze", "numpy.zeros", "numpy.var", "numpy.any", "numpy.repeat", "numpy.arange", "numpy.logical_not", "numpy.max", "numpy.random.randint", "numpy.mean" ] ]
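In compute_td_losses above, the epistemic signal is the variance of one-step TD errors across the value-function ensemble, averaged over the batch. The fragment below reproduces just that arithmetic with dummy arrays (no environment or policy), to make the shapes explicit; it is an illustration, not code from the repository.

```python
import numpy as np

rng = np.random.default_rng(0)
E, B = 5, 8                      # ensemble members, rollout batch size
v = rng.normal(size=(E, B))      # value estimates for the current states
nv = rng.normal(size=(E, B))     # value estimates for the next states
reward = rng.normal(size=B)
gamma = 0.99

# One-step TD errors per ensemble member, mirroring compute_td_losses():
td = np.repeat(reward[None], E, axis=0) + gamma * nv - v
# Epistemic uncertainty proxy: variance across the ensemble, mean over batch.
ep_td = np.mean(np.var(td, axis=0))
print(ep_td)
```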
solsword/fleece
[ "f2259b67add9a660cc3185cb89681520d0e61b33" ]
[ "ngen.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nGenerates images that resemble input images.\n\nStacked denoising autoencoder-style neural network code using theano.\n\nWritten with reference to tutorial code from:\n https://github.com/lisa-lab/DeepLearningTutorials.git\nTutorial page:\n http://deeplearning.net/tutorial/SdA.html\n\"\"\"\n\nimport os\nimport sys\nimport timeit\nimport datetime\nimport functools\nimport copy\n\nimport gzip\nimport pickle\n\nimport numpy\n\nimport theano\nimport theano.tensor as T\n\nfrom PIL import Image\n\ndef debug(*args, **kwargs):\n print(*args, **kwargs)\n\ndef load_data(filename=\"data/examples.pkl.gz\"):\n '''\n Loads pickled gzipped data (see pdata.py).\n '''\n debug(\"... loading data ...\")\n\n # Load the dataset\n with gzip.open(filename, 'rb') as fin:\n dataset = pickle.load(fin)\n\n # format: a dictionary with three keys:\n # \"examples\": a numpy.ndarray with 2 dimensions where each row is an\n # example\n # \"palette\": a dictionary mapping colors to integers\n # \"r_palette\": the reverse of the palette, mapping from integers to colors\n\n dataset[\"examples\"] = numpy.array([\n explode_example(ex, len(dataset[\"palette\"]))\n for ex in dataset[\"examples\"]\n ])\n\n dataset[\"examples\"] = theano.shared(\n numpy.asarray(dataset[\"examples\"], dtype=theano.config.floatX),\n borrow = False\n )\n\n return dataset\n\nclass NeuroLayer:\n \"\"\"\n A layer of neurons whose weights can be used for both interpretation and\n reconstruction. It also has functions for training to denoise on a given\n input.\n \"\"\"\n\n def __init__(self, numpy_rng, input_size, output_size):\n # RNG for adding noise to input:\n self.theano_rng = theano.tensor.shared_randomstreams.RandomStreams(\n numpy_rng.randint(2 ** 30)\n )\n\n self.input_size = input_size\n self.output_size = output_size\n\n # Weights and offsets for deconstruction (input -> output):\n wsx = int(numpy_rng.uniform(low=0, high=input_size))\n wsy = int(numpy_rng.uniform(low=0, high=output_size))\n self.weights = theano.shared(\n value=numpy.asarray(\n numpy_rng.uniform(\n low=-4 * numpy.sqrt(6. / (input_size + output_size)),\n high=4 * numpy.sqrt(6. 
/ (input_size + output_size)),\n size=(input_size*2, output_size*2)\n )[wsx:wsx+input_size,wsy:wsy+output_size],\n dtype=theano.config.floatX\n ),\n name='weights',\n borrow=True\n )\n\n self.de_offsets = theano.shared(\n value=numpy.zeros(\n output_size,\n dtype=theano.config.floatX\n ),\n name='de_offsets',\n borrow=True\n )\n\n # Offsets for reconstruction (output -> input):\n # Note that the weights are shared for both directions.\n self.re_offsets = theano.shared(\n value=numpy.zeros(\n input_size,\n dtype=theano.config.floatX\n ),\n name='re_offsets',\n borrow=True\n )\n\n self.params = [\n self.weights,\n self.de_offsets,\n self.re_offsets\n ]\n\n # Evaluation functions:\n def get_deconstruct(self, input):\n return T.nnet.sigmoid(\n T.dot(input, self.weights) + self.de_offsets\n )\n\n def get_reconstruct(self, output):\n return T.nnet.sigmoid(\n T.dot(output, self.weights.T) + self.re_offsets\n )\n\n # Training functions:\n def get_cost_and_updates(self, input, corruption, learning_rate):\n \"\"\"\n Returns a theano expression for the cost function of the network on the\n given input, along with an update list for updating the weights based on\n the cost gradient.\n \"\"\"\n\n corrupt = self.theano_rng.binomial(\n size=input.shape,\n n=1, # number of trials\n p=1 - corruption, # probability of success per trial\n dtype=theano.config.floatX\n )\n static = self.theano_rng.random_integers(\n size=input.shape,\n low=0,\n high=1\n )\n corrupted = (\n corrupt * input\n + (1 - corrupt) * static\n )\n\n rep = self.get_deconstruct(corrupted) # internal representation\n rec = self.get_reconstruct(rep) # reconstructed input\n #cost = T.sum(input * T.log(rec) + (1 - input) * T.log(1 - rec))\n cost = T.sum((input - rec) ** 2)\n\n # the gradients of the cost w/r/t/ each parameter:\n gradients = T.grad(cost, self.params)\n # generate the list of updates\n updates = [\n (param, param - learning_rate * gr)\n for param, gr in zip(self.params, gradients)\n ] + self.theano_rng.updates()\n\n return (cost, updates)\n\nclass NeuralNet:\n \"\"\"\n A stack of auto-encoders.\n \"\"\"\n def __init__(self, numpy_rng, input_size, layer_sizes, output_size):\n self.rng = numpy_rng\n self.input_size = input_size\n self.output_size = output_size\n self.layers = []\n i_size = input_size\n for i in range(len(layer_sizes)):\n o_size = layer_sizes[i]\n self.layers.append(NeuroLayer(numpy_rng, i_size, o_size))\n i_size = o_size\n self.layers.append(NeuroLayer(numpy_rng, i_size, output_size))\n\n def get_deconstruct(self, input, limit=-1):\n result = input\n if limit < 0:\n limit = len(self.layers)\n for i in range(limit):\n result = self.layers[i].get_deconstruct(result)\n return result\n\n def get_reconstruct(self, output, limit=-1):\n result = output\n if limit < 0:\n limit = len(self.layers)\n for i in range(limit-1, -1, -1):\n result = self.layers[i].get_reconstruct(result)\n return result\n\n def get_training_functions(self, corruption_rates, ae_learning_rates):\n \"\"\"\n Returns a theano shared variable for use as input and a list of functions\n for training each layer of the stack.\n \"\"\"\n functions = []\n training_input = T.vector(name=\"training_input\", dtype=theano.config.floatX)\n for i in range(len(self.layers)):\n inp = self.get_deconstruct(training_input, limit=i)\n cost_function, updates = self.layers[i].get_cost_and_updates(\n inp,\n corruption_rates[i],\n ae_learning_rates[i]\n )\n functions.append(\n theano.function(\n inputs = [training_input],\n outputs = cost_function,\n updates = updates,\n 
name = \"training_function_layer_{}\".format(i)\n )\n )\n return functions\n\n def get_specialization_function(self, input, cv_extract, learning_rate):\n \"\"\"\n Returns a theano function that uses an example to specialize the network by\n training it to predict the region of the input selected by the given\n cv_extract function.\n \"\"\"\n pfunc = self.get_deconstruct(input)\n\n compare_to = cv_extract(input)\n\n cost = T.sum((compare_to - pfunc) ** 2)\n\n params = []\n for l in self.layers:\n params.extend(l.params[:-1]) # ignore the reconstruction offsets\n\n gradients = T.grad(cost, params)\n\n # generate the list of updates\n updates = [\n (param, param - learning_rate * gr)\n for param, gr in zip(params, gradients)\n ]\n # TODO: are these really unnecessary here?\n # + [l.theano_rng.updates() for l in self.layers]\n\n return theano.function(\n inputs = [input],\n outputs = cost,\n updates = updates,\n name = \"specialization_function\"\n )\n\n def pretrain(self, examples, epoch_counts, corruption_rates, learning_rates):\n \"\"\"\n Trains the network for autoencoding on the given examples, given lists of\n epoch counts, corruption rates, and learning rates each equal in length to\n the number of layers in the stack.\n \"\"\"\n tfs = self.get_training_functions(corruption_rates, learning_rates)\n indices = list(range(examples.get_value(borrow=True).shape[0]))\n start_time = timeit.default_timer()\n for i in range(len(self.layers)):\n # TODO: batches?\n for epoch in range(epoch_counts[i]):\n self.rng.shuffle(indices)\n costs = []\n for j in indices:\n cost = tfs[i](examples.get_value(borrow=True)[j].reshape(-1))\n costs.append(cost)\n debug(\n \"... [{}] epoch {: 3d} at layer {: 2d} done {} ...\".format(\n str(datetime.timedelta(seconds=timeit.default_timer()-start_time)),\n epoch + 1,\n i,\n \"(min/avg cost {:0.3f}/{:0.3f})\".format(\n float(min(costs)),\n float(sum(costs)/float(len(costs))),\n )\n )\n )\n\n def train(self, examples, cv_extract, epochs, learning_rate):\n \"\"\"\n Specializes the network for prediction on the given examples, using the\n given center extract function, the given number of epochs, and the given\n learning rate.\n \"\"\"\n input = T.vector(name=\"training_input\", dtype=theano.config.floatX)\n tf = self.get_specialization_function(input, cv_extract, learning_rate)\n indices = list(range(examples.get_value(borrow=True).shape[0]))\n start_time = timeit.default_timer()\n # TODO: batches?\n for epoch in range(epochs):\n self.rng.shuffle(indices)\n costs = []\n for j in indices:\n cost = tf(examples.get_value(borrow=True)[j].reshape(-1))\n costs.append(cost)\n debug(\n \"... [{}] epoch {: 3d} done {} ...\".format(\n str(datetime.timedelta(seconds=timeit.default_timer()-start_time)),\n epoch + 1,\n \"(min/avg cost {:0.3f}/{:0.3f})\".format(\n float(float(min(costs))),\n float(float(sum(costs)/float(len(costs))))\n )\n )\n )\n\ndef get_central_values(flat_input, input_size, center_size, palette_size):\n \"\"\"\n Takes a flat array which is assumed to represent input_size by input_size by\n palette_size data, and returns a flat array that represents the center_size\n by center_size central values of the original array.\n \"\"\"\n lc = input_size//2 - center_size//2\n rs = flat_input.reshape((input_size, input_size, palette_size))\n sel = rs[lc:lc+center_size, lc:lc+center_size, :]\n return sel.reshape([-1])\n\ndef explode_example(data, n_layers):\n \"\"\"\n Returns an array with an extra dimension that encodes that data in the given\n array as a one-hot encoding. 
The values in the array should all be between 0\n and n_layers (exclusive).\n \"\"\"\n result = numpy.zeros(\n list(data.shape) + [n_layers],\n dtype=theano.config.floatX\n )\n rs = data.reshape(-1)\n for i, x in enumerate(rs):\n coords = []\n irem = i\n for j in range(len(data.shape)):\n if data.shape[j+1:]:\n b = functools.reduce(lambda x, y: x*y, data.shape[j+1:], 1)\n coords.append(irem // b)\n irem = irem % b\n else:\n coords.append(irem)\n result[tuple(coords + [x])] = 1\n return result\n\ndef implode_result(data):\n \"\"\"\n Returns an array with one fewer dimension than the input, where the input's\n final dimension is taken to represent a one-hot encoding of the desired data.\n \"\"\"\n dshape = data.shape[:-1]\n n_layers = data.shape[-1]\n\n result = numpy.zeros(dshape, dtype=theano.config.floatX)\n rs = data.reshape(-1, n_layers)\n\n for i, enc in enumerate(rs):\n coords = []\n irem = i\n for j in range(len(dshape)):\n if data.shape[j+1:]:\n b = functools.reduce(lambda x, y: x*y, dshape[j+1:], 1)\n coords.append(irem // b)\n irem = irem % b\n else:\n coords.append(irem)\n result[tuple(coords)] = numpy.argmax(enc)\n return result\n\ndef fake_palette(size):\n result = {}\n fp = [\n (0xdd, 0x00, 0x00),\n (0xee, 0x99, 0x00),\n (0xff, 0xee, 0x00),\n (0x00, 0x99, 0x00),\n (0x11, 0x22, 0xee),\n (0x00, 0x00, 0x55),\n (0x55, 0x00, 0x99),\n ]\n for i in range(size):\n inc = 0x33 * (i // len(fp))\n e = fp[i%len(fp)]\n result[i] = (\n min(e[0] + inc, 0xff),\n min(e[1] + inc, 0xff),\n min(e[2] + inc, 0xff)\n )\n return result\n\ndef build_network(\n examples,\n window_size=8,\n predict_size=2,\n palette_size=16,\n batch_size = 1, # TODO: Implement this\n #layer_sizes = (0.2,),\n #ae_epochs = (1,1,),# (30,),\n #corruption_rates = (0.3,0.3,),\n #ae_learning_rates = (0.05,0.05,), # (0.005,)\n #sp_epochs = 1,\n #sp_learning_rate = 0.05,\n #layer_sizes = (0.7,),\n #ae_epochs = (5,5,),# (30,),\n #corruption_rates = (0.3,0.3,),\n #ae_learning_rates = (0.05,0.05,), # (0.005,)\n #sp_epochs = 5,\n #sp_learning_rate = 0.05,\n #layer_sizes = (0.8,0.5),\n #ae_epochs = (12, 12, 12),\n #corruption_rates = (0.3, 0.3, 0.2),\n #ae_learning_rates = (0.05, 0.05, 0.05),\n #sp_epochs = 20,\n #sp_learning_rate = 0.05,\n layer_sizes = (0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1),\n ae_epochs = (14, 14, 14, 14, 14, 14, 14, 14, 14, 14),\n corruption_rates = (0.4, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2),\n ae_learning_rates = (\n 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04\n ),\n sp_epochs = 20,\n sp_learning_rate = 0.05,\n):\n \"\"\"\n Builds and trains a network for recognizing image fragments.\n \"\"\"\n # Calculate input and layer sizes:\n input_size = window_size * window_size * palette_size\n hidden_sizes = [int(input_size * ls) for ls in layer_sizes]\n final_size = predict_size * predict_size * palette_size\n\n # Calculate the number of training batches:\n n_train_batches = examples.get_value(borrow=True).shape[0]\n n_train_batches //= batch_size\n\n # Set up the stacked denoising autoencoders:\n numpy_rng = numpy.random.RandomState(465746)\n net = NeuralNet(\n numpy_rng=numpy_rng,\n input_size = input_size,\n layer_sizes = hidden_sizes,\n output_size = final_size\n )\n\n # Visualize the network pre-training:\n vis_network(\n net,\n fake_palette(palette_size),\n window_size=window_size,\n outfile=\"vis-pre.png\"\n )\n\n # Train the network for autoencoding:\n debug(\"... 
pretraining the network ...\")\n start_time = timeit.default_timer()\n net.pretrain(\n examples,\n ae_epochs,\n corruption_rates,\n ae_learning_rates\n )\n end_time = timeit.default_timer()\n debug(\n \"... pretraining finished in {} ...\".format(\n str(datetime.timedelta(seconds=end_time - start_time))\n )\n )\n\n # Specialize the network for generation:\n debug(\"... specializing the network ...\")\n start_time = timeit.default_timer()\n net.train(\n examples,\n lambda a: get_central_values(a, window_size, predict_size, palette_size),\n sp_epochs,\n sp_learning_rate\n )\n end_time = timeit.default_timer()\n debug(\n \"... specialization finished in {} ...\".format(\n str(datetime.timedelta(seconds=end_time - start_time))\n )\n )\n\n return net\n\ndef write_image(data, palette, outdir, outfile):\n size = data.shape\n img = Image.new(\"RGB\", size)\n pixels = img.load()\n\n for x in range(size[0]):\n for y in range(size[1]):\n idx = int(data[x, y])\n if idx in palette:\n pixels[x, y] = palette[idx]\n else:\n pixels[x, y] = (255, 0, 255)\n\n img.save(os.path.join(outdir, outfile))\n\ndef write_grayscale(data, outdir, outfile, nbest=50):\n rs = data.reshape(-1)\n sqside = int((len(rs)**0.5) + 0.99999)\n shape = (sqside, sqside)\n\n normed = data / numpy.max(data)\n best = numpy.argsort(normed, axis=None)[-nbest:]\n\n img = Image.new(\"RGBA\", shape)\n pixels = img.load()\n\n i = 0\n for x in range(sqside):\n for y in range(sqside):\n if i < len(normed):\n g = int(normed[i] * 256)\n r = g\n a = 255\n if i in best:\n r = 0\n else:\n g = 0\n a = 0\n i += 1\n pixels[x, y] = (r, g, g, a)\n\n img.save(os.path.join(outdir, outfile))\n\ndef vis_network(\n net,\n palette,\n window_size=8,\n show=(12, 12),\n outdir=\"out\",\n outfile=\"vis.png\"\n):\n palette_size = len(palette)\n input = T.vector(name=\"input\", dtype=theano.config.floatX)\n output = T.vector(name=\"output\", dtype=theano.config.floatX)\n\n enc = theano.function(\n inputs=[input],\n outputs=net.get_deconstruct(input)\n )\n\n dec = theano.function(\n inputs=[output],\n outputs=net.get_reconstruct(output)\n )\n\n encoded = enc(\n numpy.zeros(\n (window_size, window_size, palette_size),\n dtype=theano.config.floatX\n ).reshape(-1)\n )\n\n exemplars = []\n\n for i in range(show[0]*show[1]):\n fake = numpy.zeros(encoded.shape, dtype=theano.config.floatX)\n fake = fake.reshape(-1)\n if i >= fake.shape[0]:\n continue\n fake[i] = 1\n fake = fake.reshape(encoded.shape)\n exemplars.append(\n implode_result(\n dec(fake).reshape((window_size, window_size, palette_size))\n )\n )\n\n result = numpy.full(\n ((window_size+1) * show[0], (window_size+1) * show[1]),\n palette_size,\n dtype=theano.config.floatX\n )\n i = 0\n for x in range(show[0]):\n for y in range(show[1]):\n if i < len(exemplars):\n result[\n x*(window_size+1):(x+1)*(window_size+1) - 1,\n y*(window_size+1):(y+1)*(window_size+1) - 1\n ] = exemplars[i]\n i += 1\n\n fp = copy.deepcopy(palette)\n fp[palette_size] = (0, 0, 0)\n\n write_image(\n result,\n fp,\n outdir,\n outfile\n )\n\ndef build_ae_munge(examples, net, nbest=2):\n # Express our inputs in terms of the last layer of our neural net, and get\n # the values using the net in its current state:\n n_ex = examples.shape[0]\n exreps, _ = theano.scan(\n fn=lambda i: net.get_deconstruct(examples[i].reshape([-1])),\n sequences=[T.arange(n_ex)]\n )\n exf = theano.function([], exreps)\n exconst = T.constant(exf())\n\n # An input variable:\n input = T.tensor3(name=\"input\", dtype=theano.config.floatX)\n\n # Build an expression for 
computing the net's deconstruction of a variable\n # input, and putting it into a column shape:\n irepcol = net.get_deconstruct(input.reshape([-1])).reshape([-1, 1])\n\n # An expression for getting the dot products between our representations of\n # each example and our representation of the input:\n dot_products = T.dot(exconst, irepcol)\n\n # The \"best\" examples are the ones which are most similar to the encoding of\n # the input:\n whichbest = T.argsort(dot_products, axis=None)[-nbest:].reshape([nbest])\n best = exconst[whichbest,:]\n bestweights = dot_products[whichbest].reshape([nbest])\n\n # Normalize the nbest entries and combine them:\n norm = bestweights / T.sum(bestweights)\n combined = T.dot(norm, best)\n\n rec = net.get_reconstruct(combined).reshape(input.shape)\n\n munge = theano.function(\n name=\"munge\",\n inputs=[input],\n outputs=[dot_products, rec]\n )\n\n # TODO: Get rid of this?\n #munge = theano.function(\n # inputs=[input],\n # outputs=net.get_reconstruct(\n # net.get_deconstruct(input.reshape(-1))\n # ).reshape(input.shape)\n #)\n\n return munge\n\ndef build_munge(net, patch_size):\n input = T.tensor3(name=\"input\", dtype=theano.config.floatX)\n predict = net.get_deconstruct(input.reshape([-1]))\n result = predict.reshape([patch_size, patch_size, input.shape[2]])\n return theano.function(\n name=\"munge\",\n inputs=[input],\n outputs=result\n )\n\ndef get_net(\n data=None,\n outdir=\"data\",\n outfile=\"network.pkl.gz\",\n center_size=2,\n rebuild=False\n):\n fn = os.path.join(outdir, outfile)\n if not data:\n # Load data:\n data = load_data()\n\n ws = data[\"window_size\"]\n hws = int(ws/2)\n ps = len(data[\"palette\"])\n r_palette = data[\"r_palette\"]\n\n if rebuild or not os.path.exists(fn):\n debug(\"... building network from scratch ...\")\n # Build network:\n net = build_network(\n data[\"examples\"],\n window_size=ws,\n predict_size=center_size,\n palette_size=ps\n )\n\n debug(\"... pickling trained network ...\")\n with gzip.open(fn, 'wb') as fout:\n pickle.dump(net, fout)\n\n debug(\"... visualizing trained network ...\")\n vis_network(\n net,\n r_palette,\n window_size=ws\n )\n else:\n debug(\"... 
loading pickled network ...\")\n with gzip.open(fn, 'rb') as fin:\n net = pickle.load(fin)\n\n return net\n\n\ndef generate_image(\n outdir=\"out\",\n outfile = \"result.lvl.png\",\n #size=(128,64),\n size=(32,32),\n patch_size=2,\n step_size=1,\n cycles=1,\n ini=\"distribution\"\n):\n # Load data:\n data = load_data()\n\n ws = data[\"window_size\"]\n hws = int(ws/2)\n ps = len(data[\"palette\"])\n border = data[\"border\"]\n r_palette = data[\"r_palette\"]\n fr_dist = data[\"fr_dist\"]\n exemplar = data[\"exemplar\"]\n\n net = get_net(data=data, center_size=patch_size, rebuild=False)\n\n if ini == \"random\":\n result = numpy.random.random_integers(\n 0,\n ps - 2, # avoid the last entry, which is the border value\n (size[0] + 2*ws, size[1] + 2*ws)\n )\n elif ini == \"shuffle\":\n result = numpy.zeros((size[0] + 2*ws, size[1] + 2*ws))\n ex = exemplar.reshape(-1)\n numpy.random.shuffle(ex)\n ex = ex.reshape(exemplar.shape)[:size[0],:size[1]]\n result[ws:size[0]+ws,ws:size[1]+ws] = ex\n elif ini == \"distribution\":\n result = numpy.zeros((size[0] + 2*ws, size[1] + 2*ws))\n for x in range(ws, size[0] + ws):\n for y in range(ws, size[1] + ws):\n sofar = 0\n choice = numpy.random.uniform(0, 1)\n for w, v in fr_dist:\n sofar += w\n if sofar >= choice:\n result[x, y] = v\n break\n\n # Set our border data to the border value:\n for x in range(ws):\n for y in range(size[1] + 2*ws):\n result[x,y] = border\n for x in range(size[0] + ws, size[0] + 2*ws):\n for y in range(size[1] + 2*ws):\n result[x,y] = border\n for y in range(ws):\n for x in range(size[0] + 2*ws):\n result[x,y] = border\n for y in range(size[1] + ws, size[1] + 2*ws):\n for x in range(size[0] + 2*ws):\n result[x,y] = border\n\n write_image(result, r_palette, outdir, \"pre.lvl.png\")\n\n result = explode_example(result, ps)\n\n indices = []\n for x in range(ws - hws, size[0] + ws - hws, step_size):\n for y in range(ws - hws, size[1] + ws - hws, step_size):\n indices.append((x, y))\n\n debug(\"... starting image generation ...\")\n munge = build_munge(net, patch_size)\n\n for epoch in range(cycles):\n numpy.random.shuffle(indices)\n patch = 0\n for x, y in indices:\n if (patch % 50 == 0):\n debug(\"... generating patch {}/{} ...\".format(patch + 1, len(indices)))\n patch += 1\n\n if epoch == 0 and patch == 20:\n write_image(\n implode_result(result),\n r_palette,\n outdir,\n \"patched.lvl.png\"\n )\n\n result[\n x + ws//2 - patch_size//2:x + ws//2 - patch_size//2 + patch_size,\n y + ws//2 - patch_size//2:y + ws//2 - patch_size//2 + patch_size,\n :\n ] = munge(result[x:x+ws,y:y+ws,:])\n\n debug(\"... generation cycle {}/{} completed ...\".format(epoch + 1, cycles))\n\n result = implode_result(result)\n debug(\"... writing result image ...\")\n write_image(result, r_palette, outdir, outfile)\n debug(\"... 
done.\")\n\ndef test_explode(filename=\"data/examples.pkl.gz\", size=8):\n # Load the dataset\n with gzip.open(filename, 'rb') as fin:\n dataset = pickle.load(fin)\n\n ex = dataset[\"examples\"][0]\n print(ex)\n exr = ex.reshape(size, size)\n print(exr)\n expl = explode_example(exr, len(dataset[\"palette\"]))\n print(expl)\n impl = implode_result(expl)\n print(impl)\n\n expl2 = explode_example(ex, len(dataset[\"palette\"]))\n impl2 = implode_result(expl2.reshape((size, size, 15)))\n print(impl2)\n print(impl2[7, 4], impl2[7, 5])\n\n img = Image.new(\"RGB\", (size, size))\n pixels = img.load()\n\n i = 0\n for x in range(impl2.shape[0]):\n for y in range(impl2.shape[1]):\n g = int(3*impl2[x, y])\n pixels[x, y] = (g, g, g)\n i += 1\n print(impl2[x, y], end=\" \")\n print()\n\n img.save(\"t.png\")\n\nif __name__ == \"__main__\":\n #test_explode()\n #generate_image(cycles=1, ini=\"distribution\")\n generate_image(cycles=1, ini=\"shuffle\")\n" ]
[ [ "numpy.random.uniform", "numpy.random.shuffle", "numpy.zeros", "numpy.argsort", "numpy.asarray", "numpy.argmax", "numpy.random.RandomState", "numpy.max", "numpy.sqrt", "numpy.full", "numpy.random.random_integers" ] ]
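explode_example and implode_result in the file above convert between integer-coded tiles and a one-hot encoding over a trailing palette axis. For integer inputs in [0, n_layers) they behave like the np.eye / argmax idiom sketched below, which is a quick way to sanity-check them; this snippet is an illustration only and not part of the repository.

```python
import numpy as np

def explode(data, n_layers):
    """One-hot over a new trailing axis; matches explode_example for int data."""
    return np.eye(n_layers, dtype=np.float32)[data.astype(int)]

def implode(one_hot):
    """Inverse: index of the hot entry along the last axis, as in implode_result."""
    return np.argmax(one_hot, axis=-1)

tiles = np.array([[0, 2], [1, 3]])
exploded = explode(tiles, n_layers=4)     # shape (2, 2, 4)
assert exploded.shape == (2, 2, 4)
assert np.array_equal(implode(exploded), tiles)
print(exploded)
```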
PhoeniXuzoo/NU-Projects
[ "a217ad46e6876ceffb3dec1d6e52f775674b2e8b" ]
[ "EE475/Ch6P16.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef readData(csvname):\n data = np.loadtxt(csvname, delimiter=',')\n x = data[:-1, :]\n y = data[-1:, :]\n\n return x, y\n\nfot = lambda x : np.exp(-x) / (1 + np.exp(-x))\nsot = lambda x : ( 1 / (1 + np.exp(x))) * (1 - ( 1 / (1 + np.exp(x))))\n\n# power is y_p * x_p.T * w\n# firstOrderTerm is e^-power / (1 + e^-power)\ndef first_order(x, y, w, class_weight, power):\n total = np.zeros(w.shape)\n firstOrderTerm = fot(power)\n\n for i in range(np.size(y)):\n total += class_weight[i] * firstOrderTerm[:,i] * y[:,i] * x[:,[i]]\n\n return (-1) * (total / np.size(y))\n\n\ndef second_order(x, y, w, class_weight, power):\n total = np.zeros([x.shape[0], x.shape[0]])\n secondOrderTerm = sot(power)\n\n for i in range(np.size(y)):\n total += class_weight[i] * secondOrderTerm[:, i] * x[:, [i]] * x[:, [i]].T\n\n return total / np.size(y)\n\n\ndef newton_method(x, y, w, class_weight):\n power = y * np.transpose(np.dot(x.T, w))\n firstOrder = first_order(x, y, w, class_weight, power)\n secondOrder = second_order(x, y, w, class_weight, power)\n\n return w - np.dot(np.linalg.inv(secondOrder), firstOrder)\n\ndef costFunc(x, y, w, class_weight):\n temp = np.log(1 + np.exp(-y*np.transpose(np.dot(np.transpose(x), w))))\n cost = 0\n for i in range(np.size(y)):\n cost += temp[0][i] * class_weight[i]\n return cost / float(np.size(y))\n\nif __name__ == \"__main__\":\n csvname = '3d_classification_data_v2_mbalanced.csv'\n x, y = readData(csvname)\n w = np.ones([x.shape[0] + 1, 1])\n x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)\n\n positiveOneWeight = 7/11\n negativeOneWeight = 4/11\n\n class_weight = []\n for i in range(np.size(y)):\n if (y[:, i] > 0):\n class_weight.append(positiveOneWeight)\n else:\n class_weight.append(negativeOneWeight)\n\n position = x[[1, 2]]\n positiveOneXList = []\n positiveOneYList = []\n negativeOneXList = []\n negativeOneYList = []\n for i in range(position.shape[1]):\n if (y[0][i] > 0):\n positiveOneXList.append(position[0][i])\n positiveOneYList.append(position[1][i])\n else:\n negativeOneXList.append(position[0][i])\n negativeOneYList.append(position[1][i])\n\n plt.scatter(positiveOneXList, positiveOneYList, color='red')\n plt.scatter(negativeOneXList, negativeOneYList, color='blue')\n\n for i in range(5):\n w = newton_method(x, y, w, class_weight)\n\n a = -(w[1][0]/w[2][0])\n b = -(w[0][0]/w[2][0])\n\n foo = lambda x : a * x + b\n i = -0.1\n xList = []\n yList = []\n while (i < 1.1):\n xList.append(i)\n yList.append(foo(i))\n i += 0.1\n\n plt.plot(xList, yList)\n plt.show()\n\n\n\n" ]
[ [ "numpy.ones", "numpy.transpose", "numpy.zeros", "numpy.linalg.inv", "numpy.exp", "numpy.size", "matplotlib.pyplot.show", "numpy.loadtxt", "matplotlib.pyplot.plot", "numpy.dot", "matplotlib.pyplot.scatter" ] ]
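The script above runs five iterations of a class-weighted Newton step for two-class logistic regression: first_order and second_order accumulate the gradient and Hessian point by point, and the step is w ← w − H⁻¹g. Below is a vectorized sketch of one such step on random data, shown only to make the update explicit; the random data and shapes are assumptions, not the assignment's CSV input.

```python
import numpy as np

rng = np.random.default_rng(1)
d, n = 3, 20                                  # features (incl. bias), points
X = np.vstack([np.ones((1, n)), rng.normal(size=(d - 1, n))])
y = rng.choice([-1.0, 1.0], size=(1, n))
cw = np.where(y[0] > 0, 7 / 11, 4 / 11)       # per-class weights as in the script
w = np.ones((d, 1))

sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

def newton_step(X, y, w, cw):
    margins = (y * (X.T @ w).T)[0]            # y_p * x_p^T w for each point
    s = sigmoid(-margins)                     # e^-m / (1 + e^-m), as in fot()
    grad = -(X @ (cw * s * y[0]))[:, None] / n
    hess = (X * (cw * s * (1 - s))) @ X.T / n # s*(1-s) plays the role of sot()
    return w - np.linalg.inv(hess) @ grad

for _ in range(5):
    w = newton_step(X, y, w, cw)
print(w.ravel())
```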
vbelz/Lyrics_classifier
[ "57d4fec9b00b3835a71ebdd3c234fed629079f22" ]
[ "prepare_for_training.py" ]
[ "import spacy as spacy_en\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport pickle\nimport os\n\nmodel = spacy_en.load('en_core_web_md')\n\ndef clean_my_text(song):\n\n \"\"\" It filters punctuation, numbers, stop word\n and returns lemmatized words\"\"\"\n\n doc = model(song)\n clean_text = ''\n\n for word in doc:\n\n if (word.is_stop == False) and (word.pos_ != 'PUNCT') and (word.pos_ != 'NUM'):\n\n word = word.lemma_\n clean_text += word + ' '\n\n return clean_text\n\ndef keep_english_for_spacy_nn(df):\n \"\"\"This function takes the DataFrame for songs\n and keep songs with english as main language\n for english version of spacy neural network for word processing\"\"\"\n\n #Keep only english for spacy NN English preprocessing words\n #Network for other languages like french, spanish, portuguese are also available\n df = df.loc[df['Main Language'] == 'en',:]\n #Drop the translation column not use for lyrics in english\n df.drop(['English Translation Lyrics'],axis =1,inplace = True)\n\n return df\n\ndef apply_spacy_nn_to_DataFrame(df):\n \"\"\"Apply reduction of words using clean_my_text Function\n to the lyrics column\"\"\"\n\n df['Text Lyrics'] = df['Text Lyrics'].apply(clean_my_text)\n\n return df\n\ndef save_transform_to_disk(cv, tf, folder_save):\n\n countvectorfile = os.path.join(folder_save, 'countvector.sav')\n pickle.dump(cv, open(countvectorfile, 'wb'))\n\n Tfidfile = os.path.join(folder_save, 'Tfidfile.sav')\n pickle.dump(tf, open(Tfidfile, 'wb'))\n\n return\n\ndef prepare_training(df_read, folder_save):\n \"\"\"This function takes the database of artists as input\n and the folder where to save transform operations on data\n and return X and y for training\"\"\"\n\n #Songs in english for spacy nn (disable if multilanguage)\n df_prep = keep_english_for_spacy_nn(df_read)\n #Apply spacy nn to reduce dimension of text\n df_prep = apply_spacy_nn_to_DataFrame(df_prep)\n #Count vecorizer of words\n cv = CountVectorizer()\n corpus_vec = cv.fit_transform(df_prep['Text Lyrics'])\n #Tfidf Transform\n tf = TfidfTransformer()\n transform_vec = tf.fit_transform(corpus_vec)\n #Save transform to disk to reuse for predictions\n save_transform_to_disk(cv, tf, folder_save)\n #todense() to remove sparse formatting\n df_word_vec = pd.DataFrame(transform_vec.todense(), columns=cv.get_feature_names())\n y = df_prep['Name']\n X = df_word_vec\n\n return X,y\n" ]
[ [ "sklearn.feature_extraction.text.TfidfTransformer", "sklearn.feature_extraction.text.CountVectorizer" ] ]
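save_transform_to_disk above pickles the fitted CountVectorizer and TfidfTransformer so they can be reused at prediction time. A hypothetical counterpart for the prediction side might look like the sketch below; the load_transform_from_disk and vectorize_new_lyrics names and the calling code are assumptions, not part of the repository. Note that only transform, never fit_transform, is applied to unseen lyrics.

```python
import os
import pickle

def load_transform_from_disk(folder_save):
    """Hypothetical inverse of save_transform_to_disk: reload cv and tf."""
    with open(os.path.join(folder_save, 'countvector.sav'), 'rb') as f:
        cv = pickle.load(f)
    with open(os.path.join(folder_save, 'Tfidfile.sav'), 'rb') as f:
        tf = pickle.load(f)
    return cv, tf

def vectorize_new_lyrics(lyrics, folder_save):
    """Apply the already-fitted transforms to new, pre-cleaned lyrics."""
    cv, tf = load_transform_from_disk(folder_save)
    counts = cv.transform(lyrics)     # reuse the fitted vocabulary
    return tf.transform(counts)       # reuse the fitted idf weights

# Usage (assuming the transforms were saved earlier to a 'models/' folder):
# X_new = vectorize_new_lyrics([clean_my_text("some new song lyrics")], "models")
```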
readthedocs-assistant/autocnet
[ "579cccd0edc4cd870b5d9671165ebd830f1112b8" ]
[ "autocnet/matcher/tests/test_cpu_ring_matcher.py" ]
[ "from unittest import mock\n\nimport numpy as np\nimport pytest\n\nfrom autocnet.matcher import cpu_ring_matcher as rm\n\[email protected]('arr, expected', [\n (np.array([[1,0],[1,1], [2,3]]), (1,2)),\n (np.array([[0,0], [1,1], [2,2]]), (3,2)\n )])\ndef test_check_pidx_duplicates(arr, expected):\n pidx = rm.check_pidx_duplicates(arr)\n assert pidx.shape == expected\n\[email protected](\"a, b, threshold, expected\", [\n # Tests standard call\n (np.array([1,2,3]), \n np.array([[1,2,3], [4,5,6], [7,8,9]]),\n 1.5, \n np.array([0])),\n # Tests call where distances are too close\n (np.array([1,2,3]),\n np.array([[7,8,9], [1,2,4], [1,2,4.1]]),\n 1.5, \n None),\n # Tests call with close distances where the threshold is low\n (np.array([1,2,3]),\n np.array([[7,8,9], [1,2,4], [1,2,4.1]]),\n 1., \n 1),\n # Tests call when np.argmin will fail\n (np.array([np.nan, np.nan]),\n np.array([[np.nan, np.nan], [np.nan, np.nan]]),\n 1.5,\n None),\n # Tests call where descriptors are identical\n (np.array([1,2,3]),\n np.array([[1,2,3], [1,2,3], [1,2,3]]),\n 1.5,\n None)\n])\ndef test_sift_match(a, b, threshold, expected):\n assert rm.sift_match(a, b, thresh=threshold) == expected \n\[email protected](\"x,y, eidx\",[(np.array([[1,1],[2,2],[3,3], [4,4], [5,5]]),\n np.array([[1.1,1.0],[1.9,1.95],[3,3], [-4,-4], [5,5]]),\n np.array([[0,1,2,4]])),\n (np.array([[1,1], [5,5]]),\n np.array([[1,1], [3,3]]),\n [])\n ])\ndef test_ransac_permute(x, y, eidx):\n xp, yp, idx = rm.ransac_permute(x, y, 0.2, 2)\n np.testing.assert_array_equal(idx, eidx)\n\n\ndef test_add_correspondences():\n func = 'autocnet.matcher.cpu_ring_matcher.ring_match_one'\n with mock.patch(func, return_value=1):\n in_feats = np.array([[1,1], [2,2]])\n ref_feats = np.array([[1,1],[2,2],[3,3], [4,4], [5,5]])\n tar_feats = np.array([[1.1,1.0],[1.9,1.95],[3,3], [-4,-4], [5,5]])\n \n rm.add_correspondences(in_feats, ref_feats, tar_feats, None, None,\n (0,6), (0,6),(0,1))\n\ndef test_dynamically_grow():\n x = np.ones((3,3))\n y = rm.dynamically_grow_array(x,6)\n assert y.shape == (9,3)\n \ndef test_dynamically_grow_dtype():\n x = np.ones((3,3), dtype=np.int8)\n y = rm.dynamically_grow_array(x,6)\n assert np.issubdtype(y.dtype, np.float64)\n\n y = rm.dynamically_grow_array(x,6,dtype=np.int8)\n assert np.issubdtype(y.dtype, np.int8)\n\ndef test_points_in_ring():\n x = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4])\n for i in np.arange(0.5, 4.5):\n assert np.sum(rm.points_in_ring(x, i, i+1)) == 5\n\ndef test_ring_match():\n ref_feats = np.array([[1,1,1,1],\n [2,2,2,2],\n [3,3,3,3],\n [4,4,4,4]])\n tar_feats = np.array([[2,2,1.1,1],\n [2.5, 2.5, 1.1, 1.1],\n [3,3,2.1,2.1],\n [3.5, 3.5, 2.2, 2.2],\n [4,4,2.9,2.9],\n [4.5, 4.5, 3.0, 3.0],\n [5,5, 4.0, 4.1],\n [5.5, 5.5, 4.1, 4.1]])\n ref_desc = np.array([[0,0,0,0],\n [1,1,1,1],\n [2,2,2,2],\n [3,3,3,3]])\n tar_desc = np.array([[0,0,0,0],\n [6,7,8,9],\n [1,1,1,1],\n [6,7,8,9],\n [2,2,2,2],\n [6,7,8,9],\n [3,3,3,3],\n [6,7,8,9]])\n\n ring_radius = 0.5\n max_radius = 1\n target_points = 2\n tolerance = 0.1\n gr, gt, p_idx, ring = rm.ring_match(ref_feats, tar_feats, ref_desc, tar_desc,\n ring_radius=ring_radius, max_radius=max_radius,\n target_points=target_points, tolerance_val=tolerance,\n iteration_break_point=2)\n assert ring == (0.0, 0.5)\n sorted_pidx = p_idx[p_idx[:,0].astype(np.int).argsort()]\n np.testing.assert_array_equal(sorted_pidx,\n np.array([[0,0],[1,2],[2,4],[3,6]]))\n" ]
[ [ "numpy.ones", "numpy.issubdtype", "numpy.testing.assert_array_equal", "numpy.arange", "numpy.array" ] ]
horseriver/csgm
[ "0f77d9c749dd31ce03b104dc2d355267e3ced038" ]
[ "celebA_dcgan/model_def.py" ]
[ "# pylint: disable = C0103, C0111, C0301, R0914\n\n\"\"\"Model definitions for celebA\n\nThis file is partially based on\nhttps://github.com/carpedm20/DCGAN-tensorflow/blob/master/main.py\nhttps://github.com/carpedm20/DCGAN-tensorflow/blob/master/model.py\n\nThey come with the following license: https://github.com/carpedm20/DCGAN-tensorflow/blob/master/LICENSE\n\"\"\"\n\nimport tensorflow as tf\nimport ops\n\n\nclass Hparams(object):\n def __init__(self):\n self.c_dim = 3\n self.z_dim = 100\n self.gf_dim = 64\n self.df_dim = 64\n self.gfc_dim = 1024\n self.dfc_dim = 1024\n self.batch_size = 64\n\n\ndef generator(hparams, z, train, reuse):\n\n if reuse:\n tf.get_variable_scope().reuse_variables()\n\n output_size = 64\n s = output_size\n s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)\n\n g_bn0 = ops.batch_norm(name='g_bn0')\n g_bn1 = ops.batch_norm(name='g_bn1')\n g_bn2 = ops.batch_norm(name='g_bn2')\n g_bn3 = ops.batch_norm(name='g_bn3')\n\n # project `z` and reshape\n h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s16*s16, 'g_h0_lin'), [-1, s16, s16, hparams.gf_dim * 8])\n h0 = tf.nn.relu(g_bn0(h0, train=train))\n\n h1 = ops.deconv2d(h0, [hparams.batch_size, s8, s8, hparams.gf_dim*4], name='g_h1')\n h1 = tf.nn.relu(g_bn1(h1, train=train))\n\n h2 = ops.deconv2d(h1, [hparams.batch_size, s4, s4, hparams.gf_dim*2], name='g_h2')\n h2 = tf.nn.relu(g_bn2(h2, train=train))\n\n h3 = ops.deconv2d(h2, [hparams.batch_size, s2, s2, hparams.gf_dim*1], name='g_h3')\n h3 = tf.nn.relu(g_bn3(h3, train=train))\n\n h4 = ops.deconv2d(h3, [hparams.batch_size, s, s, hparams.c_dim], name='g_h4')\n x_gen = tf.nn.tanh(h4)\n\n return x_gen\n\n\ndef discriminator(hparams, x, train, reuse):\n\n if reuse:\n tf.get_variable_scope().reuse_variables()\n\n d_bn1 = ops.batch_norm(name='d_bn1')\n d_bn2 = ops.batch_norm(name='d_bn2')\n d_bn3 = ops.batch_norm(name='d_bn3')\n\n h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))\n\n h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')\n h1 = ops.lrelu(d_bn1(h1, train=train))\n\n h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')\n h2 = ops.lrelu(d_bn2(h2, train=train))\n\n h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')\n h3 = ops.lrelu(d_bn3(h3, train=train))\n\n h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')\n\n d_logit = h4\n d = tf.nn.sigmoid(d_logit)\n\n return d, d_logit\n\n\ndef gen_restore_vars():\n restore_vars = ['g_bn0/beta',\n 'g_bn0/gamma',\n 'g_bn0/moving_mean',\n 'g_bn0/moving_variance',\n 'g_bn1/beta',\n 'g_bn1/gamma',\n 'g_bn1/moving_mean',\n 'g_bn1/moving_variance',\n 'g_bn2/beta',\n 'g_bn2/gamma',\n 'g_bn2/moving_mean',\n 'g_bn2/moving_variance',\n 'g_bn3/beta',\n 'g_bn3/gamma',\n 'g_bn3/moving_mean',\n 'g_bn3/moving_variance',\n 'g_h0_lin/Matrix',\n 'g_h0_lin/bias',\n 'g_h1/biases',\n 'g_h1/w',\n 'g_h2/biases',\n 'g_h2/w',\n 'g_h3/biases',\n 'g_h3/w',\n 'g_h4/biases',\n 'g_h4/w']\n return restore_vars\n\n\n\ndef discrim_restore_vars():\n restore_vars = ['d_bn1/beta',\n 'd_bn1/gamma',\n 'd_bn1/moving_mean',\n 'd_bn1/moving_variance',\n 'd_bn2/beta',\n 'd_bn2/gamma',\n 'd_bn2/moving_mean',\n 'd_bn2/moving_variance',\n 'd_bn3/beta',\n 'd_bn3/gamma',\n 'd_bn3/moving_mean',\n 'd_bn3/moving_variance',\n 'd_h0_conv/biases',\n 'd_h0_conv/w',\n 'd_h1_conv/biases',\n 'd_h1_conv/w',\n 'd_h2_conv/biases',\n 'd_h2_conv/w',\n 'd_h3_conv/biases',\n 'd_h3_conv/w',\n 'd_h3_lin/Matrix',\n 'd_h3_lin/bias']\n return restore_vars\n" ]
[ [ "tensorflow.nn.sigmoid", "tensorflow.nn.tanh", "tensorflow.reshape", "tensorflow.get_variable_scope" ] ]
bmedishe/pytorch
[ "7fc73285da2c8918cf039a2c3e0eeed241478e40" ]
[ "test/run_test.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport copy\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom distutils.version import LooseVersion\nimport functools\nimport os\nimport pathlib\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport tempfile\n\nimport torch\nfrom torch.utils import cpp_extension\nfrom torch.testing._internal.common_utils import (\n FILE_SCHEMA,\n IS_IN_CI,\n TEST_WITH_ROCM,\n shell,\n set_cwd,\n parser as common_parser,\n)\nimport torch.distributed as dist\nfrom typing import Dict, Optional, List\n\nREPO_ROOT = pathlib.Path(__file__).resolve().parent.parent\n\ntry:\n # using tools/ to optimize test run.\n sys.path.append(str(REPO_ROOT))\n from tools.testing.test_selections import (\n export_S3_test_times,\n get_shard_based_on_S3,\n # NS: Disable target determination\n # get_slow_tests_based_on_S3,\n get_specified_test_cases,\n get_reordered_tests,\n get_test_case_configs,\n )\n # NS: Disable target determination\n # from tools.testing.modulefinder_determinator import (\n # should_run_test,\n # TARGET_DET_LIST,\n # )\n\n HAVE_TEST_SELECTION_TOOLS = True\nexcept ImportError:\n HAVE_TEST_SELECTION_TOOLS = False\n print(\n \"Unable to import test_selections from tools/testing. Running without test selection stats...\"\n )\n\n\ndef discover_tests(\n base_dir: Optional[pathlib.Path] = None,\n blocklisted_patterns: Optional[List[str]] = None,\n blocklisted_tests: Optional[List[str]] = None,\n extra_tests: Optional[List[str]] = None) -> List[str]:\n \"\"\"\n Searches for all python files starting with test_ excluding one specified by patterns\n \"\"\"\n def skip_test_p(name: str) -> bool:\n rc = False\n if blocklisted_patterns is not None:\n rc |= any(name.startswith(pattern) for pattern in blocklisted_patterns)\n if blocklisted_tests is not None:\n rc |= name in blocklisted_tests\n return rc\n cwd = pathlib.Path(__file__).resolve().parent if base_dir is None else base_dir\n all_py_files = list(cwd.glob('**/test_*.py'))\n rc = [str(fname.relative_to(cwd))[:-3] for fname in all_py_files]\n # Invert slashes on Windows\n if sys.platform == \"win32\":\n rc = [name.replace('\\\\', '/') for name in rc]\n rc = [test for test in rc if not skip_test_p(test)]\n if extra_tests is not None:\n rc += extra_tests\n return sorted(rc)\n\nTESTS = discover_tests(\n blocklisted_patterns=[\n 'ao',\n 'bottleneck_test',\n 'custom_backend',\n 'custom_operator',\n 'fx', # executed by test_fx.py\n 'jit', # executed by test_jit.py\n 'mobile',\n 'onnx',\n 'package', # executed by test_package.py\n 'quantization', # executed by test_quantization.py\n 'autograd', # executed by test_autograd.py\n ],\n blocklisted_tests=[\n 'test_bundled_images',\n 'test_cpp_extensions_aot',\n 'test_determination',\n 'test_jit_fuser',\n 'test_jit_simple',\n 'test_jit_string',\n 'test_kernel_launch_checks',\n 'test_metal',\n 'test_nnapi',\n 'test_segment_reductions',\n 'test_static_runtime',\n 'test_throughput_benchmark',\n 'test_typing',\n \"distributed/algorithms/ddp_comm_hooks/test_ddp_hooks\",\n \"distributed/algorithms/quantization/test_quantization\",\n \"distributed/bin/test_script\",\n \"distributed/elastic/multiprocessing/bin/test_script\",\n \"distributed/launcher/bin/test_script\",\n \"distributed/launcher/bin/test_script_init_method\",\n \"distributed/launcher/bin/test_script_is_torchelastic_launched\",\n \"distributed/launcher/bin/test_script_local_rank\",\n \"distributed/test_c10d_spawn\",\n 'distributions/test_transforms',\n 'distributions/test_utils',\n ],\n 
extra_tests=[\n \"test_cpp_extensions_aot_ninja\",\n \"test_cpp_extensions_aot_no_ninja\",\n \"distributed/elastic/timer/api_test\",\n \"distributed/elastic/timer/local_timer_example\",\n \"distributed/elastic/timer/local_timer_test\",\n \"distributed/elastic/events/lib_test\",\n \"distributed/elastic/metrics/api_test\",\n \"distributed/elastic/utils/logging_test\",\n \"distributed/elastic/utils/util_test\",\n \"distributed/elastic/utils/distributed_test\",\n \"distributed/elastic/multiprocessing/api_test\",\n \"test_deploy\",\n ]\n)\n\nFSDP_TEST = [test for test in TESTS if test.startswith(\"distributed/fsdp\")]\n\n# Tests need to be run with pytest.\nUSE_PYTEST_LIST = [\n \"distributed/pipeline/sync/skip/test_api\",\n \"distributed/pipeline/sync/skip/test_gpipe\",\n \"distributed/pipeline/sync/skip/test_inspect_skip_layout\",\n \"distributed/pipeline/sync/skip/test_leak\",\n \"distributed/pipeline/sync/skip/test_portal\",\n \"distributed/pipeline/sync/skip/test_stash_pop\",\n \"distributed/pipeline/sync/skip/test_tracker\",\n \"distributed/pipeline/sync/skip/test_verify_skippables\",\n \"distributed/pipeline/sync/test_balance\",\n \"distributed/pipeline/sync/test_bugs\",\n \"distributed/pipeline/sync/test_checkpoint\",\n \"distributed/pipeline/sync/test_copy\",\n \"distributed/pipeline/sync/test_deferred_batch_norm\",\n \"distributed/pipeline/sync/test_dependency\",\n \"distributed/pipeline/sync/test_inplace\",\n \"distributed/pipeline/sync/test_microbatch\",\n \"distributed/pipeline/sync/test_phony\",\n \"distributed/pipeline/sync/test_pipe\",\n \"distributed/pipeline/sync/test_pipeline\",\n \"distributed/pipeline/sync/test_stream\",\n \"distributed/pipeline/sync/test_transparency\",\n \"distributed/pipeline/sync/test_worker\",\n \"distributions/test_constraints\",\n \"distributions/test_transforms\",\n \"distributions/test_utils\",\n \"test_typing\",\n \"distributed/elastic/events/lib_test\",\n \"distributed/elastic/agent/server/test/api_test\",\n \"test_deploy\",\n]\n\nWINDOWS_BLOCKLIST = [\n \"distributed/nn/jit/test_instantiator\",\n \"distributed/rpc/test_faulty_agent\",\n \"distributed/rpc/test_tensorpipe_agent\",\n \"distributed/rpc/test_share_memory\",\n \"distributed/rpc/cuda/test_tensorpipe_agent\",\n \"distributed/pipeline/sync/skip/test_api\",\n \"distributed/pipeline/sync/skip/test_gpipe\",\n \"distributed/pipeline/sync/skip/test_inspect_skip_layout\",\n \"distributed/pipeline/sync/skip/test_leak\",\n \"distributed/pipeline/sync/skip/test_portal\",\n \"distributed/pipeline/sync/skip/test_stash_pop\",\n \"distributed/pipeline/sync/skip/test_tracker\",\n \"distributed/pipeline/sync/skip/test_verify_skippables\",\n \"distributed/pipeline/sync/test_balance\",\n \"distributed/pipeline/sync/test_bugs\",\n \"distributed/pipeline/sync/test_checkpoint\",\n \"distributed/pipeline/sync/test_copy\",\n \"distributed/pipeline/sync/test_deferred_batch_norm\",\n \"distributed/pipeline/sync/test_dependency\",\n \"distributed/pipeline/sync/test_inplace\",\n \"distributed/pipeline/sync/test_microbatch\",\n \"distributed/pipeline/sync/test_phony\",\n \"distributed/pipeline/sync/test_pipe\",\n \"distributed/pipeline/sync/test_pipeline\",\n \"distributed/pipeline/sync/test_stream\",\n \"distributed/pipeline/sync/test_transparency\",\n \"distributed/pipeline/sync/test_worker\",\n \"distributed/elastic/agent/server/test/api_test\",\n \"distributed/elastic/multiprocessing/api_test\",\n \"distributed/_shard/checkpoint/test_checkpoint\"\n 
\"distributed/_shard/checkpoint/test_file_system_checkpoint\"\n \"distributed/_shard/sharding_spec/test_sharding_spec\",\n \"distributed/_shard/sharding_plan/test_sharding_plan\",\n \"distributed/_shard/sharded_tensor/test_megatron_prototype\",\n \"distributed/_shard/sharded_tensor/test_sharded_tensor\",\n \"distributed/_shard/sharded_tensor/test_sharded_tensor_reshard\",\n \"distributed/_shard/sharded_tensor/ops/test_chunk\",\n \"distributed/_shard/sharded_tensor/ops/test_elementwise_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_embedding\",\n \"distributed/_shard/sharded_tensor/ops/test_embedding_bag\",\n \"distributed/_shard/sharded_tensor/ops/test_binary_cmp\",\n \"distributed/_shard/sharded_tensor/ops/test_init\",\n \"distributed/_shard/sharded_tensor/ops/test_linear\",\n \"distributed/_shard/sharded_tensor/ops/test_math_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_matrix_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_softmax\",\n \"distributed/_shard/sharded_optim/test_sharded_optim\",\n \"distributed/_shard/test_partial_tensor\",\n \"distributed/_shard/test_replicated_tensor\",\n] + FSDP_TEST\n\nROCM_BLOCKLIST = [\n \"distributed/nn/jit/test_instantiator\",\n \"distributed/rpc/test_faulty_agent\",\n \"distributed/rpc/test_tensorpipe_agent\",\n \"distributed/rpc/test_share_memory\",\n \"distributed/rpc/cuda/test_tensorpipe_agent\",\n \"distributed/_shard/checkpoint/test_checkpoint\"\n \"distributed/_shard/checkpoint/test_file_system_checkpoint\"\n \"distributed/_shard/sharding_spec/test_sharding_spec\",\n \"distributed/_shard/sharding_plan/test_sharding_plan\",\n \"distributed/_shard/sharded_tensor/test_megatron_prototype\",\n \"distributed/_shard/sharded_tensor/test_sharded_tensor\",\n \"distributed/_shard/sharded_tensor/test_sharded_tensor_reshard\",\n \"distributed/_shard/sharded_tensor/ops/test_chunk\",\n \"distributed/_shard/sharded_tensor/ops/test_elementwise_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_embedding\",\n \"distributed/_shard/sharded_tensor/ops/test_embedding_bag\",\n \"distributed/_shard/sharded_tensor/ops/test_binary_cmp\",\n \"distributed/_shard/sharded_tensor/ops/test_init\",\n \"distributed/_shard/sharded_tensor/ops/test_linear\",\n \"distributed/_shard/sharded_tensor/ops/test_math_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_matrix_ops\",\n \"distributed/_shard/sharded_tensor/ops/test_softmax\",\n \"distributed/_shard/sharded_optim/test_sharded_optim\",\n \"distributed/_shard/test_partial_tensor\",\n \"distributed/_shard/test_replicated_tensor\",\n \"test_determination\",\n \"test_jit_legacy\",\n \"test_type_hints\",\n \"test_openmp\",\n]\n\nRUN_PARALLEL_BLOCKLIST = [\n \"test_cpp_extensions_jit\",\n \"test_jit_disabled\",\n \"test_mobile_optimizer\",\n \"test_multiprocessing\",\n \"test_multiprocessing_spawn\",\n \"test_namedtuple_return_api\",\n \"test_overrides\",\n \"test_show_pickle\",\n \"test_tensorexpr\",\n \"test_cuda_primary_ctx\",\n] + FSDP_TEST\n\nWINDOWS_COVERAGE_BLOCKLIST = []\n\n# A subset of our TEST list that validates PyTorch's ops, modules, and autograd function as expected\nCORE_TEST_LIST = [\n \"test_autograd\",\n \"test_modules\",\n \"test_nn\",\n \"test_ops\",\n \"test_ops_gradients\",\n \"test_ops_jit\",\n \"test_torch\"\n]\n\n# the JSON file to store the S3 test stats\nTEST_TIMES_FILE = \".pytorch-test-times.json\"\n\n# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST\nSLOW_TEST_THRESHOLD = 300\n\nDISTRIBUTED_TESTS_CONFIG = {}\n\n\nif dist.is_available():\n 
DISTRIBUTED_TESTS_CONFIG[\"test\"] = {\"WORLD_SIZE\": \"1\"}\n if not TEST_WITH_ROCM and dist.is_mpi_available():\n DISTRIBUTED_TESTS_CONFIG[\"mpi\"] = {\n \"WORLD_SIZE\": \"3\",\n \"TEST_REPORT_SOURCE_OVERRIDE\": \"dist-mpi\",\n }\n if dist.is_nccl_available():\n DISTRIBUTED_TESTS_CONFIG[\"nccl\"] = {\n \"WORLD_SIZE\": \"2\" if torch.cuda.device_count() == 2 else \"3\",\n \"TEST_REPORT_SOURCE_OVERRIDE\": \"dist-nccl\",\n }\n if dist.is_gloo_available():\n DISTRIBUTED_TESTS_CONFIG[\"gloo\"] = {\n \"WORLD_SIZE\": \"2\" if torch.cuda.device_count() == 2 else \"3\",\n \"TEST_REPORT_SOURCE_OVERRIDE\": \"dist-gloo\",\n }\n\n# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\nSIGNALS_TO_NAMES_DICT = {\n getattr(signal, n): n for n in dir(signal) if n.startswith(\"SIG\") and \"_\" not in n\n}\n\nCPP_EXTENSIONS_ERROR = \"\"\"\nNinja (https://ninja-build.org) is required for some of the C++ extensions\ntests, but it could not be found. Install ninja with `pip install ninja`\nor `conda install ninja`. Alternatively, disable said tests with\n`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.\n\"\"\"\n\nPYTORCH_COLLECT_COVERAGE = bool(os.environ.get(\"PYTORCH_COLLECT_COVERAGE\"))\n\nENABLE_PR_HISTORY_REORDERING = bool(\n os.environ.get(\"ENABLE_PR_HISTORY_REORDERING\", \"0\") == \"1\"\n)\n\nJIT_EXECUTOR_TESTS = [\n \"test_jit_profiling\",\n \"test_jit_legacy\",\n \"test_jit_fuser_legacy\",\n]\n\nDISTRIBUTED_TESTS = [test for test in TESTS if test.startswith(\"distributed\")]\n\nTESTS_REQUIRING_LAPACK = [\n \"distributions/test_constraints\",\n \"distributions/test_distributions\",\n]\n\n# Dictionary matching test modules (in TESTS) to lists of test cases (within that test_module) that would be run when\n# options.run_specified_test_cases is enabled.\n# For example:\n# {\n# \"test_nn\": [\"test_doubletensor_avg_pool3d\", \"test_share_memory\", \"test_hook_requires_grad\"],\n# ...\n# }\n# then for test_nn.py, we would ONLY run test_doubletensor_avg_pool3d, test_share_memory, and test_hook_requires_grad.\nSPECIFIED_TEST_CASES_DICT: Dict[str, List[str]] = {}\n\n# The file from which the SPECIFIED_TEST_CASES_DICT will be filled, a CSV of test cases that would be run when\n# options.run_specified_test_cases is enabled.\nSPECIFIED_TEST_CASES_FILE: str = \".pytorch_specified_test_cases.csv\"\n\n\ndef print_to_stderr(message):\n print(message, file=sys.stderr)\n\n\ndef get_test_case_args(test_module, using_pytest) -> List[str]:\n args = []\n # if test_module not specified or specified with '__all__' then run all tests\n if (\n test_module not in SPECIFIED_TEST_CASES_DICT\n or \"__all__\" in SPECIFIED_TEST_CASES_DICT[test_module]\n ):\n return args\n\n if using_pytest:\n args.append(\"-k\")\n args.append(\" or \".join(SPECIFIED_TEST_CASES_DICT[test_module]))\n else:\n for test in SPECIFIED_TEST_CASES_DICT[test_module]:\n args.append(\"-k\")\n args.append(test)\n\n return args\n\n\ndef get_executable_command(options, allow_pytest, disable_coverage=False):\n if options.coverage and not disable_coverage:\n executable = [\"coverage\", \"run\", \"--parallel-mode\", \"--source=torch\"]\n else:\n executable = [sys.executable]\n if options.pytest:\n if allow_pytest:\n executable += [\"-m\", \"pytest\"]\n else:\n print_to_stderr(\n \"Pytest cannot be used for this test. 
Falling back to unittest.\"\n )\n return executable\n\n\ndef run_test(\n test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None\n):\n unittest_args = options.additional_unittest_args.copy()\n if options.verbose:\n unittest_args.append(f'-{\"v\"*options.verbose}') # in case of pytest\n if test_module in RUN_PARALLEL_BLOCKLIST:\n unittest_args = [\n arg for arg in unittest_args if not arg.startswith(\"--run-parallel\")\n ]\n if extra_unittest_args:\n assert isinstance(extra_unittest_args, list)\n unittest_args.extend(extra_unittest_args)\n\n # If using pytest, replace -f with equivalent -x\n if options.pytest:\n unittest_args = [arg if arg != \"-f\" else \"-x\" for arg in unittest_args]\n elif IS_IN_CI:\n # use the downloaded test cases configuration, not supported in pytest\n unittest_args.extend([\"--import-slow-tests\", \"--import-disabled-tests\"])\n\n # Multiprocessing related tests cannot run with coverage.\n # Tracking issue: https://github.com/pytorch/pytorch/issues/50661\n disable_coverage = (\n sys.platform == \"win32\" and test_module in WINDOWS_COVERAGE_BLOCKLIST\n )\n\n # Extra arguments are not supported with pytest\n executable = get_executable_command(\n options, allow_pytest=not extra_unittest_args, disable_coverage=disable_coverage\n )\n\n # TODO: move this logic into common_utils.py instead of passing in \"-k\" individually\n # The following logic for running specified tests will only run for non-distributed tests, as those are dispatched\n # to test_distributed and not run_test (this function)\n if options.run_specified_test_cases:\n unittest_args.extend(get_test_case_args(test_module, \"pytest\" in executable))\n\n # Can't call `python -m unittest test_*` here because it doesn't run code\n # in `if __name__ == '__main__': `. So call `python test_*.py` instead.\n argv = [test_module + \".py\"] + unittest_args\n\n command = (launcher_cmd or []) + executable + argv\n print_to_stderr(\"Executing {} ... 
[{}]\".format(command, datetime.now()))\n return shell(command, test_directory)\n\n\ndef test_cuda_primary_ctx(test_module, test_directory, options):\n return run_test(\n test_module, test_directory, options, extra_unittest_args=[\"--subprocess\"]\n )\n\nrun_test_with_subprocess = functools.partial(run_test, extra_unittest_args=[\"--subprocess\"])\n\n\ndef get_run_test_with_subprocess_fn():\n return lambda test_module, test_directory, options: run_test_with_subprocess(test_module, test_directory, options)\n\n\n\ndef _test_cpp_extensions_aot(test_directory, options, use_ninja):\n if use_ninja:\n try:\n cpp_extension.verify_ninja_availability()\n except RuntimeError:\n print(CPP_EXTENSIONS_ERROR)\n return 1\n\n # Wipe the build folder, if it exists already\n cpp_extensions_test_dir = os.path.join(test_directory, \"cpp_extensions\")\n cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, \"build\")\n if os.path.exists(cpp_extensions_test_build_dir):\n shutil.rmtree(cpp_extensions_test_build_dir)\n\n # Build the test cpp extensions modules\n shell_env = os.environ.copy()\n shell_env[\"USE_NINJA\"] = str(1 if use_ninja else 0)\n cmd = [sys.executable, \"setup.py\", \"install\", \"--root\", \"./install\"]\n return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)\n if return_code != 0:\n return return_code\n if sys.platform != \"win32\":\n return_code = shell(\n cmd,\n cwd=os.path.join(cpp_extensions_test_dir, \"no_python_abi_suffix_test\"),\n env=shell_env,\n )\n if return_code != 0:\n return return_code\n\n # \"install\" the test modules and run tests\n python_path = os.environ.get(\"PYTHONPATH\", \"\")\n from shutil import copyfile\n\n test_module = \"test_cpp_extensions_aot\" + (\"_ninja\" if use_ninja else \"_no_ninja\")\n copyfile(\n test_directory + \"/test_cpp_extensions_aot.py\",\n test_directory + \"/\" + test_module + \".py\",\n )\n try:\n cpp_extensions = os.path.join(test_directory, \"cpp_extensions\")\n install_directory = \"\"\n # install directory is the one that is named site-packages\n for root, directories, _ in os.walk(os.path.join(cpp_extensions, \"install\")):\n for directory in directories:\n if \"-packages\" in directory:\n install_directory = os.path.join(root, directory)\n\n assert install_directory, \"install_directory must not be empty\"\n os.environ[\"PYTHONPATH\"] = os.pathsep.join([install_directory, python_path])\n return run_test(test_module, test_directory, options)\n finally:\n os.environ[\"PYTHONPATH\"] = python_path\n if os.path.exists(test_directory + \"/\" + test_module + \".py\"):\n os.remove(test_directory + \"/\" + test_module + \".py\")\n\n\ndef test_cpp_extensions_aot_ninja(test_module, test_directory, options):\n return _test_cpp_extensions_aot(test_directory, options, use_ninja=True)\n\n\ndef test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):\n return _test_cpp_extensions_aot(test_directory, options, use_ninja=False)\n\n\ndef test_distributed(test_module, test_directory, options):\n # MPI tests are broken with Python-3.9\n mpi_available = subprocess.call(\n \"command -v mpiexec\", shell=True\n ) == 0 and sys.version_info < (3, 9)\n if options.verbose and not mpi_available:\n print_to_stderr(\"MPI not available -- MPI backend tests will be skipped\")\n config = DISTRIBUTED_TESTS_CONFIG\n for backend, env_vars in config.items():\n if sys.platform == \"win32\" and backend != \"gloo\":\n continue\n if backend == \"mpi\" and not mpi_available:\n continue\n for with_init_file in {True, False}:\n if 
sys.platform == \"win32\" and not with_init_file:\n continue\n tmp_dir = tempfile.mkdtemp()\n if options.verbose:\n init_str = \"with {} init_method\"\n with_init = init_str.format(\"file\" if with_init_file else \"env\")\n print_to_stderr(\n \"Running distributed tests for the {} backend {}\".format(\n backend, with_init\n )\n )\n old_environ = dict(os.environ)\n os.environ[\"TEMP_DIR\"] = tmp_dir\n os.environ[\"BACKEND\"] = backend\n os.environ[\"INIT_METHOD\"] = \"env://\"\n os.environ.update(env_vars)\n if with_init_file:\n if test_module == \"test_distributed_spawn\":\n init_method = f\"{FILE_SCHEMA}{tmp_dir}/\"\n else:\n init_method = f\"{FILE_SCHEMA}{tmp_dir}/shared_init_file\"\n os.environ[\"INIT_METHOD\"] = init_method\n try:\n os.mkdir(os.path.join(tmp_dir, \"barrier\"))\n os.mkdir(os.path.join(tmp_dir, \"test_dir\"))\n if backend == \"mpi\":\n # test mpiexec for --noprefix option\n with open(os.devnull, \"w\") as devnull:\n allowrunasroot_opt = (\n \"--allow-run-as-root\"\n if subprocess.call(\n 'mpiexec --allow-run-as-root -n 1 bash -c \"\"',\n shell=True,\n stdout=devnull,\n stderr=subprocess.STDOUT,\n )\n == 0\n else \"\"\n )\n noprefix_opt = (\n \"--noprefix\"\n if subprocess.call(\n f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c \"\"',\n shell=True,\n stdout=devnull,\n stderr=subprocess.STDOUT,\n )\n == 0\n else \"\"\n )\n\n mpiexec = [\"mpiexec\", \"-n\", \"3\", noprefix_opt, allowrunasroot_opt]\n\n return_code = run_test(\n test_module, test_directory, options, launcher_cmd=mpiexec\n )\n else:\n return_code = run_test(test_module, test_directory, options, extra_unittest_args=[\"--subprocess\"])\n if return_code != 0:\n return return_code\n finally:\n shutil.rmtree(tmp_dir)\n os.environ.clear()\n os.environ.update(old_environ)\n return 0\n\n\nCUSTOM_HANDLERS = {\n \"test_cuda_primary_ctx\": test_cuda_primary_ctx,\n \"test_cpp_extensions_aot_no_ninja\": test_cpp_extensions_aot_no_ninja,\n \"test_cpp_extensions_aot_ninja\": test_cpp_extensions_aot_ninja,\n \"distributed/test_distributed_spawn\": test_distributed,\n \"distributed/test_c10d_nccl\": get_run_test_with_subprocess_fn(),\n \"distributed/test_c10d_gloo\": get_run_test_with_subprocess_fn(),\n \"distributed/test_c10d_common\": get_run_test_with_subprocess_fn(),\n \"distributed/test_c10d_spawn_gloo\": get_run_test_with_subprocess_fn(),\n \"distributed/test_c10d_spawn_nccl\": get_run_test_with_subprocess_fn(),\n \"distributed/test_store\": get_run_test_with_subprocess_fn(),\n \"distributed/test_pg_wrapper\": get_run_test_with_subprocess_fn(),\n \"distributed/rpc/test_faulty_agent\": get_run_test_with_subprocess_fn(),\n \"distributed/rpc/test_tensorpipe_agent\": get_run_test_with_subprocess_fn(),\n \"distributed/rpc/test_share_memory\": get_run_test_with_subprocess_fn(),\n \"distributed/rpc/cuda/test_tensorpipe_agent\": get_run_test_with_subprocess_fn(),\n}\n\ndef parse_test_module(test):\n return test.split(\".\")[0]\n\n\nclass TestChoices(list):\n def __init__(self, *args, **kwargs):\n super(TestChoices, self).__init__(args[0])\n\n def __contains__(self, item):\n return list.__contains__(self, parse_test_module(item))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Run the PyTorch unit test suite\",\n epilog=\"where TESTS is any of: {}\".format(\", \".join(TESTS)),\n formatter_class=argparse.RawTextHelpFormatter,\n parents=[common_parser]\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"print verbose information and test-by-test 
results\",\n )\n parser.add_argument(\"--jit\", \"--jit\", action=\"store_true\", help=\"run all jit tests\")\n parser.add_argument(\n \"--distributed-tests\",\n \"--distributed-tests\",\n action=\"store_true\",\n help=\"run all distributed tests\",\n )\n parser.add_argument(\n \"-core\",\n \"--core\",\n action=\"store_true\",\n help=\"Only run core tests, or tests that validate PyTorch's ops, modules,\"\n \"and autograd. They are defined by CORE_TEST_LIST.\"\n )\n parser.add_argument(\n \"-pt\",\n \"--pytest\",\n action=\"store_true\",\n help=\"If true, use `pytest` to execute the tests. E.g., this runs \"\n \"TestTorch with pytest in verbose and coverage mode: \"\n \"python run_test.py -vci torch -pt\",\n )\n parser.add_argument(\n \"-c\",\n \"--coverage\",\n action=\"store_true\",\n help=\"enable coverage\",\n default=PYTORCH_COLLECT_COVERAGE,\n )\n parser.add_argument(\n \"-i\",\n \"--include\",\n nargs=\"+\",\n choices=TestChoices(TESTS),\n default=TESTS,\n metavar=\"TESTS\",\n help=\"select a set of tests to include (defaults to ALL tests).\"\n \" tests must be a part of the TESTS list defined in run_test.py\",\n )\n parser.add_argument(\n \"-x\",\n \"--exclude\",\n nargs=\"+\",\n choices=TESTS,\n metavar=\"TESTS\",\n default=[],\n help=\"select a set of tests to exclude\",\n )\n parser.add_argument(\n \"-f\",\n \"--first\",\n choices=TESTS,\n metavar=\"TESTS\",\n help=\"select the test to start from (excludes previous tests)\",\n )\n parser.add_argument(\n \"-l\",\n \"--last\",\n choices=TESTS,\n metavar=\"TESTS\",\n help=\"select the last test to run (excludes following tests)\",\n )\n parser.add_argument(\n \"--bring-to-front\",\n nargs=\"+\",\n choices=TestChoices(TESTS),\n default=[],\n metavar=\"TESTS\",\n help=\"select a set of tests to run first. This can be used in situations\"\n \" where you want to run all tests, but care more about some set, \"\n \"e.g. 
after making a change to a specific component\",\n )\n parser.add_argument(\n \"--ignore-win-blocklist\",\n action=\"store_true\",\n help=\"always run blocklisted windows tests\",\n )\n # NS: Disable target determination until it can be made more reliable\n # parser.add_argument(\n # \"--determine-from\",\n # help=\"File of affected source filenames to determine which tests to run.\",\n # )\n parser.add_argument(\n \"--continue-through-error\",\n action=\"store_true\",\n help=\"Runs the full test suite despite one of the tests failing\",\n default=strtobool(os.environ.get(\"CONTINUE_THROUGH_ERROR\", \"False\")),\n )\n parser.add_argument(\n \"additional_unittest_args\",\n nargs=\"*\",\n help=\"additional arguments passed through to unittest, e.g., \"\n \"python run_test.py -i sparse -- TestSparse.test_factory_size_check\",\n )\n parser.add_argument(\n \"--export-past-test-times\",\n nargs=\"?\",\n type=str,\n const=TEST_TIMES_FILE,\n help=\"dumps test times from previous S3 stats into a file, format JSON\",\n )\n parser.add_argument(\n \"--shard\",\n nargs=2,\n type=int,\n help=\"runs a shard of the tests (taking into account other selections), e.g., \"\n \"--shard 2 3 will break up the selected tests into 3 shards and run the tests \"\n \"in the 2nd shard (the first number should not exceed the second)\",\n )\n parser.add_argument(\n \"--exclude-jit-executor\",\n action=\"store_true\",\n help=\"exclude tests that are run for a specific jit config\",\n )\n parser.add_argument(\n \"--exclude-distributed-tests\",\n action=\"store_true\",\n help=\"exclude distributed tests\",\n )\n parser.add_argument(\n \"--run-specified-test-cases\",\n nargs=\"?\",\n type=str,\n const=SPECIFIED_TEST_CASES_FILE,\n help=\"load specified test cases file dumped from previous OSS CI stats, format CSV. \"\n \" If all test cases should run for a <test_module> please add a single row: \\n\"\n \" test_filename,test_case_name\\n\"\n \" ...\\n\"\n \" <test_module>,__all__\\n\"\n \" ...\\n\"\n 'how we use the stats will be based on option \"--use-specified-test-cases-by\".',\n )\n parser.add_argument(\n \"--use-specified-test-cases-by\",\n type=str,\n choices=[\"include\", \"bring-to-front\"],\n default=\"include\",\n help='used together with option \"--run-specified-test-cases\". When specified test case '\n \"file is set, this option allows the user to control whether to only run the specified test \"\n \"modules or to simply bring the specified modules to front and also run the remaining \"\n \"modules. Note: regardless of this option, we will only run the specified test cases \"\n \" within a specified test module. 
For unspecified test modules with the bring-to-front \"\n \"option, all test cases will be run, as one may expect.\",\n )\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Only list the test that will run.\",\n )\n return parser.parse_args()\n\n\ndef find_test_index(test, selected_tests, find_last_index=False):\n \"\"\"Find the index of the first or last occurrence of a given test/test module in the list of selected tests.\n\n This function is used to determine the indices when slicing the list of selected tests when\n ``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.\n\n :attr:`selected_tests` can be a list that contains multiple consequent occurrences of tests\n as part of the same test module, e.g.:\n\n ```\n selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',\n 'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']\n ```\n\n If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.\n If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.\n\n Args:\n test (str): Name of test to lookup\n selected_tests (list): List of tests\n find_last_index (bool, optional): should we lookup the index of first or last\n occurrence (first is default)\n\n Returns:\n index of the first or last occurrence of the given test\n \"\"\"\n idx = 0\n found_idx = -1\n for t in selected_tests:\n if t.startswith(test):\n found_idx = idx\n if not find_last_index:\n break\n idx += 1\n return found_idx\n\n\ndef exclude_tests(exclude_list, selected_tests, exclude_message=None):\n for exclude_test in exclude_list:\n tests_copy = selected_tests[:]\n for test in tests_copy:\n if test.startswith(exclude_test):\n if exclude_message is not None:\n print_to_stderr(\"Excluding {} {}\".format(test, exclude_message))\n selected_tests.remove(test)\n return selected_tests\n\n\ndef get_selected_tests(options):\n # First make sure run specific test cases options are processed.\n if options.run_specified_test_cases:\n if options.use_specified_test_cases_by == \"include\":\n options.include = list(SPECIFIED_TEST_CASES_DICT.keys())\n elif options.use_specified_test_cases_by == \"bring-to-front\":\n options.bring_to_front = list(SPECIFIED_TEST_CASES_DICT.keys())\n\n selected_tests = options.include\n\n # filter if there's JIT only and distributed only test options\n if options.jit:\n selected_tests = list(\n filter(lambda test_name: \"jit\" in test_name, selected_tests)\n )\n\n if options.distributed_tests:\n selected_tests = list(\n filter(lambda test_name: test_name in DISTRIBUTED_TESTS, selected_tests)\n )\n\n # Filter to only run core tests when --core option is specified\n if options.core:\n selected_tests = list(\n filter(lambda test_name: test_name in CORE_TEST_LIST, selected_tests)\n )\n\n # process reordering\n if options.bring_to_front:\n to_front = set(options.bring_to_front)\n selected_tests = options.bring_to_front + list(\n filter(lambda name: name not in to_front, selected_tests)\n )\n\n if options.first:\n first_index = find_test_index(options.first, selected_tests)\n selected_tests = selected_tests[first_index:]\n\n if options.last:\n last_index = find_test_index(options.last, selected_tests, find_last_index=True)\n selected_tests = selected_tests[: last_index + 1]\n\n # process exclusion\n if options.exclude_jit_executor:\n options.exclude.extend(JIT_EXECUTOR_TESTS)\n\n if options.exclude_distributed_tests:\n options.exclude.extend(DISTRIBUTED_TESTS)\n\n # 
these tests failing in CUDA 11.6 temporary disabling. issue https://github.com/pytorch/pytorch/issues/75375\n if torch.version.cuda is not None and LooseVersion(torch.version.cuda) == \"11.6\":\n options.exclude.extend([\"distributions/test_constraints\"])\n\n selected_tests = exclude_tests(options.exclude, selected_tests)\n\n if sys.platform == \"win32\" and not options.ignore_win_blocklist:\n target_arch = os.environ.get(\"VSCMD_ARG_TGT_ARCH\")\n if target_arch != \"x64\":\n WINDOWS_BLOCKLIST.append(\"cpp_extensions_aot_no_ninja\")\n WINDOWS_BLOCKLIST.append(\"cpp_extensions_aot_ninja\")\n WINDOWS_BLOCKLIST.append(\"cpp_extensions_jit\")\n WINDOWS_BLOCKLIST.append(\"jit\")\n WINDOWS_BLOCKLIST.append(\"jit_fuser\")\n\n # This is exception that's caused by this issue https://github.com/pytorch/pytorch/issues/69460\n # This below code should be removed once this issue is solved\n if torch.version.cuda is not None and LooseVersion(torch.version.cuda) >= \"11.5\":\n WINDOWS_BLOCKLIST.append(\"test_cpp_extensions_aot\")\n WINDOWS_BLOCKLIST.append(\"test_cpp_extensions_aot_ninja\")\n WINDOWS_BLOCKLIST.append(\"test_cpp_extensions_aot_no_ninja\")\n\n selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, \"on Windows\")\n\n elif TEST_WITH_ROCM:\n selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, \"on ROCm\")\n\n # sharding\n if options.shard:\n assert len(options.shard) == 2, \"Unexpected shard format\"\n assert min(options.shard) > 0, \"Shards must be positive numbers\"\n which_shard, num_shards = options.shard\n assert (\n which_shard <= num_shards\n ), \"Selected shard must be less than or equal to total number of shards\"\n assert num_shards <= len(\n selected_tests\n ), f\"Number of shards must be less than {len(selected_tests)}\"\n # TODO: fix this to use test_times_filename, but currently this is not working\n # because setting the export arg immeidately halts the test execution.\n selected_tests = get_shard_based_on_S3(\n which_shard, num_shards, selected_tests, TEST_TIMES_FILE\n )\n\n # skip all distributed tests if distributed package is not available.\n if not dist.is_available():\n selected_tests = exclude_tests(DISTRIBUTED_TESTS, selected_tests,\n \"PyTorch is built without distributed support.\")\n\n # skip tests that require LAPACK when it's not available\n if not torch._C.has_lapack:\n selected_tests = exclude_tests(TESTS_REQUIRING_LAPACK, selected_tests,\n \"PyTorch is built without LAPACK support.\")\n\n return selected_tests\n\n\ndef run_test_module(test: str, test_directory: str, options) -> Optional[str]:\n test_module = parse_test_module(test)\n\n # Printing the date here can help diagnose which tests are slow\n print_to_stderr(\"Running {} ... 
[{}]\".format(test, datetime.now()))\n handler = CUSTOM_HANDLERS.get(test_module, run_test)\n return_code = handler(test_module, test_directory, options)\n assert isinstance(return_code, int) and not isinstance(\n return_code, bool\n ), \"Return code should be an integer\"\n if return_code == 0:\n return None\n\n message = f\"{test} failed!\"\n if return_code < 0:\n # subprocess.Popen returns the child process' exit signal as\n # return code -N, where N is the signal number.\n signal_name = SIGNALS_TO_NAMES_DICT[-return_code]\n message += f\" Received signal: {signal_name}\"\n return message\n\n\ndef main():\n options = parse_args()\n\n # TODO: move this export & download function in tools/ folder\n test_times_filename = options.export_past_test_times\n if test_times_filename:\n print(\n f\"Exporting past test times from S3 to {test_times_filename}, no tests will be run.\"\n )\n export_S3_test_times(test_times_filename)\n return\n\n specified_test_cases_filename = options.run_specified_test_cases\n if specified_test_cases_filename:\n print(\n f\"Loading specified test cases to run from {specified_test_cases_filename}.\"\n )\n global SPECIFIED_TEST_CASES_DICT\n SPECIFIED_TEST_CASES_DICT = get_specified_test_cases(\n specified_test_cases_filename, TESTS\n )\n\n test_directory = str(REPO_ROOT / \"test\")\n selected_tests = get_selected_tests(options)\n\n if options.verbose:\n print_to_stderr(\"Selected tests:\\n {}\".format(\"\\n \".join(selected_tests)))\n\n if options.dry_run:\n return\n\n if options.coverage and not PYTORCH_COLLECT_COVERAGE:\n shell([\"coverage\", \"erase\"])\n\n # NS: Disable target determination until it can be made more reliable\n # if options.determine_from is not None and os.path.exists(options.determine_from):\n # slow_tests = get_slow_tests_based_on_S3(\n # TESTS, TARGET_DET_LIST, SLOW_TEST_THRESHOLD\n # )\n # print_to_stderr(\n # \"Added the following tests to target_det tests as calculated based on S3:\"\n # )\n # print_to_stderr(slow_tests)\n # with open(options.determine_from, \"r\") as fh:\n # touched_files = [\n # os.path.normpath(name.strip())\n # for name in fh.read().split(\"\\n\")\n # if len(name.strip()) > 0\n # ]\n # # HACK: Ensure the 'test' paths can be traversed by Modulefinder\n # sys.path.append(test_directory)\n # selected_tests = [\n # test\n # for test in selected_tests\n # if should_run_test(\n # TARGET_DET_LIST + slow_tests, test, touched_files, options\n # )\n # ]\n # sys.path.remove(test_directory)\n\n if IS_IN_CI:\n selected_tests = get_reordered_tests(\n selected_tests, ENABLE_PR_HISTORY_REORDERING\n )\n # downloading test cases configuration to local environment\n get_test_case_configs(dirpath=test_directory)\n\n has_failed = False\n failure_messages = []\n try:\n for test in selected_tests:\n options_clone = copy.deepcopy(options)\n if test in USE_PYTEST_LIST:\n options_clone.pytest = True\n err_message = run_test_module(test, test_directory, options_clone)\n if err_message is None:\n continue\n has_failed = True\n failure_messages.append(err_message)\n if not options_clone.continue_through_error:\n raise RuntimeError(err_message)\n print_to_stderr(err_message)\n finally:\n if options.coverage:\n from coverage import Coverage\n\n with set_cwd(test_directory):\n cov = Coverage()\n if PYTORCH_COLLECT_COVERAGE:\n cov.load()\n cov.combine(strict=False)\n cov.save()\n if not PYTORCH_COLLECT_COVERAGE:\n cov.html_report()\n\n if options.continue_through_error and has_failed:\n for err in failure_messages:\n print_to_stderr(err)\n 
sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.distributed.is_mpi_available", "torch.distributed.is_available", "torch.utils.cpp_extension.verify_ninja_availability", "torch.testing._internal.common_utils.set_cwd", "torch.cuda.device_count", "torch.distributed.is_gloo_available", "torch.distributed.is_nccl_available", "torch.testing._internal.common_utils.shell" ] ]
danielkentwood/fireTS
[ "3b00ae932e13997a0d069515273c09fc24e0593d" ]
[ "fireTS/core.py" ]
[ "import numpy as np\r\nfrom sklearn.base import BaseEstimator, RegressorMixin\r\nfrom sklearn.utils.validation import check_X_y\r\nfrom fireTS.utils import shift, MetaLagFeatureProcessor\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n\r\nclass TimeSeriesRegressor(BaseEstimator, RegressorMixin):\r\n \"\"\"\r\n TimeSeriesRegressor creates a time series model based on a\r\n general-purpose regression model defined in base_estimator.\r\n base_estimator must be a model which implements the scikit-learn APIs.\r\n \"\"\"\r\n\r\n def __init__(self, base_estimator, **base_params):\r\n self.base_estimator = base_estimator.set_params(**base_params)\r\n\r\n def set_params(self, **params):\r\n for param, value in params.items():\r\n if param in self.get_params():\r\n super(TimeSeriesRegressor, self).set_params(**{param: value})\r\n else:\r\n self.base_estimator.set_params(**{param: value})\r\n return self\r\n\r\n\r\nclass GeneralAutoRegressor(TimeSeriesRegressor, RegressorMixin):\r\n r\"\"\"\r\n The general auto regression model can be written in the following form:\r\n\r\n .. math::\r\n y(t + k) &=& f(y(t), ..., y(t-p+1), \\\\\r\n & & x_1(t - d_1), ..., x_1(t-d_1-q_1+1), \\\\\r\n & & ..., x_m(t - d_1), ..., x_m(t - d_m - q_m + 1)) + e(t)\r\n :label: gar\r\n\r\n :param object base_estimator: an estimator object that implements the\r\n scikit-learn API (fit, and predict). The\r\n estimator will be used to fit the function\r\n :math:`f` in equation :eq:`gar`.\r\n :param int auto_order: the autoregression order :math:`p` in equation\r\n :eq:`gar`.\r\n :param list exog_order: the exogenous input order, a list of integers\r\n representing the order for each exogenous input,\r\n i.e. :math:`[q_1, q_2, ..., q_m]` in equation\r\n :eq:`gar`.\r\n :param list exog_delay: the delays of the exogenous inputs, a list of\r\n integers representing the delay of each exogenous\r\n input, i.e. :math:`[d_1, d_2, ..., d_m]` in\r\n equation :eq:`gar`. 
By default, all the delays are\r\n set to 0.\r\n :param int pred_step: the prediction step :math:`k` in equation :eq:`gar`.\r\n By default, it is set to 1.\r\n :param dict base_params: other keyword arguments for base_estimator.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n base_estimator,\r\n auto_order,\r\n exog_order,\r\n exog_delay=None,\r\n pred_step=1,\r\n **base_params):\r\n super(GeneralAutoRegressor, self).__init__(base_estimator,\r\n **base_params)\r\n self.auto_order = auto_order\r\n self.exog_order = exog_order\r\n if exog_delay is None:\r\n exog_delay = [0] * len(exog_order)\r\n if len(exog_delay) != len(exog_order):\r\n raise ValueError(\r\n 'The length of exog_delay must be the same as the length of exog_order.'\r\n )\r\n self.exog_delay = exog_delay\r\n self.num_exog_inputs = len(exog_order)\r\n self.pred_step = pred_step\r\n\r\n def fit(self, X, y, **params):\r\n \"\"\"\r\n Create lag features and fit the base_estimator.\r\n\r\n :param array-like X: exogenous input time series, shape = (n_samples,\r\n n_exog_inputs)\r\n :param array-like y: target time series to predict, shape = (n_samples)\r\n \"\"\"\r\n X, y = self._check_and_preprocess_X_y(X, y)\r\n features, target = self._preprocess_data(X, y)\r\n self.base_estimator.fit(features, target, **params)\r\n\r\n def _preprocess_data(self, X, y):\r\n \"\"\"\r\n Helper function to prepare the data for base_estimator.\r\n \"\"\"\r\n p = self._get_lag_feature_processor(X, y)\r\n features = p.generate_lag_features()\r\n target = shift(y, -self.pred_step)\r\n\r\n # Remove NaN introduced by shift\r\n all_data = np.concatenate([target.reshape(-1, 1), features], axis=1)\r\n mask = np.isnan(all_data).any(axis=1)\r\n features, target = features[~mask], target[~mask]\r\n return features, target\r\n\r\n def _get_lag_feature_processor(self, X, y):\r\n return MetaLagFeatureProcessor(X, y, self.auto_order, self.exog_order,\r\n self.exog_delay)\r\n\r\n def grid_search(self, X, y, para_grid, **params):\r\n \"\"\"\r\n Perform grid search on the base_estimator. The function first generates\r\n the lag features and predicting targets, and then calls\r\n ``GridSearchCV`` in scikit-learn package.\r\n\r\n :param array-like X: exogenous input time series, shape = (n_samples,\r\n n_exog_inputs)\r\n :param array-like y: target time series to predict, shape = (n_samples)\r\n :param dict para_grid: use the same format in ``GridSearchCV`` in\r\n scikit-learn package.\r\n :param dict params: other keyword arguments that can be passed into\r\n ``GridSearchCV`` in scikit-learn package.\r\n \"\"\"\r\n grid = GridSearchCV(self.base_estimator, para_grid, **params)\r\n X, y = self._check_and_preprocess_X_y(X, y)\r\n features, target = self._preprocess_data(X, y)\r\n grid.fit(features, target)\r\n self.set_params(**grid.best_params_)\r\n\r\n def _predictNA(self, Xdata):\r\n # Xdata contains nan introduced by shift\r\n ypred = np.empty(Xdata.shape[0]) * np.nan\r\n mask = np.isnan(Xdata).any(axis=1)\r\n X2pred = Xdata[~mask]\r\n ypred[~mask] = self.base_estimator.predict(X2pred)\r\n return ypred\r\n\r\n def _check_and_preprocess_X_y(self, X, y):\r\n min_samples_required = max(self.auto_order, \r\n np.max(np.array(self.exog_delay) + np.array(self.exog_order))) - 1\r\n X, y = check_X_y(X, y, ensure_min_samples=min_samples_required)\r\n if len(self.exog_order) != X.shape[1]:\r\n raise ValueError(\r\n 'The number of columns of X must be the same as the length of exog_order.'\r\n )\r\n return X, y\r\n" ]
[ [ "numpy.empty", "sklearn.model_selection.GridSearchCV", "numpy.isnan", "numpy.array", "sklearn.utils.validation.check_X_y" ] ]
havanagrawal/ml-from-scratch
[ "1f5aacc3bb8f831dadf5f27cf8d6fa867b4ec3ab" ]
[ "examples/spam_trainer.py" ]
[ "import os\nimport sys\n\nimport logging\nlogging.basicConfig(format='%(asctime)s %(levelname)-8s [%(process)d] %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')\n\nimport numpy as np\n\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score\n\n_curdir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(_curdir + \"/..\")\n\nfrom classifiers import FGMClassifier\nimport datasets\n\ndef train_spam_untuned(classifier, X_train, X_test, y_train, y_test):\n clf = FGMClassifier(classifier=classifier, lmbda=0.1, max_iter=20, learning_rate='adaptive', eta=1)\n return fit_predict(clf, X_train, X_test, y_train, y_test)\n\ndef train_spam_tuned(X_train, X_test, y_train, y_test):\n param_grid = {\n 'classifier': ['logistic', 'svm'],\n 'lmbda': np.linspace(0, 1, 3)\n }\n clf = GridSearchCV(FGMClassifier(max_iter=10, verbose=True), param_grid, verbose=2)\n\n return fit_predict(clf, X_train, X_test, y_train, y_test)\n\n\ndef fit_predict(clf, X_train, X_test, y_train, y_test):\n clf.fit(X_train, y_train)\n\n logging.info(\"Predicting...\")\n logging.info(\"Training accuracy: {}\".format(clf.score(X_train, y_train)))\n logging.info(\"Test accuracy: {}\".format(clf.score(X_test, y_test)))\n\n return clf\n\ndef main():\n logging.info(\"Loading data...\")\n X_train, X_test, y_train, y_test = datasets.load_spam(standardized=True, with_intercept=True)\n\n logging.info(\"Training logistic without tuning lambda...\")\n train_spam_untuned('logistic', X_train, X_test, y_train, y_test)\n\n logging.info(\"Training svm without tuning lambda...\")\n train_spam_untuned('svm', X_train, X_test, y_train, y_test)\n\n logging.info(\"Training with GridSearchCV...\")\n clf = train_spam_tuned(X_train, X_test, y_train, y_test)\n\n logging.info(\"Best params: {}\".format(clf.best_params_))\n logging.info(\"Best estimator: {}\".format(clf.best_estimator_))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.linspace" ] ]
obulrdy6881/Drowsinss
[ "61cb9281d7dd22aee282b517e2fbf500f0ff9935" ]
[ "my_env/Lib/site-packages/sklearn/cluster/_birch.py" ]
[ "# Authors: Manoj Kumar <[email protected]>\r\n# Alexandre Gramfort <[email protected]>\r\n# Joel Nothman <[email protected]>\r\n# License: BSD 3 clause\r\n\r\nimport warnings\r\nimport numbers\r\nimport numpy as np\r\nfrom scipy import sparse\r\nfrom math import sqrt\r\n\r\nfrom ..metrics import pairwise_distances_argmin\r\nfrom ..metrics.pairwise import euclidean_distances\r\nfrom ..base import TransformerMixin, ClusterMixin, BaseEstimator\r\nfrom ..utils import check_array\r\nfrom ..utils.extmath import row_norms\r\nfrom ..utils.validation import check_is_fitted, _deprecate_positional_args\r\nfrom ..exceptions import ConvergenceWarning\r\nfrom . import AgglomerativeClustering\r\n\r\n\r\ndef _iterate_sparse_X(X):\r\n \"\"\"This little hack returns a densified row when iterating over a sparse\r\n matrix, instead of constructing a sparse matrix for every row that is\r\n expensive.\r\n \"\"\"\r\n n_samples = X.shape[0]\r\n X_indices = X.indices\r\n X_data = X.data\r\n X_indptr = X.indptr\r\n\r\n for i in range(n_samples):\r\n row = np.zeros(X.shape[1])\r\n startptr, endptr = X_indptr[i], X_indptr[i + 1]\r\n nonzero_indices = X_indices[startptr:endptr]\r\n row[nonzero_indices] = X_data[startptr:endptr]\r\n yield row\r\n\r\n\r\ndef _split_node(node, threshold, branching_factor):\r\n \"\"\"The node has to be split if there is no place for a new subcluster\r\n in the node.\r\n 1. Two empty nodes and two empty subclusters are initialized.\r\n 2. The pair of distant subclusters are found.\r\n 3. The properties of the empty subclusters and nodes are updated\r\n according to the nearest distance between the subclusters to the\r\n pair of distant subclusters.\r\n 4. The two nodes are set as children to the two subclusters.\r\n \"\"\"\r\n new_subcluster1 = _CFSubcluster()\r\n new_subcluster2 = _CFSubcluster()\r\n new_node1 = _CFNode(\r\n threshold=threshold, branching_factor=branching_factor,\r\n is_leaf=node.is_leaf,\r\n n_features=node.n_features)\r\n new_node2 = _CFNode(\r\n threshold=threshold, branching_factor=branching_factor,\r\n is_leaf=node.is_leaf,\r\n n_features=node.n_features)\r\n new_subcluster1.child_ = new_node1\r\n new_subcluster2.child_ = new_node2\r\n\r\n if node.is_leaf:\r\n if node.prev_leaf_ is not None:\r\n node.prev_leaf_.next_leaf_ = new_node1\r\n new_node1.prev_leaf_ = node.prev_leaf_\r\n new_node1.next_leaf_ = new_node2\r\n new_node2.prev_leaf_ = new_node1\r\n new_node2.next_leaf_ = node.next_leaf_\r\n if node.next_leaf_ is not None:\r\n node.next_leaf_.prev_leaf_ = new_node2\r\n\r\n dist = euclidean_distances(\r\n node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)\r\n n_clusters = dist.shape[0]\r\n\r\n farthest_idx = np.unravel_index(\r\n dist.argmax(), (n_clusters, n_clusters))\r\n node1_dist, node2_dist = dist[(farthest_idx,)]\r\n\r\n node1_closer = node1_dist < node2_dist\r\n for idx, subcluster in enumerate(node.subclusters_):\r\n if node1_closer[idx]:\r\n new_node1.append_subcluster(subcluster)\r\n new_subcluster1.update(subcluster)\r\n else:\r\n new_node2.append_subcluster(subcluster)\r\n new_subcluster2.update(subcluster)\r\n return new_subcluster1, new_subcluster2\r\n\r\n\r\nclass _CFNode:\r\n \"\"\"Each node in a CFTree is called a CFNode.\r\n\r\n The CFNode can have a maximum of branching_factor\r\n number of CFSubclusters.\r\n\r\n Parameters\r\n ----------\r\n threshold : float\r\n Threshold needed for a new subcluster to enter a CFSubcluster.\r\n\r\n branching_factor : int\r\n Maximum number of CF subclusters in each node.\r\n\r\n is_leaf : 
bool\r\n We need to know if the CFNode is a leaf or not, in order to\r\n retrieve the final subclusters.\r\n\r\n n_features : int\r\n The number of features.\r\n\r\n Attributes\r\n ----------\r\n subclusters_ : list\r\n List of subclusters for a particular CFNode.\r\n\r\n prev_leaf_ : _CFNode\r\n Useful only if is_leaf is True.\r\n\r\n next_leaf_ : _CFNode\r\n next_leaf. Useful only if is_leaf is True.\r\n the final subclusters.\r\n\r\n init_centroids_ : ndarray of shape (branching_factor + 1, n_features)\r\n Manipulate ``init_centroids_`` throughout rather than centroids_ since\r\n the centroids are just a view of the ``init_centroids_`` .\r\n\r\n init_sq_norm_ : ndarray of shape (branching_factor + 1,)\r\n manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.\r\n\r\n centroids_ : ndarray of shape (branching_factor + 1, n_features)\r\n View of ``init_centroids_``.\r\n\r\n squared_norm_ : ndarray of shape (branching_factor + 1,)\r\n View of ``init_sq_norm_``.\r\n\r\n \"\"\"\r\n def __init__(self, *, threshold, branching_factor, is_leaf, n_features):\r\n self.threshold = threshold\r\n self.branching_factor = branching_factor\r\n self.is_leaf = is_leaf\r\n self.n_features = n_features\r\n\r\n # The list of subclusters, centroids and squared norms\r\n # to manipulate throughout.\r\n self.subclusters_ = []\r\n self.init_centroids_ = np.zeros((branching_factor + 1, n_features))\r\n self.init_sq_norm_ = np.zeros((branching_factor + 1))\r\n self.squared_norm_ = []\r\n self.prev_leaf_ = None\r\n self.next_leaf_ = None\r\n\r\n def append_subcluster(self, subcluster):\r\n n_samples = len(self.subclusters_)\r\n self.subclusters_.append(subcluster)\r\n self.init_centroids_[n_samples] = subcluster.centroid_\r\n self.init_sq_norm_[n_samples] = subcluster.sq_norm_\r\n\r\n # Keep centroids and squared norm as views. 
In this way\r\n # if we change init_centroids and init_sq_norm_, it is\r\n # sufficient,\r\n self.centroids_ = self.init_centroids_[:n_samples + 1, :]\r\n self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]\r\n\r\n def update_split_subclusters(self, subcluster,\r\n new_subcluster1, new_subcluster2):\r\n \"\"\"Remove a subcluster from a node and update it with the\r\n split subclusters.\r\n \"\"\"\r\n ind = self.subclusters_.index(subcluster)\r\n self.subclusters_[ind] = new_subcluster1\r\n self.init_centroids_[ind] = new_subcluster1.centroid_\r\n self.init_sq_norm_[ind] = new_subcluster1.sq_norm_\r\n self.append_subcluster(new_subcluster2)\r\n\r\n def insert_cf_subcluster(self, subcluster):\r\n \"\"\"Insert a new subcluster into the node.\"\"\"\r\n if not self.subclusters_:\r\n self.append_subcluster(subcluster)\r\n return False\r\n\r\n threshold = self.threshold\r\n branching_factor = self.branching_factor\r\n # We need to find the closest subcluster among all the\r\n # subclusters so that we can insert our new subcluster.\r\n dist_matrix = np.dot(self.centroids_, subcluster.centroid_)\r\n dist_matrix *= -2.\r\n dist_matrix += self.squared_norm_\r\n closest_index = np.argmin(dist_matrix)\r\n closest_subcluster = self.subclusters_[closest_index]\r\n\r\n # If the subcluster has a child, we need a recursive strategy.\r\n if closest_subcluster.child_ is not None:\r\n split_child = closest_subcluster.child_.insert_cf_subcluster(\r\n subcluster)\r\n\r\n if not split_child:\r\n # If it is determined that the child need not be split, we\r\n # can just update the closest_subcluster\r\n closest_subcluster.update(subcluster)\r\n self.init_centroids_[closest_index] = \\\r\n self.subclusters_[closest_index].centroid_\r\n self.init_sq_norm_[closest_index] = \\\r\n self.subclusters_[closest_index].sq_norm_\r\n return False\r\n\r\n # things not too good. we need to redistribute the subclusters in\r\n # our child node, and add a new subcluster in the parent\r\n # subcluster to accommodate the new child.\r\n else:\r\n new_subcluster1, new_subcluster2 = _split_node(\r\n closest_subcluster.child_, threshold, branching_factor)\r\n self.update_split_subclusters(\r\n closest_subcluster, new_subcluster1, new_subcluster2)\r\n\r\n if len(self.subclusters_) > self.branching_factor:\r\n return True\r\n return False\r\n\r\n # good to go!\r\n else:\r\n merged = closest_subcluster.merge_subcluster(\r\n subcluster, self.threshold)\r\n if merged:\r\n self.init_centroids_[closest_index] = \\\r\n closest_subcluster.centroid_\r\n self.init_sq_norm_[closest_index] = \\\r\n closest_subcluster.sq_norm_\r\n return False\r\n\r\n # not close to any other subclusters, and we still\r\n # have space, so add.\r\n elif len(self.subclusters_) < self.branching_factor:\r\n self.append_subcluster(subcluster)\r\n return False\r\n\r\n # We do not have enough space nor is it closer to an\r\n # other subcluster. We need to split.\r\n else:\r\n self.append_subcluster(subcluster)\r\n return True\r\n\r\n\r\nclass _CFSubcluster:\r\n \"\"\"Each subcluster in a CFNode is called a CFSubcluster.\r\n\r\n A CFSubcluster can have a CFNode has its child.\r\n\r\n Parameters\r\n ----------\r\n linear_sum : ndarray of shape (n_features,), default=None\r\n Sample. This is kept optional to allow initialization of empty\r\n subclusters.\r\n\r\n Attributes\r\n ----------\r\n n_samples_ : int\r\n Number of samples that belong to each subcluster.\r\n\r\n linear_sum_ : ndarray\r\n Linear sum of all the samples in a subcluster. 
Prevents holding\r\n all sample data in memory.\r\n\r\n squared_sum_ : float\r\n Sum of the squared l2 norms of all samples belonging to a subcluster.\r\n\r\n centroid_ : ndarray of shape (branching_factor + 1, n_features)\r\n Centroid of the subcluster. Prevent recomputing of centroids when\r\n ``CFNode.centroids_`` is called.\r\n\r\n child_ : _CFNode\r\n Child Node of the subcluster. Once a given _CFNode is set as the child\r\n of the _CFNode, it is set to ``self.child_``.\r\n\r\n sq_norm_ : ndarray of shape (branching_factor + 1,)\r\n Squared norm of the subcluster. Used to prevent recomputing when\r\n pairwise minimum distances are computed.\r\n \"\"\"\r\n def __init__(self, *, linear_sum=None):\r\n if linear_sum is None:\r\n self.n_samples_ = 0\r\n self.squared_sum_ = 0.0\r\n self.centroid_ = self.linear_sum_ = 0\r\n else:\r\n self.n_samples_ = 1\r\n self.centroid_ = self.linear_sum_ = linear_sum\r\n self.squared_sum_ = self.sq_norm_ = np.dot(\r\n self.linear_sum_, self.linear_sum_)\r\n self.child_ = None\r\n\r\n def update(self, subcluster):\r\n self.n_samples_ += subcluster.n_samples_\r\n self.linear_sum_ += subcluster.linear_sum_\r\n self.squared_sum_ += subcluster.squared_sum_\r\n self.centroid_ = self.linear_sum_ / self.n_samples_\r\n self.sq_norm_ = np.dot(self.centroid_, self.centroid_)\r\n\r\n def merge_subcluster(self, nominee_cluster, threshold):\r\n \"\"\"Check if a cluster is worthy enough to be merged. If\r\n yes then merge.\r\n \"\"\"\r\n new_ss = self.squared_sum_ + nominee_cluster.squared_sum_\r\n new_ls = self.linear_sum_ + nominee_cluster.linear_sum_\r\n new_n = self.n_samples_ + nominee_cluster.n_samples_\r\n new_centroid = (1 / new_n) * new_ls\r\n new_norm = np.dot(new_centroid, new_centroid)\r\n dot_product = (-2 * new_n) * new_norm\r\n sq_radius = (new_ss + dot_product) / new_n + new_norm\r\n if sq_radius <= threshold ** 2:\r\n (self.n_samples_, self.linear_sum_, self.squared_sum_,\r\n self.centroid_, self.sq_norm_) = \\\r\n new_n, new_ls, new_ss, new_centroid, new_norm\r\n return True\r\n return False\r\n\r\n @property\r\n def radius(self):\r\n \"\"\"Return radius of the subcluster\"\"\"\r\n dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)\r\n return sqrt(\r\n ((self.squared_sum_ + dot_product) / self.n_samples_) +\r\n self.sq_norm_)\r\n\r\n\r\nclass Birch(ClusterMixin, TransformerMixin, BaseEstimator):\r\n \"\"\"Implements the Birch clustering algorithm.\r\n\r\n It is a memory-efficient, online-learning algorithm provided as an\r\n alternative to :class:`MiniBatchKMeans`. It constructs a tree\r\n data structure with the cluster centroids being read off the leaf.\r\n These can be either the final cluster centroids or can be provided as input\r\n to another clustering algorithm such as :class:`AgglomerativeClustering`.\r\n\r\n Read more in the :ref:`User Guide <birch>`.\r\n\r\n .. versionadded:: 0.16\r\n\r\n Parameters\r\n ----------\r\n threshold : float, default=0.5\r\n The radius of the subcluster obtained by merging a new sample and the\r\n closest subcluster should be lesser than the threshold. Otherwise a new\r\n subcluster is started. Setting this value to be very low promotes\r\n splitting and vice-versa.\r\n\r\n branching_factor : int, default=50\r\n Maximum number of CF subclusters in each node. If a new samples enters\r\n such that the number of subclusters exceed the branching_factor then\r\n that node is split into two nodes with the subclusters redistributed\r\n in each. 
The parent subcluster of that node is removed and two new\r\n subclusters are added as parents of the 2 split nodes.\r\n\r\n n_clusters : int, instance of sklearn.cluster model, default=3\r\n Number of clusters after the final clustering step, which treats the\r\n subclusters from the leaves as new samples.\r\n\r\n - `None` : the final clustering step is not performed and the\r\n subclusters are returned as they are.\r\n\r\n - :mod:`sklearn.cluster` Estimator : If a model is provided, the model\r\n is fit treating the subclusters as new samples and the initial data\r\n is mapped to the label of the closest subcluster.\r\n\r\n - `int` : the model fit is :class:`AgglomerativeClustering` with\r\n `n_clusters` set to be equal to the int.\r\n\r\n compute_labels : bool, default=True\r\n Whether or not to compute labels for each fit.\r\n\r\n copy : bool, default=True\r\n Whether or not to make a copy of the given data. If set to False,\r\n the initial data will be overwritten.\r\n\r\n Attributes\r\n ----------\r\n root_ : _CFNode\r\n Root of the CFTree.\r\n\r\n dummy_leaf_ : _CFNode\r\n Start pointer to all the leaves.\r\n\r\n subcluster_centers_ : ndarray\r\n Centroids of all subclusters read directly from the leaves.\r\n\r\n subcluster_labels_ : ndarray\r\n Labels assigned to the centroids of the subclusters after\r\n they are clustered globally.\r\n\r\n labels_ : ndarray of shape (n_samples,)\r\n Array of labels assigned to the input data.\r\n if partial_fit is used instead of fit, they are assigned to the\r\n last batch of data.\r\n\r\n See Also\r\n --------\r\n\r\n MiniBatchKMeans\r\n Alternative implementation that does incremental updates\r\n of the centers' positions using mini-batches.\r\n\r\n Notes\r\n -----\r\n The tree data structure consists of nodes with each node consisting of\r\n a number of subclusters. The maximum number of subclusters in a node\r\n is determined by the branching factor. Each subcluster maintains a\r\n linear sum, squared sum and the number of samples in that subcluster.\r\n In addition, each subcluster can also have a node as its child, if the\r\n subcluster is not a member of a leaf node.\r\n\r\n For a new point entering the root, it is merged with the subcluster closest\r\n to it and the linear sum, squared sum and the number of samples of that\r\n subcluster are updated. 
This is done recursively till the properties of\r\n the leaf node are updated.\r\n\r\n References\r\n ----------\r\n * Tian Zhang, Raghu Ramakrishnan, Maron Livny\r\n BIRCH: An efficient data clustering method for large databases.\r\n https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf\r\n\r\n * Roberto Perdisci\r\n JBirch - Java implementation of BIRCH clustering algorithm\r\n https://code.google.com/archive/p/jbirch\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.cluster import Birch\r\n >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]\r\n >>> brc = Birch(n_clusters=None)\r\n >>> brc.fit(X)\r\n Birch(n_clusters=None)\r\n >>> brc.predict(X)\r\n array([0, 0, 0, 1, 1, 1])\r\n \"\"\"\r\n @_deprecate_positional_args\r\n def __init__(self, *, threshold=0.5, branching_factor=50, n_clusters=3,\r\n compute_labels=True, copy=True):\r\n self.threshold = threshold\r\n self.branching_factor = branching_factor\r\n self.n_clusters = n_clusters\r\n self.compute_labels = compute_labels\r\n self.copy = copy\r\n\r\n def fit(self, X, y=None):\r\n \"\"\"\r\n Build a CF Tree for the input data.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Input data.\r\n\r\n y : Ignored\r\n Not used, present here for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n self\r\n Fitted estimator.\r\n \"\"\"\r\n self.fit_, self.partial_fit_ = True, False\r\n return self._fit(X)\r\n\r\n def _fit(self, X):\r\n X = self._validate_data(X, accept_sparse='csr', copy=self.copy)\r\n threshold = self.threshold\r\n branching_factor = self.branching_factor\r\n\r\n if branching_factor <= 1:\r\n raise ValueError(\"Branching_factor should be greater than one.\")\r\n n_samples, n_features = X.shape\r\n\r\n # If partial_fit is called for the first time or fit is called, we\r\n # start a new tree.\r\n partial_fit = getattr(self, 'partial_fit_')\r\n has_root = getattr(self, 'root_', None)\r\n if getattr(self, 'fit_') or (partial_fit and not has_root):\r\n # The first root is the leaf. Manipulate this object throughout.\r\n self.root_ = _CFNode(threshold=threshold,\r\n branching_factor=branching_factor,\r\n is_leaf=True,\r\n n_features=n_features)\r\n\r\n # To enable getting back subclusters.\r\n self.dummy_leaf_ = _CFNode(threshold=threshold,\r\n branching_factor=branching_factor,\r\n is_leaf=True, n_features=n_features)\r\n self.dummy_leaf_.next_leaf_ = self.root_\r\n self.root_.prev_leaf_ = self.dummy_leaf_\r\n\r\n # Cannot vectorize. 
Enough to convince to use cython.\r\n if not sparse.issparse(X):\r\n iter_func = iter\r\n else:\r\n iter_func = _iterate_sparse_X\r\n\r\n for sample in iter_func(X):\r\n subcluster = _CFSubcluster(linear_sum=sample)\r\n split = self.root_.insert_cf_subcluster(subcluster)\r\n\r\n if split:\r\n new_subcluster1, new_subcluster2 = _split_node(\r\n self.root_, threshold, branching_factor)\r\n del self.root_\r\n self.root_ = _CFNode(threshold=threshold,\r\n branching_factor=branching_factor,\r\n is_leaf=False,\r\n n_features=n_features)\r\n self.root_.append_subcluster(new_subcluster1)\r\n self.root_.append_subcluster(new_subcluster2)\r\n\r\n centroids = np.concatenate([\r\n leaf.centroids_ for leaf in self._get_leaves()])\r\n self.subcluster_centers_ = centroids\r\n\r\n self._global_clustering(X)\r\n return self\r\n\r\n def _get_leaves(self):\r\n \"\"\"\r\n Retrieve the leaves of the CF Node.\r\n\r\n Returns\r\n -------\r\n leaves : list of shape (n_leaves,)\r\n List of the leaf nodes.\r\n \"\"\"\r\n leaf_ptr = self.dummy_leaf_.next_leaf_\r\n leaves = []\r\n while leaf_ptr is not None:\r\n leaves.append(leaf_ptr)\r\n leaf_ptr = leaf_ptr.next_leaf_\r\n return leaves\r\n\r\n def partial_fit(self, X=None, y=None):\r\n \"\"\"\r\n Online learning. Prevents rebuilding of CFTree from scratch.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features), \\\r\n default=None\r\n Input data. If X is not provided, only the global clustering\r\n step is done.\r\n\r\n y : Ignored\r\n Not used, present here for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n self\r\n Fitted estimator.\r\n \"\"\"\r\n self.partial_fit_, self.fit_ = True, False\r\n if X is None:\r\n # Perform just the final global clustering step.\r\n self._global_clustering()\r\n return self\r\n else:\r\n self._check_fit(X)\r\n return self._fit(X)\r\n\r\n def _check_fit(self, X):\r\n check_is_fitted(self)\r\n\r\n if (hasattr(self, 'subcluster_centers_') and\r\n X.shape[1] != self.subcluster_centers_.shape[1]):\r\n raise ValueError(\r\n \"Training data and predicted data do \"\r\n \"not have same number of features.\")\r\n\r\n def predict(self, X):\r\n \"\"\"\r\n Predict data using the ``centroids_`` of subclusters.\r\n\r\n Avoid computation of the row norms of X.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Input data.\r\n\r\n Returns\r\n -------\r\n labels : ndarray of shape(n_samples,)\r\n Labelled data.\r\n \"\"\"\r\n X = check_array(X, accept_sparse='csr')\r\n self._check_fit(X)\r\n kwargs = {'Y_norm_squared': self._subcluster_norms}\r\n return self.subcluster_labels_[\r\n pairwise_distances_argmin(X,\r\n self.subcluster_centers_,\r\n metric_kwargs=kwargs)\r\n ]\r\n\r\n def transform(self, X):\r\n \"\"\"\r\n Transform X into subcluster centroids dimension.\r\n\r\n Each dimension represents the distance from the sample point to each\r\n cluster centroid.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Input data.\r\n\r\n Returns\r\n -------\r\n X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)\r\n Transformed data.\r\n \"\"\"\r\n check_is_fitted(self)\r\n return euclidean_distances(X, self.subcluster_centers_)\r\n\r\n def _global_clustering(self, X=None):\r\n \"\"\"\r\n Global clustering for the subclusters obtained after fitting\r\n \"\"\"\r\n clusterer = self.n_clusters\r\n centroids = self.subcluster_centers_\r\n compute_labels = (X is not 
None) and self.compute_labels\r\n\r\n # Preprocessing for the global clustering.\r\n not_enough_centroids = False\r\n if isinstance(clusterer, numbers.Integral):\r\n clusterer = AgglomerativeClustering(\r\n n_clusters=self.n_clusters)\r\n # There is no need to perform the global clustering step.\r\n if len(centroids) < self.n_clusters:\r\n not_enough_centroids = True\r\n elif (clusterer is not None and not\r\n hasattr(clusterer, 'fit_predict')):\r\n raise ValueError(\"n_clusters should be an instance of \"\r\n \"ClusterMixin or an int\")\r\n\r\n # To use in predict to avoid recalculation.\r\n self._subcluster_norms = row_norms(\r\n self.subcluster_centers_, squared=True)\r\n\r\n if clusterer is None or not_enough_centroids:\r\n self.subcluster_labels_ = np.arange(len(centroids))\r\n if not_enough_centroids:\r\n warnings.warn(\r\n \"Number of subclusters found (%d) by Birch is less \"\r\n \"than (%d). Decrease the threshold.\"\r\n % (len(centroids), self.n_clusters), ConvergenceWarning)\r\n else:\r\n # The global clustering step that clusters the subclusters of\r\n # the leaves. It assumes the centroids of the subclusters as\r\n # samples and finds the final centroids.\r\n self.subcluster_labels_ = clusterer.fit_predict(\r\n self.subcluster_centers_)\r\n\r\n if compute_labels:\r\n self.labels_ = self.predict(X)\r\n" ]
[ [ "scipy.sparse.issparse", "numpy.argmin", "numpy.dot", "numpy.zeros" ] ]
cdds-uiuc/simles-book
[ "79f0fe1133d44f6b94b4bdcd0f05ff65434240c9" ]
[ "_build/jupyter_execute/content/Module02/M02_N02_Optimization.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Optimization\n\n# In[101]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n# These are some parameters to make figures nice (and big)\n\n#%matplotlib inline\n#%config InlineBackend.figure_format = 'retina' \nparams = {'legend.fontsize': 'x-large',\n 'figure.figsize': (15, 5),\n 'axes.labelsize': 'x-large',\n 'axes.titlesize':'x-large',\n 'xtick.labelsize':'x-large',\n 'ytick.labelsize':'x-large'}\nplt.rcParams.update(params)\n\n\n# # Theory\n# \n# In this notebook we will briefly consider the more general problem of fitting a model $\\tilde y(\\theta)$ to some obervations $y$, where $\\theta$ are the model parameters\n# \n# If the model is nonlinear, we will not be able to write simple, analytical estimator, so we will look for a numerical solution. This is very common for machine learning problems, where we cannot compute an optimal solution algebraically. \n# \n# We will generally fit the parameters by minimizing the misfit, i.e. by minimizing a cost function $J(\\theta,y)$:\n# \n# $$\\hat\\theta =\\text{argmin } J(\\theta,y)$$ \n# \n# A typical choice for the cost function is the least-squares cost function. If the errors are independent and normally distributed this is motivatd by maximum likelihood theory. However, this is generally a suitable function that can be useful even if there is no theoretical underpinning. For a set of observations $y(t_j)$, the cost function becomes\n# $$ J\\left[\\theta,y(t_j)\\right]=\\sum_j \\left[\\tilde y(\\theta,t_j)-y(t_j)\\right]^2$$\n# \n# For now, we will use a simple python optimiziation method to minimize the cost function. There are several such methods, but they all behave the same. They require a cost function J, that is a function of the parameters, and needs to be minimized, and an initial condition $\\theta_i$.\n# \n# ```\n# theta_hat=optimize.minimize(J,theta_init,args)\n# ```\n\n# # Example Model\n# Consider the model $\\tilde y(t)=y_0\\cdot \\exp (-t/\\tau)$\n# with parameters $\\theta =\\{y_0,\\tau\\}$, which can be also written a \n# \n# $$\\tilde y(\\theta,t)=\\theta_1 \\cdot \\exp (-t/\\theta_2)$$\n# \n# Let's first visualize the model, before fitting it to some synthetic observations\n\n# In[102]:\n\n\ndef model(theta,t):\n y_model=theta[0]*np.exp(-t/theta[1])\n return y_model\n\n\n# In[103]:\n\n\n# Let's choose some parameters and generate some pseudo-observations\ny0_true =3;\ntau_true =3;\ntheta_true=[y0_true,tau_true];\nsigma_e =0.4; # errors\n\n\n# In[104]:\n\n\n# plot model and pseudo observations\nt=np.linspace(0,10,100)\ne=stats.norm.rvs(0,sigma_e,100)\n\ny_true=model(theta_true,t)\ny_obs =y_true+e\n\nfig,ax=plt.subplots(1,1,figsize=[4,4])\nax.plot(t,y_true,'-',label='truth')\nax.plot(t,y_obs,'o',label='observations')\nax.legend();\n\n\n# **Let's plot the model for some first guesses**\n\n# In[105]:\n\n\ntheta=[[3.5,6],\n [5,3],\n [3.3,2.4]]\n\n\n# In[106]:\n\n\n#Example of models\nfig,ax=plt.subplots(1,3,figsize=[12,4])\n\nfor j in range(3):\n y =model(theta[j],t)\n ax[j].plot(t,y_true,'-',label='truth')\n ax[j].plot(t,y_obs,'o',label='observations')\n ax[j].plot(t,y,'-',label='model')\n ax[j].legend()\n\n\n# # Cost Function\n# \n# We will generally fit the parameters by minimizing themisfit, i.e. by minimizing a cost function $J(\\theta,y)$:\n# \n# $$\\hat\\theta =\\text{argmin } J(\\theta,y)$$ \n# \n# A typical choice for the cost function is the least-squares cost function. 
If the errors are independent and normally distributed this is motivatd by maximum likelihood theory. However, this is generally a suitable function that can be useful even if there is no theoretical underpinning. For a set of observations $y(t_j)$, the cost function becomes\n# \n# $$ J(\\theta,y(t_j))=\\sum_j \\left[\\tilde y(\\theta,t_j)-y(t_j)\\right]^2$$\n# \n\n# In[107]:\n\n\ndef Jcost(theta,y_obs,t):\n Jcost=np.sum( (y_obs-model(theta,t))**2)\n\n return Jcost\n\n\n# In[108]:\n\n\nfig,ax=plt.subplots(1,3,figsize=[12,3])\n\nfor j in range(3):\n y =model(theta[j],t)\n J=Jcost(theta[j],y_obs,t)\n ax[j].plot(t,y_true,'-',label='truth')\n ax[j].plot(t,y_obs,'o',label='observations')\n ax[j].plot(t,y,'-',label='model')\n \n title_str='J='+np.array2string(J,precision=2)\n ax[j].set_title(title_str)\n\n\n# ## Visualize the Cost Function\n\n# In[109]:\n\n\nN1=21;\nN2=20;\ny0_vec=np.linspace(1.5,4,N1);\ntau_vec=np.linspace(1,4,N2);\n\nJ=np.zeros(shape=[N1,N2]);\n\nfor j1 in range(N1):\n for j2 in range(N2):\n theta=[y0_vec[j1],tau_vec[j2]];\n J[j1,j2]=Jcost(theta,y_obs,t); \n\n\n# In[111]:\n\n\nfrom matplotlib import cm\n\nfig, ax = plt.subplots(subplot_kw={\"projection\": \"3d\"},figsize=[10,10])\nX,Y=np.meshgrid(tau_vec,y0_vec)\n\nsurf=ax.plot_surface(X,Y,J,cmap=cm.get_cmap('turbo'),\n linewidth=0, antialiased=False)\n\nax.invert_yaxis()\nax.invert_xaxis()\nax.set_ylabel('theta_1=$y_0$');\nax.set_xlabel('theta_2=tau');\nax.set_zlabel('J(theta)');\nfig.colorbar(surf, shrink=0.5, aspect=10,label='J(theta)');\n\n\n# # Optimize using scipy package\n\n# In[96]:\n\n\nfrom scipy import optimize \ntheta_i=[2,1.2]\ntheta_hat=optimize.minimize(Jcost,theta_i,args=(y_obs,t)).x;\n\nprint(theta_hat)\nprint(theta_true)\n\n\n# In[112]:\n\n\ny_true=model(theta_true,x)\ny_obs =y_true+e\n\nfig,ax=plt.subplots(1,2,figsize=[20,10])\nax[0].plot(x,y_true,'-',label='truth')\nax[0].plot(x,y_obs,'o',label='observations')\nax[0].plot(x,model(theta_i,x),'k--',label='initial gues')\nax[0].plot(x,model(theta_hat,x),'r--',label='best_fit')\nax[0].legend()\n\nax=plt.subplot(1,2,2,projection='3d')\nX,Y=np.meshgrid(tau_vec,y0_vec)\n\nsurf=ax.plot_surface(X,Y,J,cmap=cm.get_cmap('turbo'),\n linewidth=0, antialiased=False,alpha=0.6)\n\nax.invert_yaxis()\nax.invert_xaxis()\nax.set_ylabel('theta_1=$y_0$');\nax.set_xlabel('theta_2=tau');\nax.set_zlabel('J(theta)');\n#ax.grid(False)\nplt.colorbar(surf,ax=ax, shrink=0.5, aspect=10,label='J(theta)');\nax.plot3D(theta_i[1],theta_i[0],Jcost(theta_i,y_obs,t),'ko',markersize=10,label='initial guess');\nax.plot3D(theta_hat[1],theta_hat[0],Jcost(theta_hat,y_obs,t),'ro',markersize=10,label='best fit');\nax.legend();\n\n\n# # Summary\n# Most of the code above is for plotting purposes. The actual optimization is done in ver few lines of code, summarized below\n\n# In[98]:\n\n\n# define your model\ndef model(theta,t):\n y_model=theta[0]*np.exp(-t/theta[1])\n return y_model\n# define your cost function, as a function of the parameter vector\ndef Jcost(theta,y_obs,t):\n Jcost=np.sum( (y_obs-model(theta,t))**2)\n return Jcost\n\n# choose an initial guess\ntheta_init=[2,1.2]\n\n#optimize\ntheta_hat=optimize.minimize(Jcost,theta_init,args=(y_obs,t)).x;\n\n" ]
[ [ "numpy.zeros", "numpy.array2string", "scipy.stats.norm.rvs", "scipy.optimize.minimize", "matplotlib.pyplot.subplots", "numpy.exp", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.subplot", "matplotlib.cm.get_cmap", "matplotlib.pyplot.colorbar", "numpy.meshgrid", "numpy.linspace" ] ]
KaviyaSubramanian706/bt-gate-classifier
[ "082775587ccc795c54845572d45743008a8dc892" ]
[ "video_classify.py" ]
[ "import time\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\nimport os\nimport numpy as np\nimport argparse\nimport pandas as pd\nimport cv2\nimport csv\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom normalizer import Normalizer\n\nimport tensorflow as tf\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.compat.v1.Session(config=config)\n\nstds = None\nmeans = None\n\n# flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')\nflags.DEFINE_string('weights', './home/shyam/bridge_tech/mobilenetv2-tf2/model/ec2_model/model_1.1_15_6_64/output/',\n 'path to weights file')\nflags.DEFINE_enum('model', None, ['MobileNetv2'])\n\nflags.DEFINE_integer('size', 64, 'resize images to')\nflags.DEFINE_string('video', None,\n 'path to video file or number for webcam)')\nflags.DEFINE_string('output', None, 'path to output video')\nflags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')\nflags.DEFINE_integer('num_classes', 2, 'number of classes in the model')\n\n\n\ndef get_mean_std(base_dir, filenames, target_size):\n n = 0\n r_mean, g_mean, b_mean = 0.0, 0.0, 0.0\n r_M2, g_M2, b_M2 = 0.0, 0.0, 0.0\n\n \n # for z, filename in enumerate(filenames):\n # if z % 1000 == 0:\n # print(\"Processing image {}/{}\".format(z+1, len(filenames)))\n\n # x = tf.keras.preprocessing.image.img_to_array(tf.keras.preprocessing.image.load_img(os.path.join(base_dir, filename), target_size=target_size))\n r = x[:, :, 0].flatten().tolist()\n g = x[:, :, 1].flatten().tolist()\n b = x[:, :, 2].flatten().tolist()\n\n for (xr, xg, xb) in zip(r, g, b):\n n = n + 1\n\n r_delta = xr - r_mean\n g_delta = xg - g_mean\n b_delta = xb - b_mean\n\n r_mean = r_mean + r_delta/n\n g_mean = g_mean + g_delta/n\n b_mean = b_mean + b_delta/n\n\n r_M2 = r_M2 + r_delta * (xr - r_mean)\n g_M2 = g_M2 + g_delta * (xg - g_mean)\n b_M2 = b_M2 + b_delta * (xb - b_mean)\n\n r_variance = r_M2 / (n - 1)\n g_variance = g_M2 / (n - 1)\n b_variance = b_M2 / (n - 1)\n\n r_std = np.sqrt(r_variance)\n g_std = np.sqrt(g_variance)\n b_std = np.sqrt(b_variance)\n\n return np.array([r_mean, g_mean, b_mean]), np.array([r_std, g_std, b_std])\n\n\nclass Normalizer():\n def __init__(self, mean=None, std=None):\n self.mean = mean\n self.std = std\n\n def __call__(self, img):\n if self.mean is not None:\n img = self.center(img)\n if self.std is not None:\n img = self.scale(img)\n return img\n\n def center(self, img):\n return img - self.mean\n\n def scale(self, img):\n return img / self.std\n\n def set_stats(self, mean, std):\n self.mean = np.array(mean).reshape(1, 1, 3)\n self.std = np.array(std).reshape(1, 1, 3)\n \n\n def get_stats(self, base_dir, filenames, target_size, calc_mean=True, calc_std=True):\n print(\"Calculating mean and standard deviation with shape: \", target_size)\n m, s = get_mean_std(base_dir, filenames, target_size)\n if calc_mean:\n self.mean = m\n self.mean = self.mean.reshape(1, 1, 3)\n print(\"Dataset mean [r, g, b] = {}\".format(m.tolist()))\n if calc_std:\n self.std = s\n self.std = self.std.reshape(1, 1, 3)\n print(\"Dataset std [r, g, b] = {}\". 
format(s.tolist()))\n\n return str(m.tolist()), str(s.tolist())\n\ndef main(_argv):\n #physical_devices = tf.config.experimental.list_physical_devices('GPU')\n #for physical_device in physical_devices:\n # tf.config.experimental.set_memory_growth(physical_device, True)\n\n if FLAGS.model == 'yolov3-tiny':\n model = YoloV3Tiny(FLAGS.size, classes=FLAGS.num_classes,\n anchors=yolo_tiny_anchors,masks=yolo_tiny_anchor_masks)\n model.summary()\n\n elif FLAGS.model == 'MobileNetv2':\n model = tf.keras.models.load_model('/home/shyam/bridge_tech/mobilenetv2-tf2/model/ec2_model/model_1.1_15_6_64/output/')\n model.summary()\n\n\n model.load_weights(FLAGS.weights).expect_partial()\n logging.info('weights loaded')\n\n class_names = ['Open','Closed']\n logging.info('classes loaded')\n\n times = []\n\n try:\n vid = cv2.VideoCapture(int(FLAGS.video))\n except:\n vid = cv2.VideoCapture(FLAGS.video)\n\n out = None\n\n if FLAGS.output:\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)\n out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))\n\n while True:\n _, img = vid.read()\n\n if img is None:\n logging.warning(\"Empty Frame\")\n time.sleep(0.1)\n break\n\n img = tf.keras.preprocessing.image.load_img(\n img, target_size=(img_height, img_width)\n )\n img_array = keras.preprocessing.image.img_to_array(img)\n img_array = tf.expand_dims(img_array, 0) # Create a batch\n\n predictions = model.predict(img_array)\n score = tf.nn.softmax(predictions[0])\n\n print(\n \"This image most likely belongs to {} with a {:.2f} percent confidence.\"\n .format(class_names[np.argmax(score)], 100 * np.max(score))\n )\n\n img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_in = tf.expand_dims(img_in, 0)\n img_in = tf.image.resize(img_in, (FLAGS.size, FLAGS.size))\n img_in = transform_images(img_in, FLAGS.size)\n\n t1 = time.time()\n boxes, scores, classes, nums = model.predict(img_in)\n t2 = time.time()\n times.append(t2-t1)\n times = times[-20:]\n\n img = draw_outputs(img, (boxes, scores, classes, nums), class_names)\n img = cv2.putText(img, \"Time: {:.2f}ms\".format(sum(times)/len(times)*1000), (0, 30),\n cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)\n if FLAGS.output:\n out.write(img)\n cv2.imshow('output', img)\n if cv2.waitKey(1) == ord('q'):\n break\n\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n" ]
[ [ "tensorflow.compat.v1.Session", "tensorflow.compat.v1.ConfigProto", "tensorflow.keras.models.load_model", "tensorflow.image.resize", "tensorflow.keras.preprocessing.image.load_img", "tensorflow.expand_dims", "numpy.argmax", "numpy.max", "numpy.sqrt", "tensorflow.nn.softmax", "numpy.array" ] ]
Alchemist101010/genesis
[ "1280f7e6af439f6eb9173804d2931078a82190d3" ]
[ "arch1.py" ]
[ "import tensorflow as tf\r\nimport keras\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sb\r\nimport sys \r\nsys.setrecursionlimit(100000)\r\n\r\nfashion = keras.datasets.fashion_mnist\r\n(train_images, train_labels), (test_images, test_labels) = fashion.load_data()\r\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\r\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\r\n\r\nplt.figure()\r\nplt.imshow(train_images[0])\r\nplt.colorbar()\r\nplt.grid(False)\r\n\r\ntrain_images = train_images / 255.0\r\ntest_images = test_images / 255.0\r\n\r\nplt.figure(figsize=(10,10))\r\nfor i in range(25):\r\n plt.subplot(5,5,i+1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.grid(False)\r\n plt.imshow(train_images[i], cmap= 'icefire')\r\n plt.xlabel(class_names[train_labels[i]])\r\n\r\nmodel = keras.Sequential([\r\n keras.layers.Flatten(input_shape=(28, 28)),\r\n keras.layers.Dense(128, activation=tf.nn.relu),\r\n keras.layers.Dense(10, activation=tf.nn.softmax)\r\n])\r\n\r\nmodel.compile(optimizer=tf.train.AdamOptimizer(), \r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\nmodel.fit(train_images, train_labels, epochs=5)\r\n\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\r\n\r\nprint('Test accuracy:', test_acc)\r\npredictions = model.predict(test_images)\r\n\r\ndef plot_image(i, predictions_array, true_label, img):\r\n predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\r\n plt.grid(False)\r\n plt.xticks([])\r\n plt.yticks([])\r\n \r\n plt.imshow((img), cmap= 'icefire')\r\n\r\n predicted_label = np.argmax(predictions_array)\r\n if predicted_label == true_label:\r\n color = 'blue'\r\n else:\r\n color = 'red'\r\n \r\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\r\n 100*np.max(predictions_array),\r\n class_names[true_label]),\r\n color=color)\r\n\r\ndef plot_value_array(i, predictions_array, true_label):\r\n predictions_array, true_label = predictions_array[i], true_label[i]\r\n plt.grid(False)\r\n plt.xticks([])\r\n plt.yticks([])\r\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\r\n plt.ylim([0, 1]) \r\n predicted_label = np.argmax(predictions_array)\r\n \r\n thisplot[predicted_label].set_color('red')\r\n thisplot[true_label].set_color('blue')\r\n\r\n i = 0\r\n plt.figure(figsize=(6,3))\r\n plt.subplot(1,2,1)\r\n plot_image(i, predictions, test_labels, test_images)\r\n plt.subplot(1,2,2)\r\n plot_value_array(i, predictions, test_labels)\r\n\r\n i = 12\r\n plt.figure(figsize=(6,3))\r\n plt.subplot(1,2,1)\r\n plot_image(i, predictions, test_labels, test_images)\r\n plt.subplot(1,2,2)\r\n plot_value_array(i, predictions, test_labels)\r\n\r\n# Plot the first X test images, their predicted label, and the true label\r\n# Color correct predictions in blue, incorrect predictions in red\r\nnum_rows = 5\r\nnum_cols = 3\r\nnum_images = num_rows*num_cols\r\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\r\nfor i in range(num_images):\r\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\r\n plot_image(i, predictions, test_labels, test_images)\r\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\r\n plot_value_array(i, predictions, test_labels)\r\n\r\n # Grab an image from the test dataset\r\nimg = test_images[0]\r\n\r\n\r\n\r\nprint(img.shape)\r\n\r\npredictions_single = model.predict(img)\r\nprint(predictions_single)\r\n\r\nplot_value_array(0, predictions_single, test_labels)\r\n_ = plt.xticks(range(10), class_names, 
rotation=45)\r\nnp.argmax(predictions_single[0])" ]
[ [ "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.grid", "tensorflow.train.AdamOptimizer", "numpy.argmax", "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplot", "numpy.max", "matplotlib.pyplot.ylim", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel" ] ]
LarsNeR/stellargraph
[ "ee993bb600745a37d994fa4da628268b1cd657dc" ]
[ "tests/data/test_metapath_walker.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport pytest\nfrom stellargraph.data.explorer import UniformRandomMetaPathWalk\nfrom stellargraph.core.graph import StellarGraph\n\n\n# FIXME (#535): Consider using graph fixtures\ndef create_test_graph():\n \"\"\"\n Creates a simple graph for testing the BreadthFirstWalk class. The node ids are string or integers. Each node\n also has a label based on the type of its id such that nodes with string ids and those with integer ids have\n labels 's' and 'n' respectively.\n\n Returns:\n A simple graph with 13 nodes and 24 edges (including self loops for all but two of the nodes) in\n networkx format.\n\n \"\"\"\n nodes = {\n \"s\": pd.DataFrame(index=[\"0\", \"5\", \"7\", \"self loner\", \"loner\"]),\n \"n\": pd.DataFrame(index=[1, 2, 3, 4, 6, 8, 9, 10]),\n }\n edges = pd.DataFrame(\n [\n (\"0\", 1),\n (\"0\", 2),\n (1, 3),\n (1, 4),\n (3, 6),\n (4, \"7\"),\n (4, 8),\n (2, \"5\"),\n (\"5\", 9),\n (\"5\", 10),\n (\"0\", \"0\"),\n (1, 1),\n (3, 3),\n (6, 6),\n (4, 4),\n (\"7\", \"7\"),\n (8, 8),\n (2, 2),\n (\"5\", \"5\"),\n (9, 9),\n (\n \"self loner\",\n \"self loner\",\n ), # node that is not connected with any other nodes but has self loop\n ],\n columns=[\"source\", \"target\"],\n )\n\n return StellarGraph(nodes, edges)\n\n\nclass TestMetaPathWalk(object):\n def test_parameter_checking(self):\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n nodes = [1]\n n = 1\n length = 2\n seed = None\n metapaths = [[\"n\", \"s\", \"n\"]]\n\n # nodes should be a list of node ids even for a single node\n with pytest.raises(ValueError):\n mrw.run(nodes=None, n=n, length=length, metapaths=metapaths, seed=seed)\n with pytest.raises(ValueError):\n mrw.run(nodes=0, n=n, length=length, metapaths=metapaths, seed=seed)\n # n has to be positive integer\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=-1, length=length, metapaths=metapaths, seed=seed)\n with pytest.raises(TypeError):\n mrw.run(nodes=nodes, n=11.4, length=length, metapaths=metapaths, seed=seed)\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=0, length=length, metapaths=metapaths, seed=seed)\n # length has to be positive integer\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=-3, metapaths=metapaths, seed=seed)\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=0, metapaths=metapaths, seed=seed)\n with pytest.raises(TypeError):\n mrw.run(nodes=nodes, n=n, length=4.6, metapaths=metapaths, seed=seed)\n with pytest.raises(TypeError):\n mrw.run(nodes=nodes, n=n, length=1.0000001, metapaths=metapaths, seed=seed)\n # metapaths have to start and end with the same node type\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[[\"s\", \"n\"]], seed=seed)\n with pytest.raises(ValueError):\n mrw.run(\n nodes=nodes,\n n=n,\n length=length,\n metapaths=[[\"s\", \"n\", \"s\"], [\"n\", \"s\"]],\n seed=seed,\n )\n # 
metapaths have to have minimum length of two\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[[\"s\"]], seed=seed)\n # metapaths has to be a list of lists of strings denoting the node labels\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[\"n\", \"s\"], seed=seed)\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[[1, 2]], seed=seed)\n with pytest.raises(ValueError):\n mrw.run(\n nodes=nodes, n=n, length=length, metapaths=[[\"n\", \"s\"], []], seed=seed\n )\n with pytest.raises(ValueError):\n mrw.run(\n nodes=nodes,\n n=n,\n length=length,\n metapaths=[[\"n\", \"s\"], [\"s\", 1]],\n seed=seed,\n )\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=[(\"n\", \"s\")], seed=seed)\n with pytest.raises(ValueError):\n mrw.run(\n nodes=nodes,\n n=n,\n length=length,\n metapaths=([\"n\", \"s\"], [\"s\", \"n\", \"s\"]),\n seed=seed,\n )\n # seed has to be integer or None\n with pytest.raises(ValueError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=-1)\n with pytest.raises(TypeError):\n mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=1000.345)\n\n # If no root nodes are given, an empty list is returned which is not an error but I thought this method\n # is the best for checking this behaviour.\n walks = mrw.run(nodes=[], n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == 0\n\n def test_walk_generation_single_root_node_loner(self):\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n seed = None\n nodes = [\"loner\"] # has no edges, not even to itself\n n = 1\n length = 5\n metapaths = [[\"s\", \"n\", \"s\"]]\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert len(walks[0]) == 1\n\n n = 5\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n for walk in walks:\n assert len(walk) == 1\n\n def test_walk_generation_single_root_node_self_loner(self):\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n seed = None\n nodes = [\"self loner\"] # this node has self edges but not other edges\n n = 1\n length = 10\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert (\n len(walks[0]) == 1\n ) # for the ['s', 'n', 'n', 's'] metapath only the starting node is returned\n\n metapaths = [[\"s\", \"s\"]]\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert len(walks[0]) == length # the node is repeated length times\n for node in walks[0]:\n assert node == \"self loner\"\n\n def test_walk_generation_single_root_node(self):\n\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n nodes = [\"0\"]\n n = 1\n length = 15\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n seed = 42\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert len(walks[0]) <= length # test against maximum walk length\n\n n = 5\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n\n assert len(walks[0]) <= length # test against maximum walk length\n\n metapaths = [[\"s\", \"n\", \"s\"], [\"s\", \"n\", \"n\", \"s\"]]\n n = 1\n walks = mrw.run(nodes=nodes, n=n, length=length, 
metapaths=metapaths, seed=seed)\n assert len(walks) == n * len(metapaths)\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n metapaths = [[\"s\", \"n\", \"s\"], [\"s\", \"n\", \"n\", \"s\"]]\n n = 5\n length = 100\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n * len(metapaths)\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n nodes = [8]\n metapaths = [[\"s\", \"n\", \"s\"], [\"s\", \"n\", \"n\", \"s\"]]\n n = 5\n length = 100\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert (\n len(walks) == 0\n ) # metapaths start with a node of type 's' but starting node is type 'n' so an empty list is returned\n\n def test_walk_generation_many_root_nodes(self):\n\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n nodes = [\"0\", 2]\n n = 1\n length = 15\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n seed = 42\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert (\n len(walks) == 1\n ) # the starting node 2 should not generate a walk because it is of type 'n' not 's'\n assert len(walks[0]) <= length # test against maximum walk length\n\n metapaths = [[\"s\", \"n\", \"n\", \"s\"], [\"n\", \"n\", \"s\", \"n\"]]\n\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert (\n len(walks) == 2\n ) # each starting node will generate one walk from each metapath\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n n = 2\n nodes = [\"0\", \"5\"]\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n * len(\n nodes\n ) # each starting node will generate one walk from each metapath\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n n = 2\n nodes = [\"0\", \"5\", 1, 6]\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert (\n len(walks) == n * 2\n ) # the first two starting node will generate one walk from each metapath\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n n = 5\n nodes = [\"0\", \"5\", 1, 6]\n metapaths = [[\"s\", \"n\", \"n\", \"s\"], [\"n\", \"s\", \"n\"], [\"n\", \"n\"]]\n walks = mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed)\n assert len(walks) == n * 6\n for walk in walks:\n assert len(walk) <= length # test against maximum walk length\n\n def test_init_parameters(self):\n g = create_test_graph()\n n = 2\n length = 15\n metapaths = [[\"s\", \"n\", \"n\", \"s\"]]\n seed = 42\n nodes = [\"0\", \"5\"]\n\n mrw = UniformRandomMetaPathWalk(\n g, n=n, length=length, metapaths=metapaths, seed=seed\n )\n mrw_no_params = UniformRandomMetaPathWalk(g)\n\n assert mrw.run(nodes=nodes) == mrw_no_params.run(\n nodes=nodes, n=n, length=length, metapaths=metapaths, seed=seed\n )\n\n def test_benchmark_uniformrandommetapathwalk(self, benchmark):\n\n g = create_test_graph()\n mrw = UniformRandomMetaPathWalk(g)\n\n nodes = [\"0\"]\n n = 5\n length = 5\n metapaths = [[\"s\", \"n\", \"n\", \"s\"], [\"n\", \"s\", \"n\"], [\"n\", \"n\"]]\n\n benchmark(lambda: mrw.run(nodes=nodes, n=n, length=length, metapaths=metapaths))\n" ]
[ [ "pandas.DataFrame" ] ]
Werni2A/galois
[ "97c35afdd1ad38705f2b1e643237fbd2f87bb6e3" ]
[ "tests/fields/test_instantiation.py" ]
[ "\"\"\"\nA pytest module to test instantiation of new Galois field arrays.\n\"\"\"\nimport random\n\nimport pytest\nimport numpy as np\n\nimport galois\n\nfrom ..helper import array_equal\n\n\nDTYPES = galois.dtypes.DTYPES + [np.object_]\n\n\ndef test_cant_instantiate_GF():\n v = [0, 1, 0, 1]\n with pytest.raises(NotImplementedError):\n a = galois.FieldArray(v)\n\n\nclass Test0D:\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array, galois.FieldArray])\n def test_new(self, field, type1):\n v = int(field.Random())\n vt = convert_0d(v, type1, field)\n a = field(vt)\n assert type(a) is field\n assert a == v\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array, galois.FieldArray])\n def test_valid_dtype(self, field, type1):\n v = int(field.Random())\n vt = convert_0d(v, type1, field)\n dtype = valid_dtype(field)\n a = field(vt, dtype=dtype)\n assert type(a) is field\n assert a.dtype == dtype\n assert a == v\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array, galois.FieldArray])\n def test_invalid_dtype(self, field, type1):\n v = int(field.Random())\n vt = convert_0d(v, type1, field)\n dtype = invalid_dtype(field)\n with pytest.raises(TypeError):\n a = field(vt, dtype=dtype)\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array])\n def test_non_integer(self, field, type1):\n v = float(field.order)\n vt = convert_0d(v, type1, field)\n with pytest.raises((TypeError, ValueError)):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array])\n def test_out_of_range_low(self, field, type1):\n v = -1\n vt = convert_0d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [int, list, tuple, np.array])\n def test_out_of_range_high(self, field, type1):\n v = field.order\n vt = convert_0d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n def test_copy_true(self, field):\n v = int(field.Random(low=1))\n va = np.array(v, dtype=field.dtypes[0])\n a = field(va, copy=True)\n assert type(a) is field\n assert array_equal(a, v)\n va = 1 # Change original array\n assert array_equal(a, v)\n\n def test_default_order_c(self, field):\n v = int(field.Random())\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_default_order_f(self, field):\n v = int(field.Random())\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_c(self, field):\n v = int(field.Random())\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va, order=\"C\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_f(self, field):\n v = int(field.Random())\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va, order=\"F\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_ndmin(self, field):\n v = int(field.Random())\n a = field(v, ndmin=3)\n assert type(a) is field\n assert a.shape == (1,1,1)\n\n\nclass Test1D:\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_new(self, field, type1):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), 
int(field.Random())]\n vt = convert_1d(v, type1, field)\n a = field(vt)\n assert type(a) is field\n assert array_equal(a, v)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_valid_dtype(self, field, type1):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n dtype = valid_dtype(field)\n a = field(vt, dtype=dtype)\n assert type(a) is field\n assert a.dtype == dtype\n assert array_equal(a, v)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_invalid_dtype(self, field, type1):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n dtype = invalid_dtype(field)\n with pytest.raises(TypeError):\n a = field(vt, dtype=dtype)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_non_integer(self, field, type1):\n v = [int(field.Random()), float(field.Random()), int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n with pytest.raises((TypeError, ValueError)):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_out_of_range_low(self, field, type1):\n v = [int(field.Random()), -1, int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_out_of_range_high(self, field, type1):\n v = [int(field.Random()), field.order, int(field.Random()), int(field.Random())]\n vt = convert_1d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n def test_copy_true(self, field):\n v = [int(field.Random(low=1)), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, dtype=field.dtypes[0])\n a = field(va, copy=True)\n assert type(a) is field\n assert array_equal(a, v)\n va[0] = 0 # Change original array\n assert array_equal(a, v)\n\n def test_default_order_c(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_default_order_f(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_c(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va, order=\"C\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_f(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va, order=\"F\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_ndmin(self, field):\n v = [int(field.Random()), int(field.Random()), int(field.Random()), int(field.Random())]\n a = field(v, ndmin=3)\n assert type(a) is field\n assert a.shape == 
(1,1,4)\n\n\nclass Test2D:\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_new(self, field, type1):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n a = field(vt)\n assert type(a) is field\n assert array_equal(a, v)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_valid_dtype(self, field, type1):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n dtype = valid_dtype(field)\n a = field(vt, dtype=dtype)\n assert type(a) is field\n assert a.dtype == dtype\n assert array_equal(a, v)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array, galois.FieldArray])\n def test_invalid_dtype(self, field, type1):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n dtype = invalid_dtype(field)\n with pytest.raises(TypeError):\n a = field(vt, dtype=dtype)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_non_integer(self, field, type1):\n v = [[int(field.Random()), float(field.Random())], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n with pytest.raises((TypeError, ValueError)):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_out_of_range_low(self, field, type1):\n v = [[int(field.Random()), -1], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n @pytest.mark.parametrize(\"type1\", [list, tuple, np.array])\n def test_out_of_range_high(self, field, type1):\n v = [[int(field.Random()), field.order], [int(field.Random()), int(field.Random())]]\n vt = convert_2d(v, type1, field)\n with pytest.raises(ValueError):\n a = field(vt)\n\n def test_copy_true(self, field):\n v = [[int(field.Random(low=1)), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, dtype=field.dtypes[0])\n a = field(va, copy=True)\n assert type(a) is field\n assert array_equal(a, v)\n va[0][0] = 0 # Change original array\n assert array_equal(a, v)\n\n def test_default_order_c(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert not a.flags[\"F_CONTIGUOUS\"]\n\n def test_default_order_f(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va) # Default order is \"K\" which keeps current\n assert type(a) is field\n assert not a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_c(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, order=\"F\", dtype=field.dtypes[0])\n a = field(va, order=\"C\")\n assert type(a) is field\n assert a.flags[\"C_CONTIGUOUS\"]\n assert not a.flags[\"F_CONTIGUOUS\"]\n\n def test_order_f(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n va = np.array(v, order=\"C\", dtype=field.dtypes[0])\n a = field(va, order=\"F\")\n assert type(a) is field\n 
assert not a.flags[\"C_CONTIGUOUS\"]\n assert a.flags[\"F_CONTIGUOUS\"]\n\n def test_ndmin(self, field):\n v = [[int(field.Random()), int(field.Random())], [int(field.Random()), int(field.Random())]]\n a = field(v, ndmin=3)\n assert type(a) is field\n assert a.shape == (1,2,2)\n\n\ndef convert_0d(v, type1, field):\n if type1 is int:\n vt = v\n elif type1 in [list, tuple]:\n vt = type1([v])\n elif type1 is np.array and field.dtypes == [np.object_]:\n vt = np.array(v, dtype=np.object_)\n elif type1 is np.array:\n vt = np.array(v)\n elif type1 is galois.FieldArray:\n vt = field(v)\n else:\n raise NotImplementedError\n return vt\n\n\ndef convert_1d(v, type1, field):\n if type1 is galois.FieldArray:\n vt = field(v)\n elif type1 is np.array and field.dtypes == [np.object_]:\n vt = np.array(v, dtype=np.object_)\n elif type1 is np.array:\n vt = np.array(v)\n else:\n vt = type1(v)\n return vt\n\n\ndef convert_2d(v, type1, field):\n if type1 is galois.FieldArray:\n vt = field(v)\n elif type1 is np.array and field.dtypes == [np.object_]:\n vt = np.array(v, dtype=np.object_)\n elif type1 is np.array:\n vt = np.array(v)\n elif type1 in [list, tuple]:\n vt = type1([type1([b for b in a]) for a in v])\n else:\n raise NotImplementedError\n return vt\n\n\ndef valid_dtype(field):\n return random.choice(field.dtypes)\n\n\ndef invalid_dtype(field):\n return random.choice([dtype for dtype in DTYPES if dtype not in field.dtypes])\n" ]
[ [ "numpy.array" ] ]
MAWUT0R/PokerRL
[ "95708a5f7a16cb151bc4253132bdfd22ea7a9b25" ]
[ "PokerRL/cfr/_MCCFRBase.py" ]
[ "# Copyright (c) 2019 Eric Steinberger\n\n\nimport copy\n\nimport numpy as np\nimport sys\n\nfrom PokerRL.game._.tree.MCPublicTree import MCPublicTree\nfrom PokerRL.game.wrappers import HistoryEnvBuilder\nfrom PokerRL.rl.rl_util import get_env_cls_from_str\n\n\nclass MCCFRBase:\n \"\"\"\n base class to all full-width (i.e. non MC) tabular CFR methods\n \"\"\"\n\n def __init__(self,\n name,\n chief_handle,\n game_cls,\n agent_bet_set,\n algo_name,\n starting_stack_sizes=None,\n innerloop_epi=None,\n sample_method='eps_greedy'\n ):\n \"\"\"\n Args:\n name (str): Under this name all logs, data, and checkpoints will appear.\n chief_handle (ChiefBase): Reference to chief worker\n game_cls (PokerEnv subclass): Class (not instance) to be trained in.\n agent_bet_set (iterable): Choosing a bet-set from bet_sets.py is recommended. If solving a\n Limit poker game, this value will not be considered, but must still\n be passed. Just set this to any list of floats (e.g. [0.0])\n starting_stack_sizes (list of ints): For each stack size in this list, a CFR strategy will be computed.\n Results are logged individually and averaged (uniform).\n If None, takes the default for the game.\n \"\"\"\n\n self._name = name # name=MC_CFR_EXAMPLE\n self._n_seats = 2\n self.touching_nodes = 0\n self._sample_method = sample_method\n\n self._chief_handle = chief_handle\n\n if starting_stack_sizes is None:\n self._starting_stack_sizes = [game_cls.DEFAULT_STACK_SIZE]\n else:\n self._starting_stack_sizes = copy.deepcopy(starting_stack_sizes)\n # self._starting_stack_sizes = [13]\n\n self._game_cls_str = game_cls.__name__ # StandardLeduc\n\n self._env_args = [\n game_cls.ARGS_CLS(n_seats=self._n_seats,\n starting_stack_sizes_list=[start_chips for _ in range(self._n_seats)], #[13, 13]\n bet_sizes_list_as_frac_of_pot=agent_bet_set,\n )\n for start_chips in self._starting_stack_sizes\n ]\n self._env_bldrs = [\n HistoryEnvBuilder(env_cls=get_env_cls_from_str(self._game_cls_str),\n env_args=self._env_args[s])\n\n for s in range(len(self._starting_stack_sizes))\n ]\n\n self._trees = [\n MCPublicTree(env_bldr=self._env_bldrs[idx],\n stack_size=self._env_args[idx].starting_stack_sizes_list,\n stop_at_street=None,\n sample_method=self._sample_method)\n for idx in range(len(self._env_bldrs))\n ]\n\n for tree in self._trees:\n tree.build_tree()\n print(\"Tree with stack size\", tree.stack_size, \"has\", tree.n_nodes, \"nodes out of which\", tree.n_nonterm,\n \"are non-terminal.\")\n\n self._algo_name = algo_name # MCCFR\n\n self._exps_curr_total = [\n self._chief_handle.create_experiment(\n self._name + \"_Curr_S\" + str(self._starting_stack_sizes[s]) + \"_total_\" + self._algo_name)\n for s in range(len(self._starting_stack_sizes))\n ]\n\n self._exps_avg_total = [\n self._chief_handle.create_experiment(\n self._name + \"_Avg_total_S\" + str(self._starting_stack_sizes[s]) + \"_\" + self._algo_name)\n for s in range(len(self._starting_stack_sizes))\n ]\n\n self._exp_all_averaged_curr_total = self._chief_handle.create_experiment(\n self._name + \"_Curr_total_averaged_\" + self._algo_name)\n\n self._exp_all_averaged_avg_total = self._chief_handle.create_experiment(\n self._name + \"_Avg_total_averaged_\" + self._algo_name)\n\n # self._chief_handle._log_buf._experiments\n # {'MC_CFR_EXAMPLE_Curr_S13_total_MCCFR': {}, \n # 'MC_CFR_EXAMPLE_Avg_total_S13_MCCFR': {}, \n # 'MC_CFR_EXAMPLE_Curr_total_averaged_MCCFR': {}, \n # 'MC_CFR_EXAMPLE_Avg_total_averaged_MCCFR': {}}\n\n self._iter_counter = None\n if innerloop_epi is None:\n 
self._innerloop_epi = self._trees[0]._n_nodes\n else:\n self._innerloop_epi = innerloop_epi\n\n @property\n def name(self):\n return self._name\n\n @property\n def algo_name(self):\n return self._algo_name\n\n @property\n def iter_counter(self):\n return self._iter_counter\n\n def reset(self):\n self._iter_counter = 0\n\n for p in range(self._n_seats):\n self._reset_player(p_id=p)\n\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].fill_uniform_random()\n\n self._compute_cfv()\n self._log_curr_strat_expl()\n\n def iteration(self):\n raise NotImplementedError\n\n def print_tree(self, node):\n print(\"node value\", node.reach_probs)\n for c in node.children:\n print(\"chil value\", c.reach_probs)\n # self.print_tree(c)\n\n def _compute_cfv(self):\n # Compute node.ev_weighted, node.ev_br_weighted, node.epsilon, node.exploitability\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].compute_ev()\n\n def _compute_mc_cfv(self, p_id):\n # Compute node.ev_weighted, node.ev_br_weighted, node.epsilon, node.exploitability\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].compute_mc_ev(p_id)\n\n def _regret_formula_first_it(self, ev_all_actions, strat_ev):\n raise NotImplementedError\n\n def _regret_formula_after_first_it(self, ev_all_actions, strat_ev, last_regrets):\n raise NotImplementedError\n\n def _compute_regrets(self, p_id):\n\n for t_idx in range(len(self._trees)):\n def __compute_evs(_node):\n # EV of each action\n N_ACTIONS = len(_node.children)\n ev_all_actions = np.zeros(shape=(self._env_bldrs[t_idx].rules.RANGE_SIZE, N_ACTIONS), dtype=np.float32)\n for i, child in enumerate(_node.children):\n ev_all_actions[:, i] = child.ev[p_id]\n\n # EV if playing by curr strat\n strat_ev = _node.ev[p_id]\n strat_ev = np.expand_dims(strat_ev, axis=-1).repeat(N_ACTIONS, axis=-1)\n\n self.touching_nodes += len(np.nonzero(_node.reach_probs[0])[0]) * len(np.nonzero(_node.reach_probs[1])[0])\n return strat_ev, ev_all_actions\n\n def _fill_after_first(_node):\n if _node.p_id_acting_next == p_id:\n strat_ev, ev_all_actions = __compute_evs(_node=_node)\n _node.data[\"regret\"] = self._regret_formula_after_first_it(ev_all_actions=ev_all_actions,\n strat_ev=strat_ev,\n last_regrets=_node.data[\"regret\"])\n\n for c in _node.children:\n _fill_after_first(c)\n\n def _fill_first(_node):\n if _node.p_id_acting_next == p_id:\n strat_ev, ev_all_actions = __compute_evs(_node=_node)\n\n _node.data[\"regret\"] = self._regret_formula_first_it(ev_all_actions=ev_all_actions,\n strat_ev=strat_ev)\n\n for c in _node.children:\n _fill_first(c)\n \n\n if self._iter_counter == 0:\n _fill_first(self._trees[t_idx].root)\n else:\n _fill_after_first(self._trees[t_idx].root)\n\n def _compute_new_strategy(self, p_id, inner_loop=False):\n \"\"\" Assumes regrets have been computed for player \"\"p_id\"\" already! 
\"\"\"\n raise NotImplementedError\n\n def _update_reach_probs(self):\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].update_reach_probs()\n\n def _compute_reach_probs(self, p_id):\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].compute_reach_probs(p_id)\n\n def _add_strategy_to_average(self, p_id):\n raise NotImplementedError\n\n def _log_curr_strat_expl(self):\n expl_totals = []\n for t_idx in range(len(self._trees)):\n METRIC = self._env_bldrs[t_idx].env_cls.WIN_METRIC\n expl_p = [\n float(self._trees[t_idx].root.exploitability[p]) * self._env_bldrs[t_idx].env_cls.EV_NORMALIZER\n for p in range(self._n_seats)\n ]\n expl_total = sum(expl_p) / self._n_seats\n expl_totals.append(expl_total)\n\n self._chief_handle.add_scalar(self._exps_curr_total[t_idx],\n \"Evaluation/\" + METRIC, self._iter_counter, expl_total)\n\n self._trees[t_idx].export_to_file(name=self._name + \"_Curr_\" + str(self._iter_counter))\n\n expl_total_averaged = sum(expl_totals) / float(len(expl_totals))\n self._chief_handle.add_scalar(self._exp_all_averaged_curr_total,\n \"Evaluation/\" + METRIC, self._iter_counter, expl_total_averaged)\n\n def _evaluate_avg_strats(self):\n expl_totals = []\n for t_idx in range(len(self._trees)):\n METRIC = self._env_bldrs[t_idx].env_cls.WIN_METRIC\n eval_tree = MCPublicTree(env_bldr=self._env_bldrs[t_idx],\n stack_size=self._env_args[t_idx].starting_stack_sizes_list,\n stop_at_street=None,\n is_debugging=False,\n )\n eval_tree.build_tree()\n\n def _fill(_node_eval, _node_train):\n if _node_eval.p_id_acting_next != eval_tree.CHANCE_ID and (not _node_eval.is_terminal):\n _node_eval.strategy = np.copy(_node_train.data[\"avg_strat\"])\n assert np.allclose(np.sum(_node_eval.strategy, axis=1), 1, atol=0.0001)\n\n for c_eval, c_train in zip(_node_eval.children, _node_train.children):\n _fill(_node_eval=c_eval, _node_train=c_train)\n\n # sets up some stuff; we overwrite strategy afterwards\n eval_tree.fill_uniform_random()\n\n # fill with strat\n _fill(_node_eval=eval_tree.root, _node_train=self._trees[t_idx].root)\n eval_tree.update_reach_probs()\n\n # compute EVs\n eval_tree.compute_ev()\n\n eval_tree.export_to_file(name=self._name + \"_Avg_\" + str(self._iter_counter))\n\n # log\n expl_p = [\n float(eval_tree.root.exploitability[p]) * self._env_bldrs[t_idx].env_cls.EV_NORMALIZER\n for p in range(eval_tree.n_seats)\n ]\n expl_total = sum(expl_p) / eval_tree.n_seats\n expl_totals.append(expl_total)\n\n self._chief_handle.add_scalar(self._exps_avg_total[t_idx],\n \"Evaluation/\" + METRIC, self._iter_counter, expl_total)\n\n expl_total_averaged = sum(expl_totals) / float(len(expl_totals))\n self._chief_handle.add_scalar(self._exp_all_averaged_avg_total,\n \"Evaluation/\" + METRIC, self._iter_counter, expl_total_averaged)\n return expl_total_averaged\n\n def _reset_player(self, p_id):\n def __reset(_node, _p_id):\n if _node.p_id_acting_next == _p_id:\n # regrets and strategies only need to be stored for one player at each node\n _node.data = {\n \"regret\": None,\n \"avg_strat\": None\n }\n _node.strategy = None\n\n for c in _node.children:\n __reset(c, _p_id=_p_id)\n\n for t_idx in range(len(self._trees)):\n __reset(self._trees[t_idx].root, _p_id=p_id)\n\n\n def _generate_samples(self, p_id, player=False, opponent=False, chance_p=False):\n for t_idx in range(len(self._trees)):\n self._trees[t_idx].generate_samples(p_id, player, opponent, chance_p)\n\n def _calcultate_variance(self):\n variances = []\n for t_idx in range(len(self._trees)):\n 
self._trees[t_idx].calcultate_variance()\n v = np.mean((self._trees[t_idx].root.ev - self._trees[t_idx].root.true_ev)**2)\n # v = np.mean((self._trees[t_idx].root.ev - self._trees[t_idx].root.true_ev)**2)\n variances.append(v)\n return variances\n\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.copy", "numpy.expand_dims", "numpy.nonzero", "numpy.mean" ] ]
BouchardLab/pylearn2
[ "4cab785b870d22cd9e85a5f536d4cac234b6bf60" ]
[ "pylearn2/energy_functions/tests/test_rbm_energy.py" ]
[ "import theano\ntheano.config.compute_test_value = 'off'\nfrom pylearn2.energy_functions.rbm_energy import GRBM_Type_1\nimport numpy as N\nfrom theano.compat.six.moves import xrange\nimport theano.tensor as T\nfrom theano import function\nfrom pylearn2.utils import as_floatX\nfrom pylearn2.utils import sharedX\nfrom pylearn2.linear.matrixmul import MatrixMul\nimport unittest\n\n\nclass TestGRBM_Type_1(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.test_m = 2\n\n cls.rng = N.random.RandomState([1, 2, 3])\n cls.nv = 3\n cls.nh = 4\n\n cls.vW = cls.rng.randn(cls.nv, cls.nh)\n cls.W = sharedX(cls.vW)\n cls.vbv = as_floatX(cls.rng.randn(cls.nv))\n cls.bv = T.as_tensor_variable(cls.vbv)\n cls.bv.tag.test_value = cls.vbv\n cls.vbh = as_floatX(cls.rng.randn(cls.nh))\n cls.bh = T.as_tensor_variable(cls.vbh)\n cls.bh.tag.test_value = cls.bh\n cls.vsigma = as_floatX(cls.rng.uniform(0.1, 5))\n cls.sigma = T.as_tensor_variable(cls.vsigma)\n cls.sigma.tag.test_value = cls.vsigma\n\n cls.E = GRBM_Type_1(transformer=MatrixMul(cls.W), bias_vis=cls.bv,\n bias_hid=cls.bh, sigma=cls.sigma)\n\n cls.V = T.matrix()\n cls.V.tag.test_value = as_floatX(cls.rng.rand(cls.test_m, cls.nv))\n cls.H = T.matrix()\n cls.H.tag.test_value = as_floatX(cls.rng.rand(cls.test_m, cls.nh))\n\n cls.E_func = function([cls.V, cls.H], cls.E([cls.V, cls.H]))\n cls.F_func = function([cls.V], cls.E.free_energy(cls.V))\n cls.log_P_H_given_V_func = \\\n function([cls.H, cls.V], cls.E.log_P_H_given_V(cls.H, cls.V))\n cls.score_func = function([cls.V], cls.E.score(cls.V))\n\n cls.F_of_V = cls.E.free_energy(cls.V)\n cls.dummy = T.sum(cls.F_of_V)\n cls.negscore = T.grad(cls.dummy, cls.V)\n cls.score = - cls.negscore\n\n cls.generic_score_func = function([cls.V], cls.score)\n\n def test_mean_H_given_V(self):\n tol = 1e-6\n\n # P(h_1 | v) / P(h_2 | v) = a\n # => exp(-E(v, h_1)) / exp(-E(v,h_2)) = a\n # => exp(E(v,h_2)-E(v,h_1)) = a\n # E(v,h_2) - E(v,h_1) = log(a)\n # also log P(h_1 | v) - log P(h_2) = log(a)\n\n rng = N.random.RandomState([1, 2, 3])\n\n m = 5\n\n Vv = as_floatX(N.zeros((m, self.nv)) + rng.randn(self.nv))\n\n Hv = as_floatX(rng.randn(m, self.nh) > 0.)\n\n log_Pv = self.log_P_H_given_V_func(Hv, Vv)\n\n Ev = self.E_func(Vv, Hv)\n\n for i in xrange(m):\n for j in xrange(i + 1, m):\n log_a = log_Pv[i] - log_Pv[j]\n e = Ev[j] - Ev[i]\n\n assert abs(e-log_a) < tol\n\n def test_free_energy(self):\n\n rng = N.random.RandomState([1, 2, 3])\n\n m = 2 ** self.nh\n\n Vv = as_floatX(N.zeros((m, self.nv)) + rng.randn(self.nv))\n\n F, = self.F_func(Vv[0:1, :])\n\n Hv = as_floatX(N.zeros((m, self.nh)))\n\n for i in xrange(m):\n for j in xrange(self.nh):\n Hv[i, j] = (i & (2 ** j)) / (2 ** j)\n\n Ev = self.E_func(Vv, Hv)\n\n Fv = -N.log(N.exp(-Ev).sum())\n assert abs(F-Fv) < 1e-6\n\n def test_score(self):\n rng = N.random.RandomState([1, 2, 3])\n\n m = 10\n\n Vv = as_floatX(rng.randn(m, self.nv))\n\n Sv = self.score_func(Vv)\n gSv = self.generic_score_func(Vv)\n\n assert N.allclose(Sv, gSv)\n" ]
[ [ "numpy.random.RandomState", "numpy.allclose", "numpy.exp", "numpy.zeros" ] ]
SchubertLab/mvTCR
[ "d815749e24650f69ef68054e0078d490af91b71d" ]
[ "tcr_embedding/models/mixture_modules/separate_model.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom tcr_embedding.models.architectures.transformer import TransformerEncoder, TransformerDecoder\nfrom tcr_embedding.models.architectures.mlp import MLP\nfrom tcr_embedding.models.architectures.mlp_scRNA import build_mlp_encoder, build_mlp_decoder\nfrom tcr_embedding.models.vae_base_model import VAEBaseModel\n\n\ndef none_model(hyperparams, hdim, xdim):\n\tpass\n\n\nclass SeparateModelTorch(nn.Module):\n\tdef __init__(self, tcr_params, rna_params, joint_params):\n\t\tsuper(SeparateModelTorch, self).__init__()\n\t\thdim = joint_params['hdim']\n\t\tnum_conditional_labels = joint_params['num_conditional_labels']\n\t\tcond_dim = joint_params['cond_dim']\n\t\tcond_input = joint_params['cond_input']\n\t\tzdim = joint_params['zdim']\n\t\tshared_hidden = joint_params['shared_hidden']\n\t\tactivation = joint_params['activation']\n\t\tdropout = joint_params['dropout']\n\t\tbatch_norm = joint_params['batch_norm']\n\n\t\tnum_seq_labels = tcr_params['num_seq_labels']\n\n\t\tself.use_rna = rna_params is not None\n\t\tnum_modalities = 1 if rna_params is None else 2\n\n\t\tself.alpha_encoder = TransformerEncoder(tcr_params, hdim//2, num_seq_labels)\n\t\tself.alpha_decoder = TransformerDecoder(tcr_params, hdim*num_modalities, num_seq_labels)\n\n\t\tself.beta_encoder = TransformerEncoder(tcr_params, hdim//2, num_seq_labels)\n\t\tself.beta_decoder = TransformerDecoder(tcr_params, hdim*num_modalities, num_seq_labels)\n\n\t\tif not self.use_rna:\n\t\t\txdim = None\n\t\t\tself.gene_encoder = none_model(rna_params, xdim, hdim)\n\t\t\tself.gene_decoder = none_model(rna_params, xdim, hdim)\n\t\telse:\n\t\t\txdim = rna_params['xdim']\n\t\t\tself.gene_encoder = build_mlp_encoder(rna_params, xdim, hdim)\n\t\t\tself.gene_decoder = build_mlp_decoder(rna_params, xdim, hdim*num_modalities)\n\t\t\t# used for NB loss\n\t\t\tself.theta = torch.nn.Parameter(torch.randn(xdim))\n\n\t\tif cond_dim > 0:\n\t\t\tself.cond_emb = torch.nn.Embedding(num_conditional_labels, cond_dim)\n\t\tself.cond_input = cond_input\n\t\tcond_input_dim = cond_dim if cond_input else 0\n\n\t\tself.shared_encoder = MLP(hdim*num_modalities+cond_input_dim, zdim*2, shared_hidden, activation, 'linear',\n\t\t\t\t\t\t\t\t dropout, batch_norm, regularize_last_layer=False)\n\t\tself.shared_decoder = MLP(zdim+cond_dim, hdim*num_modalities, shared_hidden[::-1], activation, activation,\n\t\t\t\t\t\t\t\t dropout, batch_norm, regularize_last_layer=True)\n\n\n\tdef forward(self, rna, tcr, tcr_len, conditional=None):\n\t\t\"\"\"\n\t\tForward pass of autoencoder\n\t\t:param rna: torch.Tensor shape=[batch_size, num_genes]\n\t\t:param tcr: torch.Tensor shape=[batch_size, seq_len, num_seq_labels]\n\t\t:param tcr_len: torch.LongTensor shape=[batch_size] indicating how long the real unpadded length is\n\t\t:param conditional: torch.Tensor shape=[batch_size, n_cond] one-hot-encoded conditional covariates\n\t\t:return: scRNA_pred, tcr_seq_pred\n\t\t\"\"\"\n\t\talpha_seq = tcr[:, :tcr.shape[1]//2]\n\t\talpha_len = tcr_len[:, 0]\n\n\t\tbeta_seq = tcr[:, tcr.shape[1]//2:]\n\t\tbeta_len = tcr_len[:, 1]\n\n\t\th_beta = self.beta_encoder(beta_seq, beta_len) # shape=[batch_size, hdim//2]\n\t\th_alpha = self.alpha_encoder(alpha_seq, alpha_len) # shape=[batch_size, hdim//2]\n\n\t\tif conditional is not None: # more efficient than doing two concatenations\n\t\t\tcond_emb_vec = self.cond_emb(conditional)\n\n\t\tif not self.use_rna:\n\t\t\tif conditional is not None and self.cond_input: # more efficient than doing two 
concatenations\n\t\t\t\tjoint_feature = torch.cat([h_alpha, h_beta, cond_emb_vec], dim=-1) # shape=[batch_size, hdim+cond_dim]\n\t\t\telse:\n\t\t\t\tjoint_feature = torch.cat([h_alpha, h_beta], dim=-1)\n\t\telse:\n\t\t\th_rna = self.gene_encoder(rna) # shape=[batch_size, hdim]\n\t\t\tif conditional is not None and self.cond_input:\n\t\t\t\tjoint_feature = torch.cat([h_rna, h_alpha, h_beta, cond_emb_vec], dim=-1)\n\t\t\telse:\n\t\t\t\tjoint_feature = torch.cat([h_rna, h_alpha, h_beta], dim=-1)\n\n\t\tz_ = self.shared_encoder(joint_feature) # shape=[batch_size, zdim*2]\n\t\tmu, logvar = z_[:, :z_.shape[1]//2], z_[:, z_.shape[1]//2:] # mu.shape = logvar.shape = [batch_size, zdim]\n\t\tz = self.reparameterize(mu, logvar) # shape=[batch_size, zdim]\n\n\t\tif conditional is not None:\n\t\t\tz_input = torch.cat([z, cond_emb_vec], dim=-1) # shape=[batch_size, zdim+cond_dim]\n\t\telse:\n\t\t\tz_input = z\n\t\tjoint_dec_feature = self.shared_decoder(z_input) # shape=[batch_size, hdim*2]\n\t\tif not self.use_rna:\n\t\t\trna_pred = None\n\t\telse:\n\t\t\trna_pred = self.gene_decoder(joint_dec_feature) # shape=[batch_size, num_genes]\n\n\t\talpha_seq_pred = self.alpha_decoder(joint_dec_feature, alpha_seq)\n\t\tbeta_seq_pred = self.beta_decoder(joint_dec_feature, beta_seq)\n\n\t\ttcr_pred = torch.cat([alpha_seq_pred, beta_seq_pred], dim=1) # cat along sequence dim\n\t\treturn z, mu, logvar, rna_pred, tcr_pred\n\n\tdef reparameterize(self, mu, log_var):\n\t\t\"\"\"\n\t\thttps://debuggercafe.com/getting-started-with-variational-autoencoder-using-pytorch/\n\t\t:param mu: mean from the encoder's latent space\n\t\t:param log_var: log variance from the encoder's latent space\n\t\t\"\"\"\n\t\tstd = torch.exp(0.5 * log_var) # standard deviation\n\t\teps = torch.randn_like(std) # `randn_like` as we need the same size\n\t\tz = mu + (eps * std) # sampling as if coming from the input space\n\t\treturn z\n\n\tdef predict_transcriptome(self, z_shared, conditional=None):\n\t\t\"\"\"\n\t\tPredict the transcriptome connected to an shared latent space\n\t\t:param z_shared: torch.tensor, shared latent representation\n\t\t:param conditional:\n\t\t:return: torch.tensor, transcriptome profile\n\t\t\"\"\"\n\t\tif conditional is not None: # more efficient than doing two concatenations\n\t\t\tcond_emb_vec = self.cond_emb(conditional)\n\t\t\tz_shared = torch.cat([z_shared, cond_emb_vec], dim=-1) # shape=[batch_size, zdim+cond_dim]\n\n\t\tjoint_dec_feature = self.shared_decoder(z_shared)\n\t\tif self.scRNA_model_arch == 'None' or self.scRNA_model_arch is None:\n\t\t\traise ValueError('Trying to predict transcriptome with a model without rna')\n\t\telse:\n\t\t\ttranscriptome_pred = self.gene_decoder(joint_dec_feature) # shape=[batch_size, num_genes]\n\t\treturn transcriptome_pred\n\n\tdef get_latent_from_z(self, z):\n\t\treturn z\n\n\nclass SeparateModel(VAEBaseModel):\n\tdef __init__(self,\n\t\t\t\t adata,\n\t\t\t\t params_architecture,\n\t\t\t\t balanced_sampling='clonotype',\n\t\t\t\t metadata=None,\n\t\t\t\t conditional=None,\n\t\t\t\t optimization_mode_params=None,\n\t\t\t\t label_key=None,\n\t\t\t\t device=None\n\t\t\t\t ):\n\t\tsuper(SeparateModel, self).__init__(adata, params_architecture, balanced_sampling, metadata,\n\t\t\t\t\t\t\t\t\t\t\tconditional, optimization_mode_params, label_key, device)\n\t\tself.model_type = 'separate'\n\n\t\tself.params_tcr['max_tcr_length'] = adata.obsm['alpha_seq'].shape[1]\n\t\tself.params_tcr['num_seq_labels'] = len(self.aa_to_id)\n\n\t\tif self.params_rna is not 
None:\n\t\t\tself.params_rna['xdim'] = adata[0].X.shape[1]\n\n\t\tnum_conditional_labels = 0\n\t\tcond_dim = 0\n\t\tif self.conditional is not None:\n\t\t\tif self.conditional in adata.obsm:\n\t\t\t\tnum_conditional_labels = adata.obsm[self.conditional].shape[1]\n\t\t\telse:\n\t\t\t\tnum_conditional_labels = len(adata.obs[self.conditional].unique())\n\t\t\tif 'c_embedding_dim' not in self.params_joint:\n\t\t\t\tcond_dim = 20\n\t\t\telse:\n\t\t\t\tcond_dim = self.params_joint['c_embedding_dim']\n\t\tself.params_joint['num_conditional_labels'] = num_conditional_labels\n\t\tself.params_joint['cond_dim'] = cond_dim\n\t\tself.params_joint['cond_input'] = conditional is not None\n\n\t\tself.model = SeparateModelTorch(self.params_tcr, self.params_rna, self.params_joint)\n\n\tdef calculate_loss(self, rna_pred, rna, tcr_pred, tcr):\n\t\t# For GRU and Transformer, as they don't predict start token for alpha and beta chain, so -2\n\t\tif tcr_pred.shape[1] == tcr.shape[1] - 2:\n\t\t\tmask = torch.ones_like(tcr).bool()\n\t\t\tmask[:, [0, mask.shape[1] // 2]] = False\n\t\t\ttcr_loss = self.loss_weights[1] * self.loss_function_tcr(tcr_pred.flatten(end_dim=1), tcr[mask].flatten())\n\t\telse: # For CNN, as it predicts start token\n\t\t\ttcr_loss = self.loss_weights[1] * self.loss_function_tcr(tcr_pred.flatten(end_dim=1), tcr.flatten())\n\n\t\trna_loss = torch.FloatTensor([0]).to(self.device)\n\t\tif rna_pred is not None:\n\t\t\trna_loss = self.loss_weights[0] * self.loss_function_rna(rna_pred, rna)\n\t\treturn rna_loss, tcr_loss\n\n\tdef calculate_kld_loss(self, mu, logvar, epoch):\n\t\tkld_loss = self.loss_function_kld(mu, logvar)\n\t\tkld_loss *= self.loss_weights[2] * self.get_kl_annealing_factor(epoch)\n\t\tz = mu # make z deterministic by using the mean\n\t\treturn kld_loss, z\n" ]
[ [ "torch.ones_like", "torch.FloatTensor", "torch.randn_like", "torch.randn", "torch.nn.Embedding", "torch.exp", "torch.cat" ] ]
Qin-Ming/nilearn
[ "82f4075d8a8ea9aec25e66bd87ebb79a6be6d32f" ]
[ "nilearn/plotting/tests/test_html_document.py" ]
[ "import os\nimport time\nimport pytest\nimport tempfile\nimport webbrowser\nfrom nilearn.plotting import html_document\n\nfrom numpy.testing import assert_no_warnings\n\n# Note: html output by nilearn view_* functions\n# should validate as html5 using https://validator.w3.org/nu/ with no\n# warnings\n\n\ndef _open_mock(f):\n print('opened {}'.format(f))\n\n\ndef test_temp_file_removing():\n html = html_document.HTMLDocument('hello')\n wb_open = webbrowser.open\n webbrowser.open = _open_mock\n fd, tmpfile = tempfile.mkstemp()\n try:\n os.close(fd)\n with pytest.warns(None) as record:\n html.open_in_browser(file_name=tmpfile, temp_file_lifetime=None)\n for warning in record:\n assert \"Saved HTML in temporary file\" not in str(warning.message)\n html.open_in_browser(temp_file_lifetime=0.5)\n assert os.path.isfile(html._temp_file)\n time.sleep(1.5)\n assert not os.path.isfile(html._temp_file)\n with pytest.warns(UserWarning, match=\"Saved HTML in temporary file\"):\n html.open_in_browser(temp_file_lifetime=None)\n html.open_in_browser(temp_file_lifetime=None)\n assert os.path.isfile(html._temp_file)\n time.sleep(1.5)\n assert os.path.isfile(html._temp_file)\n finally:\n webbrowser.open = wb_open\n try:\n os.remove(html._temp_file)\n except Exception:\n pass\n try:\n os.remove(tmpfile)\n except Exception:\n pass\n\n\ndef _open_views():\n return [html_document.HTMLDocument('') for i in range(12)]\n\n\ndef _open_one_view():\n for i in range(12):\n v = html_document.HTMLDocument('')\n return v\n\n\ndef test_open_view_warning():\n # opening many views (without deleting the SurfaceView objects)\n # should raise a warning about memory usage\n pytest.warns(UserWarning, _open_views)\n assert_no_warnings(_open_one_view)\n html_document.set_max_img_views_before_warning(15)\n assert_no_warnings(_open_views)\n html_document.set_max_img_views_before_warning(-1)\n assert_no_warnings(_open_views)\n html_document.set_max_img_views_before_warning(None)\n assert_no_warnings(_open_views)\n html_document.set_max_img_views_before_warning(6)\n pytest.warns(UserWarning, _open_views)\n" ]
[ [ "numpy.testing.assert_no_warnings" ] ]
brechtvl/embree
[ "ae029e2ff83bebbbe8742c88aba5b0521aba1a23" ]
[ "scripts/generate_motion_derivative_coefficients.py" ]
[ "#!/usr/bin/python\n\n## Copyright 2009-2021 Intel Corporation\n## SPDX-License-Identifier: Apache-2.0\n\nimport sympy as sp\nimport numpy as np\nimport math\n\n\n################################################################################\n#### Utils\n################################################################################\n\ndef getTerms(map, key):\n if key in map.keys():\n return map[key]\n return 0\n\n# simple linear interpolation wrapper\ndef lerp(v0,v1,t):\n return v0*(1-t)+v1*t\n\n# custom quaternion to matrix conversion\ndef to_rotation_matrix(q):\n return sp.Matrix([[q.a*q.a + q.b*q.b - q.c*q.c - q.d*q.d, 2*(q.b*q.c - q.a*q.d), 2*(q.b*q.d + q.a*q.c), 0],\n [2*(q.b*q.c + q.a*q.d), q.a*q.a - q.b*q.b + q.c*q.c - q.d*q.d, 2*(q.c*q.d - q.a*q.b), 0],\n [2*(q.b*q.d - q.a*q.c), 2*(q.c*q.d + q.a*q.b), q.a*q.a - q.b*q.b - q.c*q.c + q.d*q.d, 0],\n [0, 0, 0, 1]])\n\n\n################################################################################\n#### Set up symbolic objects\n################################################################################\n\nt, theta = sp.symbols(\"t, theta\", real = True)\n\npx0, py0, pz0 = sp.symbols(\"px0, py0, pz0\", real=True) # vertex position at t=0\npx1, py1, pz1 = sp.symbols(\"px1, py1, pz1\", real=True) # vertex position at t=1\n\ntx0, ty0, tz0 = sp.symbols(\"tx0, ty0, tz0\", real=True) # translation at t=0\ntx1, ty1, tz1 = sp.symbols(\"tx1, ty1, tz1\", real=True) # translation at t=1\n\nqx0, qy0, qz0, qw0 = sp.symbols(\"qx0, qy0, qz0, qw0\", real=True) # quaternion at t=0 \nqx1, qy1, qz1, qw1 = sp.symbols(\"qx1, qy1, qz1, qw1\", real=True) # quaternion at t=1\n\n# coefficients for upper triangular matrices\ns000, s001, s002, s003, s011, s012, s013, s022, s023 = sp.symbols(\"s000, s001, s002, s003, s011, s012, s013, s022, s023\", real=True)\ns100, s101, s102, s103, s111, s112, s113, s122, s123 = sp.symbols(\"s100, s101, s102, s103, s111, s112, s113, s122, s123\", real=True)\n\nq0 = sp.Quaternion(qw0, qx0, qy0, qz0)\nq1 = sp.Quaternion(qw1, qx1, qy1, qz1)\n\n# assuming that q1 is qperp = normalize(q1-q0*cosTheta), where cosTheta=dot(q0, q1) and theta = acos(cosTheta).\n# this simplifies the terms of the symbolic expressions later\nqt = q0 * sp.cos(t*theta) + q1 * sp.sin(t*theta)\n\nS0 = sp.Matrix([[s000, s001, s002, s003],\n [ 0, s011, s012, s013],\n [ 0, 0, s022, s023],\n [ 0, 0, 0, 1]])\nS1 = sp.Matrix([[s100, s101, s102, s103],\n [ 0, s111, s112, s113],\n [ 0, 0, s122, s123],\n [ 0, 0, 0, 1]])\nD0 = sp.Matrix([[1, 0, 0, tx0],\n [0, 1, 0, ty0],\n [0, 0, 1, tz0],\n [0, 0, 0, 1]])\nD1 = sp.Matrix([[1, 0, 0, tx1],\n [0, 1, 0, ty1],\n [0, 0, 1, tz1],\n [0, 0, 0, 1]])\np0 = sp.Matrix([px0, py0, pz0, 1])\np1 = sp.Matrix([px1, py1, pz1, 1])\n\nGamma = lerp(D0, D1, t)*to_rotation_matrix(qt)*lerp(S0, S1, t)*lerp(p0, p1, t)\nC = sp.Matrix(np.empty(8)) # 8 coefficients\nK = sp.Matrix(np.empty(7)) # 7 inputs\nA = sp.Matrix(np.empty(8*7*3)) # 8 coefficients, 7 inputs (1, px0, py0, pz0, px1, py1, pz1), 3 dimensions (x, y, z)\ndGamma = sp.diff(Gamma, t)\n\n\n################################################################################\n#### Group the coefficients (this might time a couple of seconds)\n################################################################################\n\n# loop over dimensions (x, y, z)\nfor dim in range(3):\n dm = sp.expand(dGamma[dim])\n dm = dm.subs(sp.sin(t*theta)*sp.sin(t*theta),(1-sp.cos(2*t*theta))/2) # remove sin(t*theta)^2\n dm = dm.subs(sp.cos(t*theta)*sp.cos(t*theta),(1+sp.cos(2*t*theta))/2) # remove 
cos(t*theta)^2\n dm = dm.subs(sp.sin(t*theta)*sp.cos(t*theta),sp.sin(2*t*theta)/2) # remove sin(t*theta)*cos(t*theta)\n dm = sp.expand(dm)\n\n # group all terms in the form a + b * cos(2*t*theta) + c * sin(2*t*theta)\n dm_cos_sin = sp.collect(dm, (sp.cos(2*t*theta), sp.sin(2*t*theta)), evaluate=False)\n\n # get the terms\n coeff_cos = getTerms(dm_cos_sin, sp.cos(2*t*theta))\n coeff_sin = getTerms(dm_cos_sin, sp.sin(2*t*theta))\n coeff_const = getTerms(dm_cos_sin, 1)\n\n # group the term in the form a + b * t \n coeff_const_t = sp.collect(coeff_const, t, evaluate=False)\n C[0] = getTerms(coeff_const_t, 1)\n C[1] = getTerms(coeff_const_t, t)\n\n # group the term in the form a + b * t + c * t^2 \n coeff_cos_t = sp.collect(coeff_cos, t, evaluate=False)\n C[2] = getTerms(coeff_cos_t, 1)\n C[3] = getTerms(coeff_cos_t, t)\n C[4] = getTerms(coeff_cos_t, t*t)\n\n # group the term in the form a + b * t + c * t^2 \n coeff_sin_t = sp.collect(coeff_sin, t, evaluate=False)\n C[5] = getTerms(coeff_sin_t, 1)\n C[6] = getTerms(coeff_sin_t, t)\n C[7] = getTerms(coeff_sin_t, t*t)\n\n for c in range(8):\n kc = sp.collect(C[c], (px0, py0, pz0, px1, py1, pz1), evaluate=False)\n K[0] = getTerms(kc, 1)\n K[1] = getTerms(kc, px0)\n K[2] = getTerms(kc, py0)\n K[3] = getTerms(kc, pz0)\n K[4] = getTerms(kc, px1)\n K[5] = getTerms(kc, py1)\n K[6] = getTerms(kc, pz1)\n\n for k in range(7):\n K[k] = sp.expand(K[k])\n K[k] = K[k].subs(qw0*qw0, 1-qx0*qx0-qy0*qy0-qz0*qz0) # clean up substitutions\n K[k] = K[k].subs(qw1*qw1, 1-qx1*qx1-qy1*qy1-qz1*qz1) # clean up substitutions\n K[k] = sp.simplify(K[k])\n A[8*7*dim + c*7 + k] = K[k]\n\n\n################################################################################\n#### Write code to file\n################################################################################\n\nfrom sympy.utilities.codegen import codegen, default_datatypes\nfrom sympy.codegen.ast import real, float32\nfrom sympy.printing.ccode import C99CodePrinter\nprinter = C99CodePrinter()\n\n# custom code printer that will not generate such nonesene as x^2 -> pow(x, 2)\nclass CustomCodePrinter(C99CodePrinter):\n def _print_Pow(self, expr):\n if expr.exp.is_integer and expr.exp > 0 and expr.exp < 5:\n return '*'.join([self._print(expr.base) for i in range(expr.exp)])\n else:\n return super()._print_Pow(expr)\n\ncustomprinter = CustomCodePrinter()\ncustomprinter.type_aliases[real] = float32 # cosf instead of cos\ndefault_datatypes[\"float\"].cname = \"float\" # float instead of double\nparams = [\n theta,\n tx0, ty0, tz0,\n tx1, ty1, tz1,\n qw0, qx0, qy0, qz0,\n qw1, qx1, qy1, qz1,\n s000, s001, s002, s003, s011, s012, s013, s022, s023,\n s100, s101, s102, s103, s111, s112, s113, s122, s123]\nR = sp.MatrixSymbol(\"coeff\", A.shape[0], A.shape[1])\nP = sp.MatrixSymbol('p', len(params), 1)\nparam_map = dict(zip(params, P))\nB = A.xreplace(param_map)\ncodegen(('motion_derivative_coefficients', sp.Eq(R,B)), language='c', printer=customprinter, prefix='motion_derivative_coefficients', to_files=True)" ]
[ [ "numpy.empty" ] ]
Mingzheng01/pointnet
[ "401692e08441ff459b63786b9c65c11f78ea599e" ]
[ "sample_from_mesh.py" ]
[ "import open3d as o3d\nimport os\nimport numpy as np\nimport h5py\n\npoint_clouds = []\nfor dirpath, dirnames, filenames in os.walk(\"F:\\\\cases\\\\tooth_11_stls\"):\n for filename in filenames:\n print(os.path.splitext(filename)[-1])\n if os.path.splitext(filename)[-1] != \".stl\":\n continue\n\n full_filename = os.path.join(dirpath, filename)\n mesh = o3d.io.read_triangle_mesh(full_filename)\n mesh.remove_duplicated_vertices()\n mesh.compute_vertex_normals()\n print(mesh)\n pcd = mesh.sample_points_poisson_disk(1024)\n print(pcd)\n #o3d.visualization.draw_geometries([mesh, pcd], mesh_show_wireframe=True)\n #base_name = os.path.splitext(os.path.basename(filename))[0]\n #o3d.io.write_point_cloud(os.path.join(dirpath, base_name) + \".ply\", pcd)\n point_clouds.append(np.array(pcd.points))\n\nf = h5py.File(\"F:\\\\cases\\\\tooth_11_stls\\\\point_clouds.hdf5\", mode='w')\nf[\"point_clouds\"] = point_clouds\nf.close()\n\n" ]
[ [ "numpy.array" ] ]
rkansal47/weaver
[ "7e9d3d8c9ee43acb2a95f2d3f76c384822e04699" ]
[ "utils/data/tools.py" ]
[ "import numpy as np\nimport math\n\ntry:\n import awkward0 as awkward\nexcept ImportError:\n import awkward\n if awkward.__version__[0] == '1':\n raise ImportError('Please install awkward0 with `pip install awkward0`.')\n\n\ndef _concat(arrays, axis=0):\n if len(arrays) == 0:\n return np.array([])\n if isinstance(arrays[0], np.ndarray):\n return np.concatenate(arrays, axis=axis)\n else:\n return awkward.concatenate(arrays, axis=axis)\n\n\ndef _stack(arrays, axis=1):\n if len(arrays) == 0:\n return np.array([])\n if isinstance(arrays[0], np.ndarray):\n return np.stack(arrays, axis=axis)\n else:\n content = np.stack([a.content for a in arrays], axis=axis)\n return awkward.JaggedArray.fromcounts(arrays[0].counts, content)\n\n\ndef _pad(a, maxlen, value=0, dtype='float32'):\n if isinstance(a, np.ndarray) and a.ndim >= 2 and a.shape[1] == maxlen:\n return a\n elif isinstance(a, awkward.JaggedArray):\n return a.pad(maxlen, clip=True).fillna(value).regular().astype(dtype)\n else:\n x = (np.ones((len(a), maxlen)) * value).astype(dtype)\n for idx, s in enumerate(a):\n if not len(s):\n continue\n trunc = s[:maxlen].astype(dtype)\n x[idx, :len(trunc)] = trunc\n return x\n\n\ndef _repeat_pad(a, maxlen, shuffle=False, dtype='float32'):\n x = a.flatten()\n x = np.tile(x, int(np.ceil(len(a) * maxlen / len(x))))\n if shuffle:\n np.random.shuffle(x)\n x = x[:len(a) * maxlen].reshape((len(a), maxlen))\n mask = _pad(awkward.JaggedArray.zeros_like(a), maxlen, value=1)\n x = _pad(a, maxlen) + mask * x\n return x.astype(dtype)\n\n\ndef _clip(a, a_min, a_max):\n if isinstance(a, np.ndarray):\n return np.clip(a, a_min, a_max)\n else:\n return awkward.JaggedArray.fromcounts(a.counts, np.clip(a.content, a_min, a_max))\n\n\ndef _knn(support, query, k, n_jobs=1):\n from scipy.spatial import cKDTree\n kdtree = cKDTree(support)\n d, idx = kdtree.query(query, k, n_jobs=n_jobs)\n return idx\n\n\ndef _batch_knn(supports, queries, k, maxlen_s, maxlen_q=None, n_jobs=1):\n assert (len(supports) == len(queries))\n if maxlen_q is None:\n maxlen_q = maxlen_s\n batch_knn_idx = np.ones((len(supports), maxlen_q, k), dtype='int32') * (maxlen_s - 1)\n for i, (s, q) in enumerate(zip(supports, queries)):\n batch_knn_idx[i, :len(q[:maxlen_q]), :] = _knn(\n s[:maxlen_s], q[:maxlen_q], k, n_jobs=n_jobs).reshape((-1, k)) # (len(q), k)\n return batch_knn_idx\n\n\ndef _batch_permute_indices(array, maxlen):\n batch_permute_idx = np.tile(np.arange(maxlen), (len(array), 1))\n for i, a in enumerate(array):\n batch_permute_idx[i, :len(a)] = np.random.permutation(len(a[:maxlen]))\n return batch_permute_idx\n\n\ndef _batch_argsort(array, maxlen):\n batch_argsort_idx = np.tile(np.arange(maxlen), (len(array), 1))\n for i, a in enumerate(array):\n batch_argsort_idx[i, :len(a)] = np.argsort(a[:maxlen])\n return batch_argsort_idx\n\n\ndef _batch_gather(array, indices):\n out = array.zeros_like()\n for i, (a, idx) in enumerate(zip(array, indices)):\n maxlen = min(len(a), len(idx))\n out[i][:maxlen] = a[idx[:maxlen]]\n return out\n\n\ndef _get_variable_names(expr, exclude=['awkward', 'np', 'numpy', 'math']):\n import ast\n root = ast.parse(expr)\n return sorted({node.id for node in ast.walk(root) if isinstance(\n node, ast.Name) and not node.id.startswith('_')} - set(exclude))\n\n\ndef _eval_expr(expr, table):\n tmp = {k: table[k] for k in _get_variable_names(expr)}\n tmp.update(\n {'math': math, 'np': np, 'awkward': awkward, '_concat': _concat, '_stack': _stack, '_pad': _pad,\n '_repeat_pad': _repeat_pad, '_clip': _clip, '_batch_knn': _batch_knn,\n 
'_batch_permute_indices': _batch_permute_indices, '_batch_argsort': _batch_argsort,\n '_batch_gather': _batch_gather})\n return eval(expr, tmp)\n" ]
[ [ "numpy.random.shuffle", "numpy.stack", "numpy.argsort", "numpy.arange", "scipy.spatial.cKDTree", "numpy.clip", "numpy.array", "numpy.concatenate" ] ]
VXallset/deep-high-resolution-net.TensorFlow
[ "d885abc6f8699f5dfd09b270170f3c68fbf32ac2" ]
[ "src/utils.py" ]
[ "\"\"\"\nThis is the utils for deep learning, implemented with TensorFlow.\n\n@ Author: Yu Sun. [email protected]\n\n@ Date created: Jun 04, 2019\n\n@ Last modified: Jun 06, 2019\n\n\"\"\"\nimport tensorflow as tf\n\n\ndef leaky_Relu(input, name=''):\n return tf.nn.leaky_relu(input, alpha=0.1, name=name + '_relu')\n\n\ndef conv_2d(inputs, channels, kernel_size=3, strides=1, batch_normalization=True, activation=None,\n name='', padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), is_training=True):\n\n output = tf.layers.conv2d(inputs=inputs, filters=channels, kernel_size=kernel_size, strides=strides,\n padding=padding, name=name + '_conv', kernel_initializer=kernel_initializer)\n name = name + '_conv'\n\n if batch_normalization:\n output = tf.layers.batch_normalization(output, axis=-1, momentum=0.9, name=name+'_bn', training=is_training)\n name = name + '_bn'\n\n if activation:\n output = activation(output, name=name)\n\n return output\n\n\ndef down_sampling(input, method='strided_convolution', rate=2, name='', activation=leaky_Relu, is_training=True):\n assert method == 'max_pooling' or method == 'strided_convolution', \\\n 'Unknown type of down_sample method! \"strided_convolution\" and \"' \\\n 'max_pooling\" are expected, but \"' + method + '\" is provided!'\n output = input\n\n if method == 'strided_convolution':\n _, _, _, channels = input.get_shape()\n channels = channels.value\n output = input\n loop_index = 1\n new_rate = rate\n while new_rate > 1:\n assert new_rate % 2 == 0, 'The rate of down_sampling (using \"strided_convolution\") must be the power of ' \\\n '2, but \"{}\" is provided!'.format(rate)\n output = conv_2d(output, channels=channels * (2 ** loop_index), strides=2, activation=activation,\n name=name + 'down_sampling' + '_x' + str(loop_index * 2), is_training=is_training)\n loop_index += 1\n new_rate = int(new_rate / 2)\n\n elif method == 'max_pooling':\n output = tf.layers.max_pooling2d(input, pool_size=rate, strides=rate, name=name+'_max_pooling')\n\n return output\n\n\ndef up_sampling(input, channels, method='nearest_neighbor', rate=2, name='', activation=leaky_Relu, is_training=True):\n assert method == 'nearest_neighbor', 'Only \"nearest_neighbor\" method is supported now! 
' \\\n 'However, \"' + method + '\" is provided.'\n output = input\n if method == 'nearest_neighbor':\n _, x, y, _= input.get_shape()\n x = x.value\n y = y.value\n\n output = tf.image.resize_nearest_neighbor(input, size=(x*rate, y*rate), name=name + '_upsampling')\n name += '_upsampling'\n output = conv_2d(output, channels=channels, kernel_size=1, activation=activation,\n name=name + '_align_channels', is_training=is_training)\n\n return output\n\n\n# Repeated multi-scale fusion (namely the exchange block) within a stage (the input and the output has the same number\n# of sub-networks)\ndef exchange_within_stage(inputs, name='exchange_within_stage', is_training=True):\n with tf.variable_scope(name):\n subnetworks_number = len(inputs)\n outputs = []\n\n # suppose i is the index of the input sub-network, o is the index of the output sub-network\n for o in range(subnetworks_number):\n one_subnetwork = 0\n for i in range(subnetworks_number):\n if i == o:\n # if in the same resolution\n temp_subnetwork = inputs[i]\n elif i - o < 0:\n # if the input resolution is greater the output resolution, down-sampling with rate\n # of 2 ** (o - i)\n temp_subnetwork = down_sampling(inputs[i], rate=2 ** (o - i), name='i_{}_o_{}'.format(i, o),\n is_training=is_training)\n else:\n # if the input resolution is smaller the output resolution, up-sampling with rate of\n # 2 ** (o - i)\n _, _, _, c = inputs[o].get_shape()\n temp_subnetwork = up_sampling(inputs[i], channels=c, rate=2 ** (i - o),\n name='i_{}_o_{}'.format(i, o), is_training=is_training)\n one_subnetwork = tf.add(temp_subnetwork, one_subnetwork, name='add_i_{}_o_{}'.format(i, o))\n outputs.append(one_subnetwork)\n return outputs\n\n\n# Repeated multi-scale fusion (namely the exchange block) between two stages (the input and the output has the same\n# number of sub-networks)\ndef exchange_between_stage(inputs, name='exchange_between_stage', is_training=True):\n subnetworks_number = len(inputs)\n outputs = []\n\n # suppose i is the index of the input sub-network, o is the index of the output sub-network\n for o in range(subnetworks_number):\n one_subnetwork = 0\n for i in range(subnetworks_number):\n if i == o:\n # if in the same resolution\n temp_subnetwork = inputs[i]\n elif i - o < 0:\n # if the input resolution is greater the output resolution, down-sampling with rate\n # of 2 ** (o - i)\n temp_subnetwork = down_sampling(inputs[i], rate=2 ** (o - i), name='i_{}_o_{}'.format(i, o),\n is_training=is_training)\n else:\n # if the input resolution is smaller the output resolution, up-sampling with rate of\n # 2 ** (o - i)\n _, _, _, c = inputs[o].get_shape()\n temp_subnetwork = up_sampling(inputs[i], channels=c, rate=2 ** (i - o),\n name='i_{}_o_{}'.format(i, o), is_training=is_training)\n one_subnetwork = tf.add(temp_subnetwork, one_subnetwork, name='add_i_{}_o_{}'.format(i, o))\n outputs.append(one_subnetwork)\n one_subnetwork = down_sampling(inputs[-1], rate=2, name='new_resolution', is_training=is_training)\n outputs.append(one_subnetwork)\n return outputs\n\n\ndef residual_unit_bottleneck(input, name='RU_bottleneck', channels=64, is_training=True):\n \"\"\"\n Residual unit with bottleneck design, default width is 64.\n :param input:\n :param name:\n :return:\n \"\"\"\n _, _, _, c = input.get_shape()\n conv_1x1_1 = conv_2d(input, channels=channels, kernel_size=1, activation=leaky_Relu, name=name + '_conv1x1_1',\n is_training = is_training)\n conv_3x3 = conv_2d(conv_1x1_1, channels=channels, activation=leaky_Relu, name=name + '_conv3x3',\n 
is_training=is_training)\n conv_1x1_2 = conv_2d(conv_3x3, channels=c, kernel_size=1, name=name + '_conv1x1_2', is_training=is_training)\n _output = tf.add(input, conv_1x1_2, name=name + '_add')\n output = leaky_Relu(_output, name=name + '_out')\n return output\n\n\ndef residual_unit(input, name='RU', is_training=True):\n \"\"\"\n Residual unit with two 3 x 3 convolution layers.\n :param input:\n :param name:\n :return:\n \"\"\"\n _, _, _, channels = input.get_shape()\n conv3x3_1 = conv_2d(inputs=input, channels=channels, activation=leaky_Relu, name=name + '_conv3x3_1',\n is_training=is_training)\n conv3x3_2 = conv_2d(inputs=conv3x3_1, channels=channels, name=name + '_conv3x3_2', is_training=is_training)\n _output = tf.add(input, conv3x3_2, name=name + '_add')\n output = leaky_Relu(_output, name=name + '_out')\n return output\n\n\ndef exchange_block(inputs, name='exchange_block', is_training=True):\n with tf.variable_scope(name):\n output = []\n level = 0\n for input in inputs:\n sub_network = residual_unit(input, name='level{}RU1'.format(level), is_training=is_training)\n sub_network = residual_unit(sub_network, name='level{}RU2'.format(level), is_training=is_training)\n sub_network = residual_unit(sub_network, name='level{}RU3'.format(level), is_training=is_training)\n sub_network = residual_unit(sub_network, name='level{}RU4'.format(level), is_training=is_training)\n output.append(sub_network)\n level += 1\n outputs = exchange_within_stage(output, is_training=is_training)\n return outputs" ]
[ [ "tensorflow.layers.conv2d", "tensorflow.layers.batch_normalization", "tensorflow.add", "tensorflow.variable_scope", "tensorflow.nn.leaky_relu", "tensorflow.random_normal_initializer", "tensorflow.layers.max_pooling2d", "tensorflow.image.resize_nearest_neighbor" ] ]
cgyqu/python_learning
[ "55c8df4a963c40ace050d3454b72538190cb0517" ]
[ "ml/time_series02.py" ]
[ "#%%\nfrom numpy import array\nfrom numpy import hstack\nfrom keras.models import Sequential\nfrom keras.layers import LSTM\nfrom keras.layers import Dense\nfrom keras.layers import RepeatVector\nfrom keras.layers import TimeDistributed\n#%%\n# split a multivariate sequence into samples\ndef split_sequences(sequences, n_steps_in, n_steps_out):\n\tX, y = list(), list()\n\tfor i in range(len(sequences)):\n\t\t# find the end of this pattern\n\t\tend_ix = i + n_steps_in\n\t\tout_end_ix = end_ix + n_steps_out\n\t\t# check if we are beyond the dataset\n\t\tif out_end_ix > len(sequences):\n\t\t\tbreak\n\t\t# gather input and output parts of the pattern\n\t\tseq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :]\n\t\tX.append(seq_x)\n\t\ty.append(seq_y)\n\treturn array(X), array(y)\n#%%\n# define input sequence\nin_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90])\nin_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95])\nout_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))])\n# convert to [rows, columns] structure\nin_seq1 = in_seq1.reshape((len(in_seq1), 1))\nin_seq2 = in_seq2.reshape((len(in_seq2), 1))\nout_seq = out_seq.reshape((len(out_seq), 1))\n# horizontally stack columns\ndataset = hstack((in_seq1, in_seq2, out_seq))\ndataset\n#%%\n# choose a number of time steps\nn_steps_in, n_steps_out = 3, 2\n# covert into input/output\nX, y = split_sequences(dataset, n_steps_in, n_steps_out)\nX,y\n#%%\n# the dataset knows the number of features, e.g. 2\nn_features = X.shape[2]\n# define model\nmodel = Sequential()\nmodel.add(LSTM(200, activation='relu', input_shape=(n_steps_in, n_features)))\nmodel.add(RepeatVector(n_steps_out))\nmodel.add(LSTM(200, activation='relu', return_sequences=True))\nmodel.add(TimeDistributed(Dense(n_features))) \n'''\n这里输入3D为(样本量,return_sequences数量(=n_steps_out),200)\n输出为(样本量,return_sequences数量(=n_steps_out),n_features)\n就是每个输出是(3,2)维度的\n'''\nmodel.compile(optimizer='adam', loss='mse')\n# fit model\nmodel.fit(X, y, epochs=300, verbose=0,batch_size=2)\n# demonstrate prediction\nx_input = array([[60, 65, 125], [70, 75, 145], [80, 85, 165]])\nx_input = x_input.reshape((1, n_steps_in, n_features))\nyhat = model.predict(x_input, verbose=0)\nprint(yhat)\n\n# %%\n" ]
[ [ "numpy.array", "numpy.hstack" ] ]
aacsspkt/autodealerappliation
[ "c7ab3ae8e57e91c797129e87a13bd00d41bc4753" ]
[ "app/customer/views.py" ]
[ "from django.views.generic import ListView, DetailView, FormView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.urls.base import reverse\n\nfrom .models import Customer\nfrom .forms import CustomerForm, CustomerImportForm\n\n\ndef get_filtered_queryset(searchkey, searchvalue):\n return {\n \"citizenship_no\": Customer.objects.filter(citizenship_no__contains=searchvalue),\n \"pan_no\": Customer.objects.filter(pan_no__contains=searchvalue),\n \"fullname\": Customer.objects.filter(fullname__contains=searchvalue),\n \"dob\": Customer.objects.filter(dob__contains=searchvalue),\n \"gender\": Customer.objects.filter(gender__contains=searchvalue),\n \"email\": Customer.objects.filter(email__contains=searchvalue),\n \"phone\": Customer.objects.filter(phone__contains=searchvalue),\n \"occupation\": Customer.objects.filter(occupation__contains=searchvalue),\n \"city\": Customer.objects.filter(city__contains=searchvalue),\n \"district\": Customer.objects.filter(district__contains=searchvalue),\n \"state\": Customer.objects.filter(state__contains=searchvalue),\n \"country\": Customer.objects.filter(country__contains=searchvalue),\n \"state\": Customer.objects.filter(state__contains=searchvalue),\n }.get(searchkey, Customer.objects.all())\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerListView(ListView):\n model = Customer\n template_name = \"customer/index.html\"\n paginate_by = 12\n\n def get_context_data(self, **kwargs):\n context = super(CustomerListView, self).get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"fields\"] = Customer._meta.get_fields(include_parents=False)\n return context\n\n def get_queryset(self):\n searchkey = self.request.GET.get(\"searchkey\", None)\n searchvalue = self.request.GET.get(\"searchvalue\", None)\n if searchkey != None:\n return get_filtered_queryset(searchkey, searchvalue)\n else:\n return Customer.objects.all()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerDetailView(DetailView):\n model = Customer\n template_name = \"customer/detail.html\"\n pk_url_kwarg = \"id\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"detail\"\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerCreateView(SuccessMessageMixin, CreateView):\n model = Customer\n template_name = \"customer/create.html\"\n form_class = CustomerForm\n\n def get_success_message(self, cleaned_data):\n return \"Customer successfully created\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"create\"\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerUpdateView(SuccessMessageMixin, UpdateView):\n model = Customer\n template_name = \"customer/edit.html\"\n form_class = CustomerForm\n pk_url_kwarg = \"id\"\n\n def get_success_message(self, cleaned_data):\n return \"Customer successfully updated\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"edit\"\n return context\n\n\n@method_decorator(login_required, 
name=\"dispatch\")\nclass CustomerDeleteView(SuccessMessageMixin, DeleteView):\n model = Customer\n template_name = \"customer/delete.html\"\n pk_url_kwarg = \"id\"\n success_url = \"customer:customer-index\"\n\n def get_success_message(self, cleaned_data):\n return \"Customer successfully deleted\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"delete\"\n return context\n\n\nimport pandas as pd\nfrom pprint import pprint\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass CustomerImportView(SuccessMessageMixin, FormView):\n form_class = CustomerImportForm\n template_name = \"customer/import.html\"\n success_url = \"/customer/import/\"\n success_message = \"Customer successfully imported\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"segment\"] = \"customer\"\n context[\"child_segment\"] = \"import\"\n return context\n\n def form_valid(self, form):\n csvfile = form.cleaned_data.get(\"csvfile\", None)\n if csvfile is not None:\n df = pd.read_csv(csvfile)\n df_dict = df.to_dict(orient=\"index\")\n try:\n customer_list = [\n Customer(\n citizenship_no=v[\"citizenship number\"],\n pan_no=v[\"pan number\"],\n fullname=v[\"full name\"],\n dob=v[\"date of birth\"],\n gender=v[\"gender\"],\n email=v[\"email\"],\n phone=v[\"phone\"],\n occupation=v[\"occupation\"],\n city=v[\"city\"],\n district=v[\"district\"],\n state=v[\"state\"],\n country=v[\"country\"],\n address=v[\"address\"],\n )\n for i, v in df_dict.items()\n ]\n except KeyError:\n form.add_error(\n None,\n \"\"\"Column name in file doesn't match! \n Columns: \n 'citizenship number', 'pan number', 'full name', 'date of birth', 'gender', 'email',\n 'phone', 'occupation', 'city', 'district', 'state', 'country', 'address'.\"\"\",\n )\n return self.render_to_response(self.get_context_data(form=form))\n\n objs = Customer.objects.bulk_create(customer_list)\n pprint(objs)\n\n return super().form_valid(form)\n" ]
[ [ "pandas.read_csv" ] ]
Ascend-Huawei/AVOD
[ "ea62372517bbfa9d4020bc5ab2739ee182c63c56" ]
[ "avod/datasets/kitti/kitti_aug.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom npu_bridge.npu_init import *\nimport copy\n\nimport numpy as np\n\nAUG_FLIPPING = 'flipping'\nAUG_PCA_JITTER = 'pca_jitter'\n\n\ndef flip_image(image):\n \"\"\"Flips an image horizontally\n \"\"\"\n flipped_image = np.fliplr(image)\n return flipped_image\n\n\ndef flip_points(points):\n \"\"\"Flips a list of points (N, 3)\n \"\"\"\n flipped_points = np.copy(points)\n flipped_points[:, 0] = -points[:, 0]\n return flipped_points\n\n\ndef flip_point_cloud(point_cloud):\n \"\"\"Flips a point cloud (3, N)\n \"\"\"\n flipped_point_cloud = np.copy(point_cloud)\n flipped_point_cloud[0] = -point_cloud[0]\n return flipped_point_cloud\n\n\ndef flip_label_in_3d_only(obj_label):\n \"\"\"Flips only the 3D position of an object label. 
The 2D bounding box is\n not flipped to save time since it is not used.\n\n Args:\n obj_label: ObjectLabel\n\n Returns:\n A flipped object\n \"\"\"\n\n flipped_label = copy.deepcopy(obj_label)\n\n # Flip the rotation\n if obj_label.ry >= 0:\n flipped_label.ry = np.pi - obj_label.ry\n else:\n flipped_label.ry = -np.pi - obj_label.ry\n\n # Flip the t.x sign, t.y and t.z remains the unchanged\n flipped_t = (-flipped_label.t[0], flipped_label.t[1], flipped_label.t[2])\n flipped_label.t = flipped_t\n\n return flipped_label\n\n\ndef flip_boxes_3d(boxes_3d, flip_ry=True):\n \"\"\"Flips boxes_3d\n\n Args:\n boxes_3d: List of boxes in box_3d format\n flip_ry bool: (optional) if False, rotation is not flipped to save on\n computation (useful for flipping anchors)\n\n Returns:\n flipped_boxes_3d: Flipped boxes in box_3d format\n \"\"\"\n\n flipped_boxes_3d = np.copy(boxes_3d)\n\n if flip_ry:\n # Flip the rotation\n above_zero = boxes_3d[:, 6] >= 0\n below_zero = np.logical_not(above_zero)\n flipped_boxes_3d[above_zero, 6] = np.pi - boxes_3d[above_zero, 6]\n flipped_boxes_3d[below_zero, 6] = -np.pi - boxes_3d[below_zero, 6]\n\n # Flip the t.x sign, t.y and t.z remains the unchanged\n flipped_boxes_3d[:, 0] = -boxes_3d[:, 0]\n\n return flipped_boxes_3d\n\n\ndef flip_ground_plane(ground_plane):\n \"\"\"Flips the ground plane by negating the x coefficient\n (ax + by + cz + d = 0)\n\n Args:\n ground_plane: ground plane coefficients\n\n Returns:\n Flipped ground plane coefficients\n \"\"\"\n flipped_ground_plane = np.copy(ground_plane)\n flipped_ground_plane[0] = -ground_plane[0]\n return flipped_ground_plane\n\n\ndef flip_stereo_calib_p2(calib_p2, image_shape):\n \"\"\"Flips the stereo calibration matrix to correct the projection back to\n image space. Flipping the image can be seen as a movement of both the\n camera plane, and the camera itself. 
To account for this, the instrinsic\n matrix x0 value is flipped with respect to the image width, and the\n extrinsic matrix t1 value is negated.\n\n Args:\n calib_p2: 3 x 4 stereo camera calibration matrix\n image_shape: (h, w) image shape\n\n Returns:\n 'Flipped' calibration p2 matrix with shape (3, 4)\n \"\"\"\n flipped_p2 = np.copy(calib_p2)\n flipped_p2[0, 2] = image_shape[1] - calib_p2[0, 2]\n flipped_p2[0, 3] = -calib_p2[0, 3]\n\n return flipped_p2\n\n\ndef compute_pca(image_set):\n \"\"\"Calculates and returns PCA of a set of images\n\n Args:\n image_set: List of images read with cv2.imread in np.uint8 format\n\n Returns:\n PCA for the set of images\n \"\"\"\n\n # Check for valid input\n assert(image_set[0].dtype == np.uint8)\n\n # Reshape data into single array\n reshaped_data = np.concatenate([image\n for pixels in image_set for image in\n pixels])\n\n # Convert to float and normalize the data between [0, 1]\n reshaped_data = (reshaped_data / 255.0).astype(np.float32)\n\n # Calculate covariance, eigenvalues, and eigenvectors\n # np.cov calculates covariance around the mean, so no need to shift the\n # data\n covariance = np.cov(reshaped_data.T)\n e_vals, e_vecs = np.linalg.eigh(covariance)\n\n # svd can also be used instead\n # U, S, V = np.linalg.svd(mean_data)\n\n pca = np.sqrt(e_vals) * e_vecs\n\n return pca\n\n\ndef add_pca_jitter(img_data, pca):\n \"\"\"Adds a multiple of the principle components,\n with magnitude from a Gaussian distribution with mean 0 and stdev 0.1\n\n\n Args:\n img_data: Original image in read with cv2.imread in np.uint8 format\n pca: PCA calculated with compute_PCA for the image set\n\n Returns:\n Image with added noise\n \"\"\"\n\n # Check for valid input\n assert (img_data.dtype == np.uint8)\n\n # Make a copy of the image data\n new_img_data = np.copy(img_data).astype(np.float32) / 255.0\n\n # Calculate noise by multiplying pca with magnitude,\n # then sum horizontally since eigenvectors are in columns\n magnitude = np.random.randn(3) * 0.1\n noise = (pca * magnitude).sum(axis=1)\n\n # Add the noise to the image, and clip to valid range [0, 1]\n new_img_data = new_img_data + noise\n np.clip(new_img_data, 0.0, 1.0, out=new_img_data)\n\n # Change back to np.uint8\n new_img_data = (new_img_data * 255).astype(np.uint8)\n\n return new_img_data\n\n\ndef apply_pca_jitter(image_in):\n \"\"\"Applies PCA jitter or random noise to a single image\n\n Args:\n image_in: Image to modify\n\n Returns:\n Modified image\n \"\"\"\n image_in = np.asarray([image_in], dtype=np.uint8)\n\n pca = compute_pca(image_in)\n image_out = add_pca_jitter(image_in, pca)\n\n return image_out\n\n" ]
[ [ "numpy.fliplr", "numpy.linalg.eigh", "numpy.asarray", "numpy.copy", "numpy.random.randn", "numpy.logical_not", "numpy.clip", "numpy.sqrt", "numpy.concatenate", "numpy.cov" ] ]
streamlit-badge-bot/a3-thanos
[ "3c673ce6a1321a9229a34a09e4e5f29313825e4f" ]
[ "streamlit_app.py" ]
[ "import streamlit as st\nimport pandas as pd\nimport altair as alt\nfrom vega_datasets import data\n\nst.title(\"Let's analyze some CO2 emission data &#x1f30e\")\nMAX_WIDTH = 1000\[email protected] # add caching so we load the data only once\ndef load_data():\n climate_url = \"https://raw.githubusercontent.com/ZeningQu/World-Bank-Data-by-Indicators/master/climate-change/climate-change.csv\"\n country_url = \"https://raw.githubusercontent.com/ZeningQu/World-Bank-Data-by-Indicators/master/climate-change/Metadata_Country_API_19_DS2_en_csv_v2_10137883.csv\"\n climate_df = pd.read_csv(climate_url)\n climate_df.set_index(\"Country Code\")\n country_df = pd.read_csv(country_url)\n country_df.set_index(\"Country Code\")\n climate = climate_df.merge(country_df, on='Country Code', how='inner')\n\n country_loc = \"https://gist.githubusercontent.com/tadast/8827699/raw/3cd639fa34eec5067080a61c69e3ae25e3076abb/countries_codes_and_coordinates.csv\"\n country_loc_df = pd.read_csv(country_loc)\n country_loc_df.replace('\"', '', regex=True, inplace=True)\n climate = climate.merge(country_loc_df, left_on=\"Country Name\", right_on=\"Country\", how=\"inner\")\n return climate\n\n\ndf = load_data()\ncountry_map_df = pd.read_table(\n \"https://raw.githubusercontent.com/KoGor/Map-Icons-Generator/master/data/world-110m-country-names.tsv\")\ncountries = alt.topo_feature(data.world_110m.url, 'countries')\n\nalt.data_transformers.disable_max_rows()\n\n\ndef preprocess_data(df):\n df = df.rename(columns={\"CO2 emissions from gaseous fuel consumption (% of total)\": \"gaseous fuel % of total\",\n \"CO2 emissions from liquid fuel consumption (% of total)\": \"liquid fuel % of total\",\n \"CO2 emissions from solid fuel consumption (% of total)\": \"solid fuel % of total\",\n \"CO2 emissions (metric tons per capita)\": \"CO2 emissions per capita\",\n \"CO2 emissions (kg per PPP $ of GDP)\" : \"CO2 emissions per GDP\",\n \"Renewable electricity output (% of total electricity output)\":\"Renewable electricity output % of total\",\n \"Terrestrial protected areas (% of total land area)\": \"Terrestrial protected areas % of total\"})\n df = df[df['Year'] <= 2011]\n df = df[df['Year'] > 1990]\n df = df[df['CO2 emissions (kt)'] > 0]\n # df = df[df[\"Population growth (annual %)\"] >= 0]\n return df\n\n\ndef world_map(highlight, highlight2):\n slider = alt.binding_range(min=1991, max=2011, step=1)\n select_year = alt.selection_single(name=\"Year\", fields=['Year'],\n bind=slider, init={'Year': 2011})\n\n map = alt.Chart(df).mark_geoshape(\n stroke='#aaa', strokeWidth=0.25\n ).encode(\n color=alt.condition(highlight2 | highlight, 'CO2 emissions (kt):Q', alt.value('lightgrey'), scale=alt.Scale(scheme='redyellowblue', reverse=True)),\n tooltip=[\"Country Name\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\"]\n ).transform_lookup(\n lookup='Country Name',\n from_=alt.LookupData(\n \"https://raw.githubusercontent.com/KoGor/Map-Icons-Generator/master/data/world-110m-country-names.tsv\",\n 'name', ['id', \"name\"])\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(countries, 'id', fields=[\"id\", \"type\", \"properties\", \"geometry\"])\n ).project(\n type=\"equirectangular\"\n ).properties(\n width=1100,\n height=650,\n title='worldwide CO2 total emissions and emissions per capita'\n ).add_selection(highlight, highlight2)\n\n percapita = alt.Chart(df).mark_circle(\n opacity=0.4 ,\n ).encode(\n size=alt.Size('CO2 emissions per capita:Q', scale=alt.Scale(range=[10, 3000])),\n color=alt.condition(highlight2 | 
highlight, alt.value('red'), alt.value('lightgrey')),\n longitude='Longitude (average):Q',\n latitude='Latitude (average):Q',\n tooltip = [\"Country Name\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\"]\n ).transform_lookup(\n lookup='Country Name',\n from_=alt.LookupData(\n \"https://raw.githubusercontent.com/KoGor/Map-Icons-Generator/master/data/world-110m-country-names.tsv\",\n 'name', ['id', \"name\"])\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(countries, 'id', fields=[\"id\", \"type\", \"properties\", \"geometry\"])\n ).project(\n type=\"equirectangular\"\n ).properties(\n width=900,\n height=400,\n )\n return alt.layer(map, percapita) \\\n .add_selection(select_year) \\\n .transform_filter(select_year)\n\n\ndef next_block():\n st.write(\"\\n\")\n st.write(\"\\n\")\n st.write(\"\\n\")\n st.write(\"\\n\")\n st.write(\"\\n\")\n st.write(\"\\n\")\n\n\ndef process_data(df,dataset):\n df2 = df\n\n df2 = df2[df2[\"Country Name\"].isin(dataset)]\n\n\n return df2.melt(id_vars=[\"Country Name\", \"Year\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\"],\n value_vars=[\"solid fuel % of total\", \"liquid fuel % of total\", \"gaseous fuel % of total\"],\n var_name=\"type\",\n value_name=\"CO2 emissions from different consumptions (%)\")\n\n\ndef scatter_plot(df,picked_interval, single_select):\n point = alt.Chart(df).mark_circle().encode(\n x=alt.X('Year:O', title=\"Year\"),\n y=alt.Y('CO2 emissions (kt)', title='Total CO2 emissions (kt)', scale=alt.Scale(zero=False, padding=1)),\n # color = alt.Color('Country Name:N',scale=alt.Scale(domain=dataset,type='ordinal'))\n # color = alt.Color('CO2 emissions (kt):Q')\n color=alt.condition(picked_interval|single_select, \"CO2 emissions (kt):Q\", alt.value(\"lightgray\"),\n scale=alt.Scale(scheme='redyellowblue', reverse=True), title=\"Total CO2 emissions (kt)\")\n , size=alt.Size('CO2 emissions per capita:Q',\n scale=alt.Scale(range=[300, 1000])),\n tooltip=[\"Country Name\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\", \"Year\"]\n ).add_selection(\n single_select\n )\n\n line = alt.Chart(df).mark_line(\n # strokeWidth=0.7\n ).encode(\n x=alt.X('Year:N', title=\"Year\"),\n y=alt.Y('CO2 emissions (kt):Q', title='Total CO2 emissions (kt)'),\n color = alt.condition(picked_interval | single_select, \"Country Name\", alt.value(\"lightgray\"), legend=None),\n # color = alt.Color('CO2 emissions (kt):Q')\n # color=alt.condition(picked_interval, \"CO2 emissions (kt):Q\", alt.value(\"lightgray\"),\n # scale=alt.Scale(scheme='redyellowblue', reverse=True), title=\"Total CO2 emissions (kt)\")\n size=alt.condition(~(picked_interval | single_select), alt.value(1), alt.value(3)),\n tooltip=[\"Country Name\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\", \"Year\"]\n ).properties(\n width=650,\n height=500,\n title='CO2 total emission and emission per capita overtime'\n )\n\n labels = alt.Chart(df).mark_text(align='center', dx=-20, dy=-25).encode(\n alt.X('Year:O', aggregate='max'),\n alt.Y('CO2 emissions (kt)', aggregate={'argmax': 'Year'}),\n alt.Text('Country Name'),\n alt.Color('CO2 emissions (kt):Q', aggregate={'argmax': 'Year'},\n scale=alt.Scale(scheme='redyellowblue', reverse=True), legend=None),\n size=alt.condition(~(single_select), alt.value(17), alt.value(20)),\n ).properties(title='CO2 total emission and emission per capita', width=600)\n\n points = line+point+labels\n return points\n\n\ndef shape_plot(df, single_select):\n shape = alt.Chart(df).mark_circle(\n opacity=0.35\n ).encode(\n alt.X('CO2 emissions 
(kt):Q'),\n alt.Y('CO2 emissions per capita:Q'),\n color=alt.condition(single_select, 'Country Name:N', alt.value('lightgrey'), scale=alt.Scale(scheme=\"tableau10\")),\n shape=alt.Shape('Country Name:N', legend=None),\n size=alt.value(250),\n ).properties(\n width=300,\n height=250,\n title='CO2 total emissions and emissions per capita'\n )\n\n shape_labels = shape.mark_text(\n align='center',\n baseline='middle',\n dy=-25\n ).encode(\n text='Country Name',\n size=alt.value(15)\n )\n shapes = shape # + shape_labels\n return shapes\n\n\n\ndef world_map_for_factors(highlight, dataset, select_year):\n\n cols=alt.hconcat()\n for val in dataset:\n map = alt.Chart(df).mark_geoshape(\n stroke='#aaa', strokeWidth=0.25\n ).encode(\n x = alt.X(\"Country Name\"),\n color=alt.condition(highlight, val, alt.value('lightgrey'), scale=alt.Scale(scheme='yelloworangered'), title=\"\"),\n tooltip=[\"Country Name\"] + dataset\n ).transform_lookup(\n lookup='Country Name',\n from_=alt.LookupData(\n \"https://raw.githubusercontent.com/KoGor/Map-Icons-Generator/master/data/world-110m-country-names.tsv\",\n 'name', ['id', \"name\"])\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(countries, 'id', fields=[\"id\", \"type\", \"properties\", \"geometry\"])\n ).project(\n type=\"equirectangular\"\n ).properties(\n width=500,\n height=200,\n title=val,\n ).add_selection(select_year, highlight) \\\n .transform_filter(select_year)\n\n cols &= map\n return cols.resolve_scale(color='independent')\n\ndef total_trend(highlight, highlight2):\n total = alt.Chart(df).mark_bar().encode(\n alt.X('Year:N', title=\"Year\"),\n alt.Y('CO2 emissions (kt)', title='Total CO2 emissions (kt)'),\n color=alt.Color('Country Name', scale=alt.Scale(scheme=\"set3\"), title='Countries'),\n order=alt.Order(\n # Sort the segments of the bars by this field\n 'CO2 emissions (kt)',\n sort='ascending'\n ),\n tooltip=[\"Country Name\", 'CO2 emissions (kt)']\n ).properties(\n width=530,\n height=350,\n title='Total CO2 emissions world trend'\n ).transform_filter(highlight | highlight2)\n return total\n\ndef percapita_trend(highlight, highlight2):\n total = alt.Chart(df).mark_bar().encode(\n alt.X('Year:N', title=\"Year\"),\n alt.Y('CO2 emissions per capita', title='CO2 emissions per capita (kt)'),\n color=alt.Color('Country Name', scale=alt.Scale(scheme=\"set3\"), title='Countries'),\n order=alt.Order(\n # Sort the segments of the bars by this field\n 'CO2 emissions per capita',\n sort='ascending'\n ),\n tooltip=[\"Country Name\", 'CO2 emissions per capita']\n ).properties(\n width=530,\n height=350,\n title='CO2 emissions per capita world trend'\n ).transform_filter(highlight | highlight2)\n return total\n\n\ndef step1_introduction():\n st.header(\"Step1: What is CO2 Emissions?\")\n st.write(\"What is CO2 Emissions? Why is it important? Let's watch a short introduction video from BBC!\")\n st.video(\"https://www.youtube.com/watch?v=rFw8MopzXdI&ab_channel=BBCNews\")\n next_block()\n st.header(\"Dataset Description\")\n st.write('''\n This dataset comes from [WorldBank]\n (https://github.com/ZeningQu/World-Bank-Data-by-Indicators) It covers over 174 countries and 50 indicators.\n ''')\n if st.checkbox(\"Show Raw Data\"):\n st.write(df)\n\n\ndef step2_wordwide_trend():\n # next_block()\n st.header(\"Step2: Explore the worldwide CO2 emissions trend!\")\n st.write(\"Tips:\")\n st.write(\n \"1. Put your mouse on the country for detailed information. 
The stacked bar plots below show total CO2 emissions and emissions per capita for all years.\")\n st.write(\"2. Press shift and select multiple countries for comparison.\")\n st.write(\"3. Try to play with the year slide bar below!\")\n highlight1 = alt.selection_multi(on='click', fields=['Country Name'], empty='all')\n highlight2 = alt.selection_single(on='mouseover', fields=['Country Name'], empty='all')\n st.write((world_map(highlight1, highlight2) & alt.hconcat(total_trend(highlight1, highlight2),\n percapita_trend(highlight1, highlight2))).resolve_scale(\n y='independent',\n size='independent'\n ))\n\ndef step3_co2_emissions_sources():\n # next_block()\n st.header(\"Step3: CO2 emissions from different consumptions\")\n st.write(\"Tips:\")\n st.write(\"1. Add countries to the plot and compare!\")\n st.write(\n \"2. Hover your mouse over points on the left plot for detailed information and corresponding CO2 emissions sources.\")\n st.write(\n \"3. Select year interval on the left plot and explore the change of CO2 emissions sources overtime for each country!\")\n\n dataset = st.multiselect(\"Choose countries you want to explore!\", country_map_df[\"name\"].to_list(),\n [\"China\", \"United States\", \"India\", \"Qatar\"])\n df2 = process_data(df,dataset)\n picked_interval = alt.selection_interval(encodings=[\"x\"])\n single_select = alt.selection(type=\"single\",on='mouseover', fields=['Country Name'])\n points = scatter_plot(df2,picked_interval, single_select)\n shapes = shape_plot(df2, single_select)\n\n concat = alt.vconcat(\n alt.Chart(df2).mark_bar(\n opacity=0.9\n ).encode(\n alt.X(\"mean(CO2 emissions from different consumptions (%))\"),\n alt.Y('Country Name', title=\"\"),\n color = alt.condition(single_select, 'type', alt.value('lightgrey'),scale=alt.Scale(scheme=\"tableau10\"), title=\"type\"),\n # color = single_select)\n # alt.Color('type', scale=alt.Scale(scheme=\"tableau10\"), title='type'),\n ).properties(\n width=300,\n height=150,\n title='CO2 emissions from different consumptions'\n ).transform_filter(picked_interval), shapes.transform_filter(picked_interval)\n ).resolve_scale(\n color='independent'\n )\n\n vconcat = alt.hconcat(\n points.add_selection(picked_interval), concat\n ).resolve_scale(\n color='independent'\n , size='independent'\n )\n\n st.write(vconcat)\n\ndef step4_related_factors():\n # next_block()\n st.header(\"Step4: Factors that may affect CO2 emissions\")\n st.write(\"Tips:\")\n st.write(\"1. Add indicators you want to compare with CO2 emissions!\")\n st.write(\"2. Put your mouse on a country and compare across indicators!\")\n st.write(\"3. 
Remember to play with the year slide bar :)\")\n\n slider = alt.binding_range(min=1991, max=2011, step=1)\n select_year = alt.selection_single(name=\"Year\", fields=['Year'],\n bind=slider, init={'Year': 2011})\n highlight = alt.selection_single(on='mouseover', fields=['Country Name'],\n empty='all') # init={\"Country Name\": \"United States\"})\n\n dataset2 = st.multiselect(\"Choose factors to compare!\",\n [\"CO2 emissions per GDP\", \"CO2 emissions (kt)\", \"CO2 emissions per capita\",\n \"Urban population (% of total)\",\n \"Renewable energy consumption (% of total final energy consumption)\",\n \"Forest area (% of land area)\", \"Marine protected areas (% of territorial waters)\",\n \"Population growth (annual %)\", \"Renewable electricity output % of total\",\n \"Terrestrial protected areas % of total\",\n \"Total greenhouse gas emissions (kt of CO2 equivalent)\"],\n [\"CO2 emissions (kt)\", \"CO2 emissions per GDP\",\n \"Renewable electricity output % of total\"])\n\n st.write(alt.hconcat(world_map_for_factors(highlight, dataset2, select_year), alt.Chart(df).mark_point().encode(\n alt.X(alt.repeat(\"column\"), type='quantitative'),\n alt.Y(alt.repeat(\"row\"), type='quantitative'),\n color='Country Name:N',\n ).properties(\n width=160,\n height=160,\n ).repeat(\n row=dataset2,\n column=dataset2,\n ).transform_filter(select_year).interactive()))\n\n\n################# main plots ############\n\n\ndf = preprocess_data(df)\n\n\nst.sidebar.write('Follow the steps below and begin to explore!')\n\nfunctions = {\n 'Step1: What is CO2 emissions': lambda: step1_introduction(),\n 'Step2: Worldwide CO2 emissions': lambda: step2_wordwide_trend(),\n 'Step3: CO2 emissions sources': lambda: step3_co2_emissions_sources(),\n 'Step4: CO2 emissions related factors': lambda: step4_related_factors()\n}\nmenu = st.sidebar.selectbox(\n \"Menu\", list(functions.keys())\n)\nfunctions[menu]()\n\n\n" ]
[ [ "pandas.read_table", "pandas.read_csv" ] ]
minhhoccode/Interpolate-with-flask
[ "7f8cb8f551e9bd36beca911e0987b6c1bc168356" ]
[ "NoiSuy.py" ]
[ "from sympy import *\r\nimport numpy as np\r\nimport array as arr\r\n\r\ndef TongQuat(X, Y):\r\n list ( zip(X , Y ) )\r\n x = symbols('x')\r\n m = len(X)\r\n A = [[X[i] ** j for j in range (m) ] for i in range (m) ] \r\n kq = np.linalg.solve(A,Y)\r\n hamSo = ''\r\n for i in range (len(kq)):\r\n hamSo += '+%d*(x ** %d)' %(kq[i], i)\r\n P = lambda x: eval(hamSo )\r\n f1 = str(P(x))\r\n f1 = eval(f1)\r\n f1 = latex(f1)\r\n return f1, A\r\n\r\n\r\n\r\ndef Newton(X, Y, pp):\r\n X = [0.0,0.5,1.0,1.5,2.0] #mốc nội suy\r\n Y = [-1.0,0.125,1.0,2.375,5.0]\r\n n = len(X)\r\n h = X[1]-X[0]\r\n x , t = symbols ('x t')\r\n sp = [ [d(k, i, Y) for i in range(n-k)] for k in range (n)]\r\n if pp == 'Newton':\r\n P = Y[0]\r\n for k in range(1, n): # k chạy từ 1 tới n-1\r\n prod = d(k, 0,Y)/factorial(k)\r\n for i in range(k):\r\n prod *= t - i\r\n P += prod\r\n P = P . subs (t , ( x - X [0]) / h) . expand()\r\n if pp == 'Newton Lùi':\r\n m = n-1\r\n P = Y[m]\r\n for k in range(1, n): \r\n prod = d(k, m-k, Y)/factorial(k)\r\n for i in range(k):\r\n prod *= t + i\r\n P += prod\r\n P = P.subs(t, (x - X[m]) / h).expand()\r\n print(P)\r\n f1 = latex(P)\r\n return f1, sp\r\n\r\ndef d (k , i, Y ) :\r\n if k == 0:\r\n return Y[i]\r\n return d (k -1,i +1, Y ) - d (k -1 , i, Y )\r\n\r\n\r\ndef checkCondition(X, Y):\r\n n = len(X)\r\n h = X[1]-X[0]\r\n if(len(X) != len(Y)):\r\n return False\r\n for i in range(0,n-1):\r\n if(X[i+1] - X[i] != h):\r\n return False\r\n return True\r\n\r\ndef Lagrange(X,Y):\r\n n = len(X)\r\n x = symbols('x')\r\n P = 0\r\n for i in range (n) :\r\n P += Y [i ] * L (i , x, n , X )\r\n P = P.expand()\r\n f1 = latex(P)\r\n print(f1)\r\n s = []\r\n s1 = [] \r\n for i in range(n):\r\n a, b = L(i, x, n, X), L(i, x, n , X).expand()\r\n s.append( latex(a))\r\n s1.append( latex(b))\r\n return f1, s, s1\r\ndef L (i , x, n, X ) :\r\n prod = 1\r\n for j in range (n) :\r\n if j != i :\r\n prod *= ( x - X[ j ]) / ( X [ i ] - X [ j ])\r\n return prod\r\n\r\n" ]
[ [ "numpy.linalg.solve" ] ]
Xi-L/PMOCO
[ "81dc7c66e5bee34f401d16c29cc39b5e2c3a62e6" ]
[ "MOKP/POMO/test_mokp_n100.py" ]
[ "##########################################################################################\r\n# Machine Environment Config\r\nDEBUG_MODE = False\r\nUSE_CUDA = not DEBUG_MODE\r\nCUDA_DEVICE_NUM = 0\r\n\r\n##########################################################################################\r\n# Path Config\r\nimport os\r\nimport sys\r\nimport torch\r\nimport numpy as np\r\n\r\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\r\nsys.path.insert(0, \"..\") # for problem_def\r\nsys.path.insert(0, \"../..\") # for utils\r\n\r\n##########################################################################################\r\n# import\r\nimport logging\r\nfrom utils.utils import create_logger, copy_all_src\r\n\r\n\r\nfrom MOKPTester import KPTester as Tester\r\nfrom MOKProblemDef import get_random_problems\r\n##########################################################################################\r\nimport time\r\nimport hvwfg\r\nimport pickle\r\n\r\nfrom matplotlib import pyplot as plt\r\nimport matplotlib as mpl\r\nmpl.style.use('default')\r\n##########################################################################################\r\n# parameters\r\nenv_params = {\r\n 'problem_size': 100,\r\n 'pomo_size': 100,\r\n}\r\n\r\nmodel_params = {\r\n 'embedding_dim': 128,\r\n 'sqrt_embedding_dim': 128**(1/2),\r\n 'encoder_layer_num': 6,\r\n 'qkv_dim': 16,\r\n 'head_num': 8,\r\n 'logit_clipping': 10,\r\n 'ff_hidden_dim': 512,\r\n 'eval_type': 'argmax',\r\n}\r\n\r\ntester_params = {\r\n 'use_cuda': USE_CUDA,\r\n 'cuda_device_num': CUDA_DEVICE_NUM,\r\n 'model_load': {\r\n 'path': './result/saved_kp100_model', # directory path of pre-trained model and log files saved.\r\n 'epoch': 200 # epoch version of pre-trained model to laod.\r\n },\r\n 'test_episodes': 100, \r\n 'test_batch_size': 100,\r\n 'augmentation_enable': True,\r\n 'aug_factor': 1, \r\n 'aug_batch_size': 100 \r\n}\r\nif tester_params['augmentation_enable']:\r\n tester_params['test_batch_size'] = tester_params['aug_batch_size']\r\n\r\nlogger_params = {\r\n 'log_file': {\r\n 'desc': 'test_kp_n100',\r\n 'filename': 'run_log'\r\n }\r\n}\r\n\r\n##########################################################################################\r\ndef _set_debug_mode():\r\n global tester_params\r\n tester_params['test_episodes'] = 100\r\n\r\n\r\ndef _print_config():\r\n logger = logging.getLogger('root')\r\n logger.info('DEBUG_MODE: {}'.format(DEBUG_MODE))\r\n logger.info('USE_CUDA: {}, CUDA_DEVICE_NUM: {}'.format(USE_CUDA, CUDA_DEVICE_NUM))\r\n [logger.info(g_key + \"{}\".format(globals()[g_key])) for g_key in globals().keys() if g_key.endswith('params')]\r\n \r\n##########################################################################################\r\ndef main(n_sols = 101):\r\n \r\n timer_start = time.time()\r\n logger_start = time.time()\r\n \r\n if DEBUG_MODE:\r\n _set_debug_mode()\r\n \r\n create_logger(**logger_params)\r\n _print_config()\r\n \r\n tester = Tester(env_params=env_params,\r\n model_params=model_params,\r\n tester_params=tester_params)\r\n \r\n copy_all_src(tester.result_folder)\r\n \r\n sols = np.zeros([n_sols, 2])\r\n \r\n shared_problem = get_random_problems(tester_params['test_episodes'], env_params['problem_size'])\r\n \r\n for i in range(n_sols):\r\n pref = torch.zeros(2).cuda()\r\n pref[0] = 1 - 0.01 * i\r\n pref[1] = 0.01 * i\r\n pref = pref / torch.sum(pref)\r\n \r\n score = tester.run(shared_problem,pref)\r\n sols[i] = np.array(score)\r\n \r\n timer_end = time.time()\r\n \r\n total_time = timer_end - timer_start\r\n 
\r\n # MOKP 50\r\n #single_task = [20.12, 20.12]\r\n \r\n # MOKP 100\r\n single_task = [40.45, 40.45]\r\n \r\n # MOKP 200\r\n #single_task = [57.62, 57.62]\r\n \r\n fig = plt.figure()\r\n \r\n plt.axvline(single_task[0],linewidth=3 , alpha = 0.25)\r\n plt.axhline(single_task[1],linewidth=3,alpha = 0.25, label = 'Single Objective KP (DP)')\r\n \r\n plt.plot(sols[:,0],sols[:,1], marker = 'o', c = 'C1',ms = 3, label='Pareto MOCO (Ours)')\r\n \r\n plt.legend()\r\n \r\n #ref = np.array([-15.5,-15.5]) # refpoint: [20.5,20.5] e.g., divide by (20.5 - 15.5) * (20 - 15.5)\r\n ref = np.array([-30,-30]) # refpoint: [40,40] e.g., divide by (40 - 30) * (40 - 30)\r\n #ref = np.array([-40,-40]) # refpoint: [60,60] e.g., divide by (60 - 40) * (60 - 40)\r\n \r\n hv = hvwfg.wfg(-sols.astype(float), ref.astype(float))\r\n \r\n hv_ratio = hv / ((40 - 30) * (40 - 30))\r\n\r\n print('Run Time(s): {:.4f}'.format(total_time))\r\n print('HV Ratio: {:.4f}'.format(hv_ratio))\r\n\r\n##########################################################################################\r\nif __name__ == \"__main__\":\r\n main()" ]
[ [ "torch.sum", "matplotlib.pyplot.axvline", "matplotlib.pyplot.legend", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.axhline", "torch.zeros", "matplotlib.style.use", "numpy.array", "matplotlib.pyplot.plot" ] ]
AntonMu/albert
[ "f928a488380d359520e407e7862b031ffa5bb603" ]
[ "run_classifier.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning on classification tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nfrom albert import classifier_utils\nfrom albert import fine_tuning_utils\nfrom albert import modeling\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import cluster_resolver as contrib_cluster_resolver\nfrom tensorflow.contrib import tpu as contrib_tpu\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\",\n None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\",\n)\n\nflags.DEFINE_string(\n \"albert_config_file\",\n None,\n \"The config json file corresponding to the pre-trained ALBERT model. \"\n \"This specifies the model architecture.\",\n)\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\n \"vocab_file\", None, \"The vocabulary file that the ALBERT model was trained on.\"\n)\n\nflags.DEFINE_string(\n \"spm_model_file\", None, \"The model file for sentence piece tokenization.\"\n)\n\nflags.DEFINE_string(\n \"output_dir\",\n None,\n \"The output directory where the model checkpoints will be written.\",\n)\n\nflags.DEFINE_string(\n \"cached_dir\",\n None,\n \"Path to cached training and dev tfrecord file. \"\n \"The file will be generated if not exist.\",\n)\n\n## Other parameters\nflags.DEFINE_float(\"train_subset\", 1.0, \"The subset of the train set.\")\nflags.DEFINE_float(\"dev_subset\", 1.0, \"The subset of the dev set.\")\nflags.DEFINE_float(\"test_subset\", 1.0, \"The subset of the test set.\")\n\nflags.DEFINE_string(\n \"init_checkpoint\",\n None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\",\n)\n\nflags.DEFINE_string(\n \"albert_hub_module_handle\", None, \"If set, the ALBERT hub module to use.\"\n)\n\nflags.DEFINE_bool(\n \"do_lower_case\",\n True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\",\n)\n\nflags.DEFINE_integer(\n \"max_seq_length\",\n 512,\n \"The maximum total input sequence length after WordPiece tokenization. 
\"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\",\n)\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False, \"Whether to run the model in inference mode on the test set.\"\n)\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_integer(\"train_step\", 1000, \"Total number of training steps to perform.\")\n\nflags.DEFINE_integer(\n \"warmup_step\", 0, \"number of steps to perform linear learning rate warmup for.\"\n)\n\nflags.DEFINE_integer(\n \"save_checkpoints_steps\", 1000, \"How often to save the model checkpoint.\"\n)\n\nflags.DEFINE_integer(\"keep_checkpoint_max\", 5, \"How many checkpoints to keep.\")\n\nflags.DEFINE_integer(\n \"iterations_per_loop\", 1000, \"How many steps to make in each estimator call.\"\n)\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\"optimizer\", \"adamw\", \"Optimizer to use\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\",\n None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\",\n)\n\ntf.flags.DEFINE_string(\n \"tpu_zone\",\n None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\",\n)\n\ntf.flags.DEFINE_string(\n \"gcp_project\",\n None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\",\n)\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\",\n 8,\n \"Only used if `use_tpu` is True. 
Total number of TPU cores to use.\",\n)\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": classifier_utils.ColaProcessor,\n \"mnli\": classifier_utils.MnliProcessor,\n \"mismnli\": classifier_utils.MisMnliProcessor,\n \"mrpc\": classifier_utils.MrpcProcessor,\n \"rte\": classifier_utils.RteProcessor,\n \"sst-2\": classifier_utils.Sst2Processor,\n \"sts-b\": classifier_utils.StsbProcessor,\n \"qqp\": classifier_utils.QqpProcessor,\n \"qnli\": classifier_utils.QnliProcessor,\n \"wnli\": classifier_utils.WnliProcessor,\n }\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\"\n )\n\n if not FLAGS.albert_config_file and not FLAGS.albert_hub_module_handle:\n raise ValueError(\n \"At least one of `--albert_config_file` and \"\n \"`--albert_hub_module_handle` must be set\"\n )\n\n if FLAGS.albert_config_file:\n albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)\n if FLAGS.max_seq_length > albert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the ALBERT model \"\n \"was only trained up to sequence length %d\"\n % (FLAGS.max_seq_length, albert_config.max_position_embeddings)\n )\n else:\n albert_config = None # Get the config from TF-Hub.\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name](\n use_spm=True if FLAGS.spm_model_file else False,\n do_lower_case=FLAGS.do_lower_case,\n )\n\n label_list = processor.get_labels()\n\n tokenizer = fine_tuning_utils.create_vocab(\n vocab_file=FLAGS.vocab_file,\n do_lower_case=FLAGS.do_lower_case,\n spm_model_file=FLAGS.spm_model_file,\n hub_module=FLAGS.albert_hub_module_handle,\n )\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project\n )\n\n is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2\n if FLAGS.do_train:\n iterations_per_loop = int(\n min(FLAGS.iterations_per_loop, FLAGS.save_checkpoints_steps)\n )\n else:\n iterations_per_loop = FLAGS.iterations_per_loop\n run_config = contrib_tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=int(FLAGS.save_checkpoints_steps),\n keep_checkpoint_max=0,\n tpu_config=contrib_tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host,\n ),\n )\n\n train_examples = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(\n FLAGS.data_dir, FLAGS.train_subset\n )\n model_fn = classifier_utils.model_fn_builder(\n albert_config=albert_config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=FLAGS.train_step,\n num_warmup_steps=FLAGS.warmup_step,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu,\n task_name=task_name,\n hub_module=FLAGS.albert_hub_module_handle,\n optimizer=FLAGS.optimizer,\n )\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = contrib_tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n 
eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size,\n )\n\n if FLAGS.do_train:\n cached_dir = FLAGS.cached_dir\n if not cached_dir:\n cached_dir = FLAGS.output_dir\n train_file = os.path.join(cached_dir, task_name + \"_train.tf_record\")\n if not tf.gfile.Exists(train_file):\n classifier_utils.file_based_convert_examples_to_features(\n train_examples,\n label_list,\n FLAGS.max_seq_length,\n tokenizer,\n train_file,\n task_name,\n )\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", FLAGS.train_step)\n train_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.train_batch_size,\n )\n estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_step)\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir, FLAGS.dev_subset)\n num_actual_eval_examples = len(eval_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. So we pad with fake examples which are ignored\n # later on. These do NOT count towards the metric (all tf.metrics\n # support a per-instance weight, and these get a weight of 0.0).\n while len(eval_examples) % FLAGS.eval_batch_size != 0:\n eval_examples.append(classifier_utils.PaddingInputExample())\n\n cached_dir = FLAGS.cached_dir\n if not cached_dir:\n cached_dir = FLAGS.output_dir\n eval_file = os.path.join(cached_dir, task_name + \"_eval.tf_record\")\n if not tf.gfile.Exists(eval_file):\n classifier_utils.file_based_convert_examples_to_features(\n eval_examples,\n label_list,\n FLAGS.max_seq_length,\n tokenizer,\n eval_file,\n task_name,\n )\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\n \" Num examples = %d (%d actual, %d padding)\",\n len(eval_examples),\n num_actual_eval_examples,\n len(eval_examples) - num_actual_eval_examples,\n )\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n assert len(eval_examples) % FLAGS.eval_batch_size == 0\n eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.eval_batch_size,\n )\n\n best_trial_info_file = os.path.join(FLAGS.output_dir, \"best_trial.txt\")\n\n def _best_trial_info():\n \"\"\"Returns information about which checkpoints have been evaled so far.\"\"\"\n if tf.gfile.Exists(best_trial_info_file):\n with tf.gfile.GFile(best_trial_info_file, \"r\") as best_info:\n (\n global_step,\n best_metric_global_step,\n metric_value,\n ) = best_info.read().split(\":\")\n global_step = int(global_step)\n best_metric_global_step = int(best_metric_global_step)\n metric_value = float(metric_value)\n else:\n metric_value = -1\n best_metric_global_step = -1\n global_step = -1\n 
tf.logging.info(\n \"Best trial info: Step: %s, Best Value Step: %s, \" \"Best Value: %s\",\n global_step,\n best_metric_global_step,\n metric_value,\n )\n return global_step, best_metric_global_step, metric_value\n\n def _remove_checkpoint(checkpoint_path):\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = checkpoint_path + \".{}\".format(ext)\n tf.logging.info(\"removing {}\".format(src_ckpt))\n tf.gfile.Remove(src_ckpt)\n\n def _find_valid_cands(curr_step):\n filenames = tf.gfile.ListDirectory(FLAGS.output_dir)\n candidates = []\n for filename in filenames:\n if filename.endswith(\".index\"):\n ckpt_name = filename[:-6]\n idx = ckpt_name.split(\"-\")[-1]\n if int(idx) > curr_step:\n candidates.append(filename)\n return candidates\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n\n if task_name == \"sts-b\":\n key_name = \"pearson\"\n elif task_name == \"cola\":\n key_name = \"matthew_corr\"\n else:\n key_name = \"eval_accuracy\"\n\n global_step, best_perf_global_step, best_perf = _best_trial_info()\n writer = tf.gfile.GFile(output_eval_file, \"w\")\n while global_step < FLAGS.train_step:\n steps_and_files = {}\n filenames = tf.gfile.ListDirectory(FLAGS.output_dir)\n for filename in filenames:\n if filename.endswith(\".index\"):\n ckpt_name = filename[:-6]\n cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)\n if cur_filename.split(\"-\")[-1] == \"best\":\n continue\n gstep = int(cur_filename.split(\"-\")[-1])\n if gstep not in steps_and_files:\n tf.logging.info(\"Add {} to eval list.\".format(cur_filename))\n steps_and_files[gstep] = cur_filename\n tf.logging.info(\"found {} files.\".format(len(steps_and_files)))\n if not steps_and_files:\n tf.logging.info(\n \"found 0 file, global step: {}. 
Sleeping.\".format(global_step)\n )\n time.sleep(60)\n else:\n for checkpoint in sorted(steps_and_files.items()):\n step, checkpoint_path = checkpoint\n if global_step >= step:\n if (\n best_perf_global_step != step\n and len(_find_valid_cands(step)) > 1\n ):\n _remove_checkpoint(checkpoint_path)\n continue\n result = estimator.evaluate(\n input_fn=eval_input_fn,\n steps=eval_steps,\n checkpoint_path=checkpoint_path,\n )\n global_step = result[\"global_step\"]\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n writer.write(\"best = {}\\n\".format(best_perf))\n if result[key_name] > best_perf:\n best_perf = result[key_name]\n best_perf_global_step = global_step\n elif len(_find_valid_cands(global_step)) > 1:\n _remove_checkpoint(checkpoint_path)\n writer.write(\"=\" * 50 + \"\\n\")\n writer.flush()\n with tf.gfile.GFile(best_trial_info_file, \"w\") as best_info:\n best_info.write(\n \"{}:{}:{}\".format(\n global_step, best_perf_global_step, best_perf\n )\n )\n writer.close()\n\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = \"model.ckpt-{}.{}\".format(best_perf_global_step, ext)\n tgt_ckpt = \"model.ckpt-best.{}\".format(ext)\n tf.logging.info(\"saving {} to {}\".format(src_ckpt, tgt_ckpt))\n tf.io.gfile.rename(\n os.path.join(FLAGS.output_dir, src_ckpt),\n os.path.join(FLAGS.output_dir, tgt_ckpt),\n overwrite=True,\n )\n\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(\n FLAGS.data_dir, FLAGS.test_subset\n )\n num_actual_predict_examples = len(predict_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. 
So we pad with fake examples which are ignored\n # later on.\n while len(predict_examples) % FLAGS.predict_batch_size != 0:\n predict_examples.append(classifier_utils.PaddingInputExample())\n\n predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n classifier_utils.file_based_convert_examples_to_features(\n predict_examples,\n label_list,\n FLAGS.max_seq_length,\n tokenizer,\n predict_file,\n task_name,\n )\n\n tf.logging.info(\"***** Running prediction*****\")\n tf.logging.info(\n \" Num examples = %d (%d actual, %d padding)\",\n len(predict_examples),\n num_actual_predict_examples,\n len(predict_examples) - num_actual_predict_examples,\n )\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n predict_drop_remainder = True if FLAGS.use_tpu else False\n predict_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.predict_batch_size,\n )\n\n checkpoint_path = os.path.join(FLAGS.output_dir, \"model.ckpt-best\")\n result = estimator.predict(\n input_fn=predict_input_fn, checkpoint_path=checkpoint_path\n )\n\n output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n output_submit_file = os.path.join(FLAGS.output_dir, \"submit_results.tsv\")\n with tf.gfile.GFile(output_predict_file, \"w\") as pred_writer, tf.gfile.GFile(\n output_submit_file, \"w\"\n ) as sub_writer:\n sub_writer.write(\"index\" + \"\\t\" + \"prediction\\n\")\n num_written_lines = 0\n tf.logging.info(\"***** Predict results *****\")\n for (i, (example, prediction)) in enumerate(zip(predict_examples, result)):\n probabilities = prediction[\"probabilities\"]\n if i >= num_actual_predict_examples:\n break\n output_line = (\n \"\\t\".join(\n str(class_probability) for class_probability in probabilities\n )\n + \"\\n\"\n )\n pred_writer.write(output_line)\n\n if task_name != \"sts-b\":\n actual_label = label_list[int(prediction[\"predictions\"])]\n else:\n actual_label = str(prediction[\"predictions\"])\n sub_writer.write(example.guid + \"\\t\" + actual_label + \"\\n\")\n num_written_lines += 1\n assert num_written_lines == num_actual_predict_examples\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"spm_model_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n" ]
[ [ "tensorflow.compat.v1.gfile.Remove", "tensorflow.compat.v1.gfile.ListDirectory", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.gfile.GFile", "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.compat.v1.flags.DEFINE_string", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.app.run", "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.logging.set_verbosity" ] ]
CsatiZoltan/imagepy
[ "9a60ad3b1e8f79f2dcc47e4f246a4f31a96f99f5" ]
[ "imagepy/tools/Draw/floodfill_tol.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 19 17:35:09 2016\n\n@author: yxl\n\"\"\"\n\nfrom imagepy.core.engine import Tool\nimport numpy as np\nfrom imagepy.core.manager import ColorManager\n# from imagepy.core.draw.fill import floodfill\nfrom skimage.morphology import flood_fill, flood\n\nclass Plugin(Tool):\n title = 'Flood Fill'\n para = {'tor':10, 'con':'8-connect'}\n view = [(int, 'tor', (0,1000), 0, 'torlorance', 'value'),\n (list, 'con', ['4-connect', '8-connect'], str, 'fill', 'pix')]\n \n def mouse_down(self, ips, x, y, btn, **key):\n \n img, color = ips.img, ColorManager.get_front()\n if int(y)<0 or int(x)<0: return\n if int(y)>=img.shape[0] or int(x)>=img.shape[1]: return \n\n ips.snapshot()\n connectivity=(self.para['con']=='8-connect')+1\n img = ips.img.reshape((ips.img.shape+(1,))[:3])\n msk = np.ones(img.shape[:2], dtype=np.bool)\n for i in range(img.shape[2]):\n msk &= flood(img[:,:,i], (int(y),int(x)), \n connectivity=connectivity, tolerance=self.para['tor'])\n img[msk] = np.mean(color) if img.shape[2]==1 else color\n ips.update()\n \n def mouse_up(self, ips, x, y, btn, **key):\n pass\n \n def mouse_move(self, ips, x, y, btn, **key):\n pass\n \n def mouse_wheel(self, ips, x, y, d, **key):\n pass\n\n" ]
[ [ "numpy.ones", "numpy.mean" ] ]
neonbjb/tortoise-tts
[ "a9e64e216d871f52c091465f2a2a8e503737a69c" ]
[ "tortoise/utils/wav2vec_alignment.py" ]
[ "import re\n\nimport torch\nimport torchaudio\nfrom transformers import Wav2Vec2ForCTC, Wav2Vec2FeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor\n\nfrom tortoise.utils.audio import load_audio\n\n\ndef max_alignment(s1, s2, skip_character='~', record=None):\n \"\"\"\n A clever function that aligns s1 to s2 as best it can. Wherever a character from s1 is not found in s2, a '~' is\n used to replace that character.\n\n Finally got to use my DP skills!\n \"\"\"\n if record is None:\n record = {}\n assert skip_character not in s1, f\"Found the skip character {skip_character} in the provided string, {s1}\"\n if len(s1) == 0:\n return ''\n if len(s2) == 0:\n return skip_character * len(s1)\n if s1 == s2:\n return s1\n if s1[0] == s2[0]:\n return s1[0] + max_alignment(s1[1:], s2[1:], skip_character, record)\n\n take_s1_key = (len(s1), len(s2) - 1)\n if take_s1_key in record:\n take_s1, take_s1_score = record[take_s1_key]\n else:\n take_s1 = max_alignment(s1, s2[1:], skip_character, record)\n take_s1_score = len(take_s1.replace(skip_character, ''))\n record[take_s1_key] = (take_s1, take_s1_score)\n\n take_s2_key = (len(s1) - 1, len(s2))\n if take_s2_key in record:\n take_s2, take_s2_score = record[take_s2_key]\n else:\n take_s2 = max_alignment(s1[1:], s2, skip_character, record)\n take_s2_score = len(take_s2.replace(skip_character, ''))\n record[take_s2_key] = (take_s2, take_s2_score)\n\n return take_s1 if take_s1_score > take_s2_score else skip_character + take_s2\n\n\nclass Wav2VecAlignment:\n \"\"\"\n Uses wav2vec2 to perform audio<->text alignment.\n \"\"\"\n def __init__(self):\n self.model = Wav2Vec2ForCTC.from_pretrained(\"jbetker/wav2vec2-large-robust-ft-libritts-voxpopuli\").cpu()\n self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f\"facebook/wav2vec2-large-960h\")\n self.tokenizer = Wav2Vec2CTCTokenizer.from_pretrained('jbetker/tacotron-symbols')\n\n def align(self, audio, expected_text, audio_sample_rate=24000):\n orig_len = audio.shape[-1]\n\n with torch.no_grad():\n self.model = self.model.cuda()\n audio = audio.to('cuda')\n audio = torchaudio.functional.resample(audio, audio_sample_rate, 16000)\n clip_norm = (audio - audio.mean()) / torch.sqrt(audio.var() + 1e-7)\n logits = self.model(clip_norm).logits\n self.model = self.model.cpu()\n\n logits = logits[0]\n pred_string = self.tokenizer.decode(logits.argmax(-1).tolist())\n\n fixed_expectation = max_alignment(expected_text.lower(), pred_string)\n w2v_compression = orig_len // logits.shape[0]\n expected_tokens = self.tokenizer.encode(fixed_expectation)\n expected_chars = list(fixed_expectation)\n if len(expected_tokens) == 1:\n return [0] # The alignment is simple; there is only one token.\n expected_tokens.pop(0) # The first token is a given.\n expected_chars.pop(0)\n\n alignments = [0]\n def pop_till_you_win():\n if len(expected_tokens) == 0:\n return None\n popped = expected_tokens.pop(0)\n popped_char = expected_chars.pop(0)\n while popped_char == '~':\n alignments.append(-1)\n if len(expected_tokens) == 0:\n return None\n popped = expected_tokens.pop(0)\n popped_char = expected_chars.pop(0)\n return popped\n\n next_expected_token = pop_till_you_win()\n for i, logit in enumerate(logits):\n top = logit.argmax()\n if next_expected_token == top:\n alignments.append(i * w2v_compression)\n if len(expected_tokens) > 0:\n next_expected_token = pop_till_you_win()\n else:\n break\n\n pop_till_you_win()\n if not (len(expected_tokens) == 0 and len(alignments) == len(expected_text)):\n torch.save([audio, 
expected_text], 'alignment_debug.pth')\n assert False, \"Something went wrong with the alignment algorithm. I've dumped a file, 'alignment_debug.pth' to\" \\\n \"your current working directory. Please report this along with the file so it can get fixed.\"\n\n # Now fix up alignments. Anything with -1 should be interpolated.\n alignments.append(orig_len) # This'll get removed but makes the algorithm below more readable.\n for i in range(len(alignments)):\n if alignments[i] == -1:\n for j in range(i+1, len(alignments)):\n if alignments[j] != -1:\n next_found_token = j\n break\n for j in range(i, next_found_token):\n gap = alignments[next_found_token] - alignments[i-1]\n alignments[j] = (j-i+1) * gap // (next_found_token-i+1) + alignments[i-1]\n\n return alignments[:-1]\n\n def redact(self, audio, expected_text, audio_sample_rate=24000):\n if '[' not in expected_text:\n return audio\n splitted = expected_text.split('[')\n fully_split = [splitted[0]]\n for spl in splitted[1:]:\n assert ']' in spl, 'Every \"[\" character must be paired with a \"]\" with no nesting.'\n fully_split.extend(spl.split(']'))\n\n # At this point, fully_split is a list of strings, with every other string being something that should be redacted.\n non_redacted_intervals = []\n last_point = 0\n for i in range(len(fully_split)):\n if i % 2 == 0:\n end_interval = max(0, last_point + len(fully_split[i]) - 1)\n non_redacted_intervals.append((last_point, end_interval))\n last_point += len(fully_split[i])\n\n bare_text = ''.join(fully_split)\n alignments = self.align(audio, bare_text, audio_sample_rate)\n\n output_audio = []\n for nri in non_redacted_intervals:\n start, stop = nri\n output_audio.append(audio[:, alignments[start]:alignments[stop]])\n return torch.cat(output_audio, dim=-1)\n" ]
[ [ "torch.save", "torch.no_grad", "torch.cat" ] ]
thunlp/EntityDuetNeuralRanking
[ "3efbc1f6ccffb5b149d08030fde8dd059fce0fa1" ]
[ "data/preprocess.py" ]
[ "''' Handling the data io '''\nimport argparse\nimport torch\nimport sys\n\n\n\ndef read_vocab_idx(vocab_path):\n ''' build vocab '''\n\n word2idx = {\"_PAD\" : 0}\n\n with open(vocab_path) as f:\n for line in f:\n tokens = line.strip(\"\\n\").split(\"\\t\")\n no = int(tokens[1])\n word2idx[tokens[0]] = no\n\n print('[Info] Trimmed vocabulary size = {},'.format(len(word2idx)))\n return word2idx\n\ndef read_ent_des(inst_file):\n ent_des_dict = dict()\n ent_des = list()\n ent_des.append([0] * 20)\n with open(inst_file) as f:\n for step, line in enumerate(f):\n tokens = line.strip().split()\n ent_des_dict[tokens[0]] = step + 1\n ent_des.append([int(token) for token in tokens[1:]][:20])\n return ent_des, ent_des_dict\n\ndef read_ent_car(inst_file):\n ent_wrd_dict = dict()\n ent_wrd = list()\n ent_wrd.append([0] * 10)\n with open(inst_file) as f:\n for step, line in enumerate(f):\n tokens = line.strip().split()\n ent_wrd_dict[tokens[0]] = step + 1\n ent_wrd.append([int(token) for token in tokens[1:]][:10])\n return ent_wrd, ent_wrd_dict\n\n\ndef main():\n ''' Main function '''\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-ent_des', required=True)\n parser.add_argument('-ent_car', required=True)\n parser.add_argument('-save_data', required=True)\n parser.add_argument('-wrd_vocab', required=True)\n parser.add_argument('-ent_vocab', required=True)\n parser.add_argument('-car_vocab', required=True)\n\n opt = parser.parse_args()\n wrd2idx = read_vocab_idx(opt.wrd_vocab)\n ent2idx = read_vocab_idx(opt.ent_vocab)\n car2idx = read_vocab_idx(opt.car_vocab)\n ent_des, ent_des_dict = read_ent_des(opt.ent_des)\n ent_wrd, ent_wrd_dict = read_ent_car(opt.ent_car)\n data = {\n 'settings': opt,\n 'wrd2idx': wrd2idx,\n 'ent2idx': ent2idx,\n 'car2idx': car2idx,\n 'ent_des_dict' : ent_des_dict,\n 'ent_des' : ent_des,\n 'ent_wrd_dict': ent_wrd_dict,\n 'ent_wrd': ent_wrd}\n\n print('[Info] Dumping the processed data to pickle file', opt.save_data)\n torch.save(data, opt.save_data)\n print('[Info] Finish.')\n\nif __name__ == '__main__':\n #reload(sys)\n #sys.setdefaultencoding('utf-8')\n main()" ]
[ [ "torch.save" ] ]
fantamat/gtrain
[ "9538697768deb0f88c3efdbd617e882d0ecc2bc4" ]
[ "gtrain/utils.py" ]
[ "import numpy as np\nimport os\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n\n\ndef get_loss_and_accuracy(save_dir):\n \"\"\"\n loads scalars from training procedure saved in summaries created by gtrain\n :param save_dir: same as save_dir in gtrain method\n :return: dict with\n \"val_acc\": vector of validation accuracies\n \"val_loss\": vector of loss\n \"val_stem\": step in which the record was made\n \"val_timestamp\": time in which the record was made\n \"train_acc\": vector of validation accuracies\n \"train_loss\": vector of loss\n \"train_stem\": step in which the record was made\n \"train_timestamp\": time in which the record was made\n \"\"\"\n def scallarEvent_list_2_dict(sel):\n wall_time = list()\n step = list()\n value = list()\n for se in sel:\n wall_time.append(se.wall_time)\n step.append(se.step)\n value.append(se.value)\n return {\n \"wall_time\": wall_time,\n \"step\": step,\n \"value\": value,\n }\n\n event_acc = EventAccumulator(os.path.join(save_dir, \"summaries\", \"train\"))\n event_acc.Reload()\n train_loss = scallarEvent_list_2_dict(event_acc.Scalars(\"loss\"))\n train_acc = scallarEvent_list_2_dict(event_acc.Scalars(\"accuracy\"))\n\n event_acc = EventAccumulator(os.path.join(save_dir, \"summaries\", \"dev\"))\n event_acc.Reload()\n val_loss = scallarEvent_list_2_dict(event_acc.Scalars(\"loss\"))\n val_acc = scallarEvent_list_2_dict(event_acc.Scalars(\"accuracy\"))\n return {\n \"train_loss\": train_loss[\"value\"],\n \"train_acc\": train_acc[\"value\"],\n \"train_step\": train_loss[\"step\"],\n \"train_timestamp\": train_loss[\"wall_time\"],\n \"val_loss\": val_loss[\"value\"],\n \"val_acc\": val_acc[\"value\"],\n \"val_step\": val_loss[\"step\"],\n \"val_timestamp\": val_loss[\"wall_time\"],\n }\n\n\ndef confmat(y0, y1, num_classes=None):\n \"\"\"\n compute confusion matrix for y1 and y2 does not meter if either of them is in vector or integer form\n :param y0: list of - labels or vector of probabilities\n :param y1: list of - labels or vector of probabilities\n :param num_classes: number of classes if is not defined takes maximal value in labels as the highest class\n :return: confusion matrix\n \"\"\"\n if not isinstance(y0[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):\n y0 = np.argmax(y0, axis=1)\n elif isinstance(y0, list):\n y0 = np.array(y0)\n if not isinstance(y1[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):\n y1 = np.argmax(y1, axis=1)\n elif isinstance(y1, list):\n y1 = np.array(y1)\n labels_num = max(max(y0), max(y1)) + 1 if num_classes is None else num_classes\n out = np.zeros((labels_num, labels_num))\n for i in range(labels_num):\n for j in range(labels_num):\n out[i, j] = np.sum(y1[y0==i]==j)\n return out\n\n\ndef accuracy(y0, y1):\n \"\"\"\n compute accuracy for y1 and y2 does not meter if either of them is in vector or integer form\n :param y0: list of - labels or vector of probabilities\n :param y1: list of - labels or vector of probabilities\n :return: accuracy\n \"\"\"\n if not isinstance(y0[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):\n y0 = np.argmax(y0, axis=1)\n elif isinstance(y0, list):\n y0 = np.array(y0)\n if not isinstance(y1[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):\n y1 = np.argmax(y1, axis=1)\n elif isinstance(y1, list):\n y1 = np.array(y1)\n\n out = np.sum(y0==y1)/len(y0)\n return out\n\n\ndef labels2probabilities(labels):\n \"\"\"\n transforms 
labels into the one-hot encoded vectors\n    :param labels: list of integer values 0..(number_of_classes - 1), size n\n    :return: matrix of size (n, num_of_classes), with ones on the indexes defined by param labels\n    \"\"\"\n    num_of_classes = max(labels)+1\n    return np.apply_along_axis(lambda x: np.eye(num_of_classes)[x], 0, labels)\n\n\ndef save_weights(list_of_numpy_arrays, file_name):\n    \"\"\"\n    saves a list of numpy arrays into a file\n    if the file has an extension other than npz, or no extension at all, .npz is appended to file_name\n    (uses the numpy function savez_compressed)\n    :param list_of_numpy_arrays: list of numpy arrays\n    :param file_name: filename with format npz\n    \"\"\"\n    if os.path.dirname(file_name):\n        check_dir(os.path.dirname(file_name))\n    if not str(file_name).endswith(\".npz\"):\n        file_name = file_name + \".npz\"\n\n    np.savez_compressed(file_name, *list_of_numpy_arrays)\n\n\ndef load_weights(file_name):\n    \"\"\"\n    loads weights saved by save_weights, so the npz extension of the file is necessary\n    :param file_name: filename with format npz\n    :return: list of loaded numpy arrays\n    \"\"\"\n    if not str(file_name).endswith(\".npz\"):\n        raise IOError(\"file_name has a bad format, use a .npz file instead.\")\n    l = np.load(file_name)\n    files = l.files\n    output = list()\n    for file in files:\n        output += [l[file]]\n    return output\n\n\ndef check_dir(directory):\n    \"\"\"\n    Checks if the path exists and, if not, creates all missing folders\n    \"\"\"\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\n\ndef join_weights_and_biases(weights, biases):\n    \"\"\"\n    joins two lists into one\n    :param weights: list of numpy arrays.\n    :param biases: list of numpy arrays with the same length as weights.\n    :return: list of lists with two numpy arrays for weights and biases, respectively.\n        - the first index defines the layer and the second the weight (0) or bias (1)\n    \"\"\"\n    out = list()\n    for i, _ in enumerate(weights):\n        out.append([weights[i], biases[i]])\n    return out\n\n\ndef get_class_vs_others_indexes(class_index, labels, return_new_labels=False):\n    \"\"\"\n    Generates indexes that contain the same number of samples from the specified class (class_index) and\n    from the remaining classes. It can also return new labels, i.e., 0 for samples with class_index and 1 for the others.\n    The returned indexes are randomly shuffled.\n    :param class_index: index of the base class\n    :param labels: array of labels for the given dataset\n    :param return_new_labels: a flag\n    :return: indexes of samples with all samples from the class (class_index) and the same number of other classes\n        - if return_new_labels is True then also the new labels are returned\n    \"\"\"\n    ar = np.arange(len(labels))\n    indexes0 = ar[labels==class_index]\n    indexes1 = np.random.choice(ar[labels!=class_index], len(indexes0))\n    # stack the base-class indexes and the sampled other-class indexes\n    out = np.concatenate((indexes0, indexes1))\n    out_lables = np.zeros((2*len(indexes0)), dtype=int)\n    out_lables[-len(indexes0):] = 1\n    rp = np.random.permutation(len(out))\n    if return_new_labels:\n        return out[rp], out_lables[rp]\n    else:\n        return out[rp]\n" ]
[ [ "numpy.load", "numpy.sum", "numpy.eye", "numpy.zeros", "numpy.argmax", "numpy.array", "numpy.savez_compressed" ] ]
mariesig/privacy-evaluator
[ "4e6ced65cc71bb661aef4518192517e23e22595e" ]
[ "privacy_evaluator/metrics/basics.py" ]
[ "import numpy as np\n\n\ndef accuracy(y: np.ndarray, y_prediction: np.ndarray) -> np.float32:\n \"\"\"Calculates accuracy for true labels and predicted labels.\n\n :params y: True labels.\n :params y_prediction: Predicted labels.\n :return: Accuracy\n :raises ValueError: If true labels and predicted labels are not of the same shape.\n \"\"\"\n if y.shape != y_prediction.shape:\n raise ValueError(\n f\"Expected true labels and predicted labels to be of same shape, received true labels with shape {str(y.shape)} and predicted labels with shape {str(y_prediction.shape)} instead.\"\n )\n return (np.argmax(y, axis=1) == np.argmax(y_prediction, axis=1)).sum() / y.shape[0]\n\n\ndef train_to_test_accuracy_gap(\n train_accuracy: np.float32, test_accuracy: np.float32\n) -> np.float32:\n \"\"\"Calculates the gap between the train and test accuracy of a classifier.\n\n The gap is calculated by subtracting the test accuracy from the train accuracy.\n\n :params train_accuracy: The train accuracy.\n :params test_accuracy: The test accuracy.\n :return: The gap between the train and test accuracy.\n \"\"\"\n return train_accuracy - test_accuracy\n\n\ndef train_to_test_accuracy_ratio(\n train_accuracy: np.float32, test_accuracy: np.float32\n) -> np.float32:\n \"\"\"Calculates the ratio between the train and test accuracy of a classifier.\n\n The ratio is calculated by dividing the test accuracy by the train accuracy.\n\n :params train_accuracy: The train accuracy.\n :params test_accuracy: The test accuracy.\n :return: The ratio between the train and test accuracy.\n \"\"\"\n return train_accuracy / test_accuracy\n" ]
[ [ "numpy.argmax" ] ]
glassroom/torch_train_test_loop
[ "fbd575c59cbf2823d13eb1df86b5d90f29febd23" ]
[ "torch_train_test_loop.py" ]
[ "import torch\nimport contextlib\n\nTRAIN_DESC, VALID_DESC, TEST_DESC = ('train', 'valid', 'test')\n\nclass EarlyStopException(Exception):\n pass\n\nclass LoopComponent():\n r\"\"\"\n Base class for loop components. Each method is a callback to be\n invoked by a `TrainTestLoop` instance, which is passed as an input.\n If the loop instance has multiple components, on each iteration their\n callbacks will be invoked in the following order:\n `\n Iteration\n +------->-------+\n | |\n | +-----------v-----------+-----------------------+--\n | | Loop component #1 | Loop component #2 | ...\n | +-----------------------+-----------------------+--\n | | on_train_begin -----+-> on_train_begin -----+-> ...\n | | on_epoch_begin ---+---> on_epoch_begin ---+---> ...\n | | on_batch_begin -+-----> on_batch_begin -+-----> ...\n | | : | : |\n | | on_batch_end ---+-----> on_batch_end --+-----> ...\n | | on_epoch_end -----+---> on_epoch_end -----+---> ...\n | | on_train_end -------+-> on_train_end -------+-> ... :\n | +-----------------------+-----------------------+-- |\n | v\n +-------------------------------<------------------------------+\n `\n \"\"\"\n def on_train_begin(self, loop): pass # called by loop at start of training\n def on_epoch_begin(self, loop): pass # called by loop at start of each epoch\n def on_batch_begin(self, loop): pass # called by loop at start of each batch\n def on_grads_reset(self, loop): pass # called by loop to zero out gradients, if training\n def on_forward_pass(self, loop): pass # called by loop to compute forward pass\n def on_loss_compute(self, loop): pass # called by loop to compute model loss\n def on_backward_pass(self, loop): pass # called by loop to compute backward pass, if training\n def on_optim_step(self, loop): pass # called by loop to compute/schedule optim, if training\n def on_batch_end(self, loop): pass # called by loop at end of each batch\n def on_epoch_end(self, loop): pass # called by loop at end of each epoch\n def on_train_end(self, loop): pass # called by loop at end of training\n\nclass TrainTestLoop():\n r\"\"\"\n Composable loop for training and testing PyTorch models. On each\n iteration of the loop, computations are performed by one or more\n `LoopComponent` instances that access and modify loop state. The\n number and order of loop components can be modified at any time.\n\n Args:\n model: `torch.nn.Module` object containing the model.\n components: iterable of `LoopComponent` instances that perform\n computations on each iteration, in order of invocation.\n train_data: iterable for which len() returns length.\n valid_data: iterable for which len() returns length.\n\n Methods:\n train(n_epochs): train/validate model for n_epochs: int.\n test(test_data): test model for one epoch on previously unseen\n test_data, an iterable for which len() returns length.\n stop(): stop early and, if training and validating, invoke the\n 'on_train_end' callbacks of all loop components. 
Any\n component of the loop can call stop() at any time.\n\n Sample usage:\n >>> loop = TrainTestLoop(model, components, train_data, valid_data)\n >>> loop.train(n_epochs)\n >>> loop.test(test_data)\n >>> print(*vars(loop), sep='\\n') # vars holding loop state\n \"\"\"\n def __init__(self, model, components, train_data, valid_data):\n self.model, self.components, self.train_data, self.valid_data = (model, list(components), train_data, valid_data)\n self.epoch_num = 0\n\n def _components_do(self, *args):\n for callback in [getattr(comp, arg) for arg in args for comp in self.components]:\n callback(self)\n\n def _run_epoch(self, data, desc):\n self.n_batches, self.epoch_desc = (len(data), desc)\n self.is_training, self.is_validating, self.is_testing = [desc == s for s in (TRAIN_DESC, VALID_DESC, TEST_DESC)]\n assert [self.is_training, self.is_validating, self.is_testing].count(True) == 1\n self.model.train() if self.is_training else self.model.eval()\n with torch.no_grad() if not self.is_training else contextlib.suppress():\n self._components_do('on_epoch_begin')\n for self.batch_num, self.batch in enumerate(iter(data)):\n if self.is_training: self._components_do('on_grads_reset')\n self._components_do('on_batch_begin', 'on_forward_pass', 'on_loss_compute')\n if self.is_training:\n self._components_do('on_backward_pass', 'on_optim_step')\n self.optim_step_num += 1\n self._components_do('on_batch_end')\n self._components_do('on_epoch_end')\n\n def train(self, n_epochs):\n self.n_epochs = n_epochs\n self.n_optim_steps, self.optim_step_num = (self.n_epochs * len(self.train_data), 0)\n self._components_do('on_train_begin')\n for _ in range(n_epochs):\n try:\n self._run_epoch(self.train_data, TRAIN_DESC)\n self._run_epoch(self.valid_data, VALID_DESC)\n self.epoch_num += 1\n except EarlyStopException: break\n self._components_do('on_train_end')\n \n def test(self, test_data):\n try:\n self.n_epochs = 1\n self._run_epoch(test_data, TEST_DESC)\n except EarlyStopException: pass\n\n def stop(self):\n raise EarlyStopException\n" ]
[ [ "torch.no_grad" ] ]
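An illustrative LoopComponent for the TrainTestLoop record above (not part of the repository; it assumes LoopComponent and TrainTestLoop are importable from torch_train_test_loop, and loop.scores / loop.targets / loop.loss are state names chosen for this sketch, since the loop simply stores arbitrary attributes on itself):

class SimpleTrainer(LoopComponent):
    """Wires a criterion and optimizer into the callbacks the loop invokes."""

    def __init__(self, criterion, optimizer):
        self.criterion, self.optimizer = criterion, optimizer

    def on_grads_reset(self, loop):
        self.optimizer.zero_grad()           # invoked by the loop only while training

    def on_forward_pass(self, loop):
        inputs, targets = loop.batch         # assumes each batch is an (inputs, targets) pair
        loop.scores, loop.targets = loop.model(inputs), targets

    def on_loss_compute(self, loop):
        loop.loss = self.criterion(loop.scores, loop.targets)

    def on_backward_pass(self, loop):
        loop.loss.backward()

    def on_optim_step(self, loop):
        self.optimizer.step()

# Hypothetical wiring: model, loss_fn, optimizer and the data loaders are placeholders.
# loop = TrainTestLoop(model, [SimpleTrainer(loss_fn, optimizer)], train_loader, valid_loader)
# loop.train(n_epochs=3)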
JulianKu/megastep
[ "21ac85510d03f20600d438618a02569c6f1e34e1" ]
[ "rebar/stats/gpu.py" ]
[ "import torch\nimport pandas as pd\nfrom io import BytesIO\nfrom subprocess import check_output\nfrom . import writing\nimport time\n\n\ndef memory(device=0):\n total_mem = torch.cuda.get_device_properties(f'cuda:{device}').total_memory\n writing.max(f'gpu-memory/cache/{device}', torch.cuda.max_memory_cached(device)/total_mem)\n torch.cuda.reset_max_memory_cached()\n writing.max(f'gpu-memory/alloc/{device}', torch.cuda.max_memory_allocated(device)/total_mem)\n torch.cuda.reset_max_memory_allocated()\n torch.cuda.reset_max_memory_cached()\n\ndef dataframe():\n \"\"\"Use `nvidia-smi --help-query-gpu` to get a list of query params\"\"\"\n params = {\n 'device': 'index', \n 'compute': 'utilization.gpu', 'access': 'utilization.memory', \n 'memused': 'memory.used', 'memtotal': 'memory.total',\n 'fan': 'fan.speed', 'power': 'power.draw', 'temp': 'temperature.gpu'}\n command = f\"\"\"nvidia-smi --format=csv,nounits,noheader --query-gpu={','.join(params.values())}\"\"\"\n df = pd.read_csv(BytesIO(check_output(command, shell=True)), header=None)\n df.columns = list(params.keys())\n df = df.set_index('device')\n df = df.apply(pd.to_numeric, errors='coerce')\n return df\n\n_last = -1\ndef vitals(device=None, throttle=0):\n # This is a fairly expensive op, so let's avoid doing it too often\n global _last\n if time.time() - _last < throttle:\n return\n _last = time.time()\n\n df = dataframe()\n if device is None:\n pass\n elif isinstance(device, int):\n df = df.loc[[device]]\n else:\n df = df.loc[device]\n\n fields = ['compute', 'access', 'fan', 'power', 'temp']\n for (device, field), value in df[fields].stack().iteritems():\n writing.mean(f'gpu/{field}/{device}', value)\n\n for device in df.index:\n writing.mean(f'gpu/memory/{device}', 100*df.loc[device, 'memused']/df.loc[device, 'memtotal'])" ]
[ [ "torch.cuda.get_device_properties", "torch.cuda.max_memory_allocated", "torch.cuda.reset_max_memory_allocated", "torch.cuda.reset_max_memory_cached", "torch.cuda.max_memory_cached" ] ]
GeWu-Lab/MUSIC-AVQA_CVPR2022
[ "f704130f37a342b5ff861780282c75cc875221b2" ]
[ "net_grd_baseline/nets_qa_grd_baseline.py" ]
[ "import torch\nimport torchvision.models as models\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom visual_net import resnet18\n\n\ndef batch_organize(audio_data, posi_img_data, nega_img_data):\n\n # print(\"audio data: \", audio_data.shape)\n (B, T, C) = audio_data.size()\n audio_data_batch=audio_data.view(B*T,C)\n batch_audio_data = torch.zeros(audio_data_batch.shape[0] * 2, audio_data_batch.shape[1])\n\n (B, T, C, H, W) = posi_img_data.size()\n posi_img_data_batch=posi_img_data.view(B*T,C,H,W)\n nega_img_data_batch=nega_img_data.view(B*T,C,H,W)\n\n batch_image_data = torch.zeros(posi_img_data_batch.shape[0] * 2, posi_img_data_batch.shape[1], posi_img_data_batch.shape[2],posi_img_data_batch.shape[3])\n batch_labels = torch.zeros(audio_data_batch.shape[0] * 2)\n for i in range(audio_data_batch.shape[0]):\n batch_audio_data[i * 2, :] = audio_data_batch[i, :]\n batch_audio_data[i * 2 + 1, :] = audio_data_batch[i, :]\n batch_image_data[i * 2, :] = posi_img_data_batch[i, :]\n batch_image_data[i * 2 + 1, :] = nega_img_data_batch[i, :]\n batch_labels[i * 2] = 1\n batch_labels[i * 2 + 1] = 0\n \n return batch_audio_data, batch_image_data, batch_labels\n\n# Question\nclass QstEncoder(nn.Module):\n\n def __init__(self, qst_vocab_size, word_embed_size, embed_size, num_layers, hidden_size):\n\n super(QstEncoder, self).__init__()\n self.word2vec = nn.Embedding(qst_vocab_size, word_embed_size)\n self.tanh = nn.Tanh()\n self.lstm = nn.LSTM(word_embed_size, hidden_size, num_layers)\n self.fc = nn.Linear(2*num_layers*hidden_size, embed_size) # 2 for hidden and cell states\n\n def forward(self, question):\n\n qst_vec = self.word2vec(question) # [batch_size, max_qst_length=30, word_embed_size=300]\n qst_vec = self.tanh(qst_vec)\n qst_vec = qst_vec.transpose(0, 1) # [max_qst_length=30, batch_size, word_embed_size=300]\n self.lstm.flatten_parameters()\n _, (hidden, cell) = self.lstm(qst_vec) # [num_layers=2, batch_size, hidden_size=512]\n qst_feature = torch.cat((hidden, cell), 2) # [num_layers=2, batch_size, 2*hidden_size=1024]\n qst_feature = qst_feature.transpose(0, 1) # [batch_size, num_layers=2, 2*hidden_size=1024]\n qst_feature = qst_feature.reshape(qst_feature.size()[0], -1) # [batch_size, 2*num_layers*hidden_size=2048]\n qst_feature = self.tanh(qst_feature)\n qst_feature = self.fc(qst_feature) # [batch_size, embed_size]\n\n return qst_feature\n\n\nclass AVQA_Fusion_Net(nn.Module):\n\n def __init__(self):\n super(AVQA_Fusion_Net, self).__init__()\n\n # for features\n self.fc_a1 = nn.Linear(128, 512)\n self.fc_a2=nn.Linear(512,512)\n\n self.visual_net = resnet18(pretrained=True)\n\n self.fc_v = nn.Linear(2048, 512)\n self.fc_st = nn.Linear(512, 512)\n self.fc_fusion = nn.Linear(1024, 512)\n self.fc = nn.Linear(1024, 512)\n self.fc_aq = nn.Linear(512, 512)\n self.fc_vq = nn.Linear(512, 512)\n\n self.linear11 = nn.Linear(512, 512)\n self.dropout1 = nn.Dropout(0.1)\n self.linear12 = nn.Linear(512, 512)\n\n self.linear21 = nn.Linear(512, 512)\n self.dropout2 = nn.Dropout(0.1)\n self.linear22 = nn.Linear(512, 512)\n self.norm1 = nn.LayerNorm(512)\n self.norm2 = nn.LayerNorm(512)\n self.dropout3 = nn.Dropout(0.1)\n self.dropout4 = nn.Dropout(0.1)\n self.norm3 = nn.LayerNorm(512)\n\n self.attn_a = nn.MultiheadAttention(512, 4, dropout=0.1)\n self.attn_v = nn.MultiheadAttention(512, 4, dropout=0.1)\n\n # question\n self.question_encoder = QstEncoder(93, 512, 512, 1, 512)\n\n self.tanh = nn.Tanh()\n self.dropout = nn.Dropout(0.5)\n self.fc_ans = nn.Linear(512, 42)\n\n 
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc_gl=nn.Linear(1024,512)\n\n\n # combine\n self.fc1 = nn.Linear(1024, 512)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(512, 256)\n self.relu2 = nn.ReLU()\n self.fc3 = nn.Linear(256, 128)\n self.relu3 = nn.ReLU()\n self.fc4 = nn.Linear(128, 2)\n self.relu4 = nn.ReLU()\n\n\n def forward(self, audio,visual_posi,visual_nega, question): \n # print(\"net audio input: \", audio.shape)\n # print(\"net question input: \", question.shape)\n ## question features\n qst_feature = self.question_encoder(question)\n xq = qst_feature.unsqueeze(0)\n\n ## audio features B T,128\n audio_feat = F.relu(self.fc_a1(audio))\n audio_feat = self.fc_a2(audio_feat) \n audio_feat_flag = audio_feat\n\n ## visua: [2*B*T, 512,14,14]\n # print(\"v feat1: \", visual_posi.shape) # [64, 10, 512, 14, 14]\n # v_feat = self.avgpool(visual_posi)\n # print(\"v feat: \", v_feat.shape)\n # posi_visual_feat=v_feat.squeeze(-1).squeeze(-1) # B T 512\n\n\n B,T,C,H,W=visual_posi.size()\n temp_visual=visual_posi.view(B*T,C,H,W)\n v_feat=self.avgpool(temp_visual)\n # print(\"v_feat: \", v_feat.shape) # [640, 512, 1, 1]\n posi_visual_feat=v_feat.squeeze(-1).squeeze(-1) # B T 512\n posi_visual_feat=posi_visual_feat.view(audio_feat.size(0),-1,C)\n # print(\"posi_visual_feat: \", posi_visual_feat.shape) # [64, 10, 512]\n\n # T,C,H,W=visual_posi.size()\n # visual_nega=torch.zeros(T,C,H,W)\n\n\n out_match = None\n match_label=None\n\n\n # print(\"posi_visual_feat: \", posi_visual_feat.shape)\n visual_feat_grd=posi_visual_feat.permute(1,0,2)\n \n ## attention, question as query on visual_feat_grd\n visual_feat_att = self.attn_v(xq, visual_feat_grd, visual_feat_grd, attn_mask=None, key_padding_mask=None)[0].squeeze(0)\n src = self.linear12(self.dropout1(F.relu(self.linear11(visual_feat_att))))\n visual_feat_att = visual_feat_att + self.dropout2(src)\n visual_feat_att = self.norm1(visual_feat_att)\n \n # attention, question as query on audio\n audio_feat = audio_feat.permute(1, 0, 2)\n audio_feat_att = self.attn_a(xq, audio_feat, audio_feat, attn_mask=None,key_padding_mask=None)[0].squeeze(0)\n src = self.linear22(self.dropout3(F.relu(self.linear21(audio_feat_att))))\n audio_feat_att = audio_feat_att + self.dropout4(src)\n audio_feat_att = self.norm2(audio_feat_att)\n \n feat = torch.cat((audio_feat_att, visual_feat_att), dim=-1)\n feat = self.tanh(feat)\n feat = self.fc_fusion(feat)\n\n ## fusion with question\n combined_feature = torch.mul(feat, qst_feature)\n combined_feature = self.tanh(combined_feature)\n out_qa = self.fc_ans(combined_feature) # [batch_size, ans_vocab_size]\n\n return out_qa,out_match,match_label\n" ]
[ [ "torch.nn.MultiheadAttention", "torch.nn.LSTM", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Embedding", "torch.nn.Tanh", "torch.mul", "torch.nn.ReLU", "torch.nn.LayerNorm", "torch.zeros", "torch.cat", "torch.nn.Dropout" ] ]
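A shape check for the question-as-query attention used in the record above (illustration only; the batch and sequence sizes are hypothetical). nn.MultiheadAttention here expects (seq_len, batch, embed_dim) inputs, so the question is passed as a length-1 query sequence and the per-frame audio/visual features as keys and values:

import torch
import torch.nn as nn

B, T, d = 2, 10, 512
attn = nn.MultiheadAttention(d, 4, dropout=0.1)
xq = torch.randn(1, B, d)          # question feature as a length-1 query sequence
kv = torch.randn(T, B, d)          # per-frame features, e.g. audio_feat.permute(1, 0, 2)
out, _ = attn(xq, kv, kv, attn_mask=None, key_padding_mask=None)
print(out.shape)                   # torch.Size([1, 2, 512]); .squeeze(0) gives (B, d)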
edgardeng/machine-learning-pytorch
[ "24a060894f5226b5ef20cc311db72f1adc037548" ]
[ "practice-ocr/train_code/train_ctpn/data/dataset.py" ]
[ "#-*- coding:utf-8 -*-\n#'''\n# Created on 18-12-27 上午10:34\n#\n# @Author: Greg Gao(laygin)\n#'''\n\nimport os\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport cv2\nfrom torch.utils.data import Dataset\nimport torch\nfrom config import IMAGE_MEAN\nfrom ctpn_utils import cal_rpn\n\n\ndef readxml(path):\n gtboxes = []\n imgfile = ''\n xml = ET.parse(path)\n for elem in xml.iter():\n if 'filename' in elem.tag:\n imgfile = elem.text\n if 'object' in elem.tag:\n for attr in list(elem):\n if 'bndbox' in attr.tag:\n xmin = int(round(float(attr.find('xmin').text)))\n ymin = int(round(float(attr.find('ymin').text)))\n xmax = int(round(float(attr.find('xmax').text)))\n ymax = int(round(float(attr.find('ymax').text)))\n\n gtboxes.append((xmin, ymin, xmax, ymax))\n\n return np.array(gtboxes), imgfile\n\n\n# for ctpn text detection\nclass VOCDataset(Dataset):\n def __init__(self,\n datadir,\n labelsdir):\n '''\n\n :param txtfile: image name list text file\n :param datadir: image's directory\n :param labelsdir: annotations' directory\n '''\n if not os.path.isdir(datadir):\n raise Exception('[ERROR] {} is not a directory'.format(datadir))\n if not os.path.isdir(labelsdir):\n raise Exception('[ERROR] {} is not a directory'.format(labelsdir))\n\n self.datadir = datadir\n self.img_names = os.listdir(self.datadir)\n self.labelsdir = labelsdir\n\n def __len__(self):\n return len(self.img_names)\n\n def __getitem__(self, idx):\n img_name = self.img_names[idx]\n img_path = os.path.join(self.datadir, img_name)\n print(img_path)\n xml_path = os.path.join(self.labelsdir, img_name.replace('.jpg', '.xml'))\n gtbox, _ = readxml(xml_path)\n img = cv2.imread(img_path)\n h, w, c = img.shape\n\n # clip image\n if np.random.randint(2) == 1:\n img = img[:, ::-1, :]\n newx1 = w - gtbox[:, 2] - 1\n newx2 = w - gtbox[:, 0] - 1\n gtbox[:, 0] = newx1\n gtbox[:, 2] = newx2\n\n [cls, regr], _ = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)\n\n m_img = img - IMAGE_MEAN\n\n regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])\n\n cls = np.expand_dims(cls, axis=0)\n\n # transform to torch tensor\n m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()\n cls = torch.from_numpy(cls).float()\n regr = torch.from_numpy(regr).float()\n\n return m_img, cls, regr\n\nclass ICDARDataset(Dataset):\n def __init__(self,\n datadir,\n labelsdir):\n '''\n\n :param txtfile: image name list text file\n :param datadir: image's directory\n :param labelsdir: annotations' directory\n '''\n if not os.path.isdir(datadir):\n raise Exception('[ERROR] {} is not a directory'.format(datadir))\n if not os.path.isdir(labelsdir):\n raise Exception('[ERROR] {} is not a directory'.format(labelsdir))\n\n self.datadir = datadir\n self.img_names = os.listdir(self.datadir)\n self.labelsdir = labelsdir\n\n def __len__(self):\n return len(self.img_names)\n\n def box_transfer(self,coor_lists,rescale_fac = 1.0):\n gtboxes = []\n for coor_list in coor_lists:\n coors_x = [int(coor_list[2*i]) for i in range(4)]\n coors_y = [int(coor_list[2*i+1]) for i in range(4)]\n xmin = min(coors_x)\n xmax = max(coors_x)\n ymin = min(coors_y)\n ymax = max(coors_y)\n if rescale_fac>1.0:\n xmin = int(xmin / rescale_fac)\n xmax = int(xmax / rescale_fac)\n ymin = int(ymin / rescale_fac)\n ymax = int(ymax / rescale_fac)\n gtboxes.append((xmin, ymin, xmax, ymax))\n return np.array(gtboxes)\n\n def box_transfer_v2(self,coor_lists,rescale_fac = 1.0):\n gtboxes = []\n for coor_list in coor_lists:\n coors_x = [int(coor_list[2 * i]) for i in range(4)]\n coors_y = 
[int(coor_list[2 * i + 1]) for i in range(4)]\n xmin = min(coors_x)\n xmax = max(coors_x)\n ymin = min(coors_y)\n ymax = max(coors_y)\n if rescale_fac > 1.0:\n xmin = int(xmin / rescale_fac)\n xmax = int(xmax / rescale_fac)\n ymin = int(ymin / rescale_fac)\n ymax = int(ymax / rescale_fac)\n prev = xmin\n for i in range(xmin // 16 + 1, xmax // 16 + 1):\n next = 16*i-0.5\n gtboxes.append((prev, ymin, next, ymax))\n prev = next\n gtboxes.append((prev, ymin, xmax, ymax))\n return np.array(gtboxes)\n\n def parse_gtfile(self,gt_path,rescale_fac = 1.0):\n coor_lists = list()\n with open(gt_path) as f:\n content = f.readlines()\n for line in content:\n coor_list = line.split(',')[:8]\n if len(coor_list)==8:\n coor_lists.append(coor_list)\n return self.box_transfer_v2(coor_lists,rescale_fac)\n\n def draw_boxes(self,img,cls,base_anchors,gt_box):\n for i in range(len(cls)):\n if cls[i]==1:\n pt1 = (int(base_anchors[i][0]),int(base_anchors[i][1]))\n pt2 = (int(base_anchors[i][2]),int(base_anchors[i][3]))\n img = cv2.rectangle(img,pt1,pt2,(200,100,100))\n for i in range(gt_box.shape[0]):\n pt1 = (int(gt_box[i][0]),int(gt_box[i][1]))\n pt2 = (int(gt_box[i][2]),int(gt_box[i][3]))\n img = cv2.rectangle(img, pt1, pt2, (100, 200, 100))\n return img\n\n def __getitem__(self, idx):\n img_name = self.img_names[idx]\n img_path = os.path.join(self.datadir, img_name)\n # print(img_path)\n img = cv2.imread(img_path)\n #####for read error, use default image#####\n if img is None:\n print(img_path)\n with open('error_imgs.txt','a') as f:\n f.write('{}\\n'.format(img_path))\n img_name = 'img_2647.jpg'\n img_path = os.path.join(self.datadir, img_name)\n img = cv2.imread(img_path)\n\n #####for read error, use default image#####\n\n h, w, c = img.shape\n rescale_fac = max(h, w) / 1600\n if rescale_fac>1.0:\n h = int(h/rescale_fac)\n w = int(w/rescale_fac)\n img = cv2.resize(img,(w,h))\n\n gt_path = os.path.join(self.labelsdir, 'gt_'+img_name.split('.')[0]+'.txt')\n gtbox = self.parse_gtfile(gt_path,rescale_fac)\n\n # clip image\n if np.random.randint(2) == 1:\n img = img[:, ::-1, :]\n newx1 = w - gtbox[:, 2] - 1\n newx2 = w - gtbox[:, 0] - 1\n gtbox[:, 0] = newx1\n gtbox[:, 2] = newx2\n\n [cls, regr], base_anchors = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)\n # debug_img = self.draw_boxes(img.copy(),cls,base_anchors,gtbox)\n # cv2.imwrite('debug/{}'.format(img_name),debug_img)\n m_img = img - IMAGE_MEAN\n\n regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])\n\n cls = np.expand_dims(cls, axis=0)\n\n # transform to torch tensor\n m_img = torch.from_numpy(m_img.transpose([2, 0, 1])).float()\n cls = torch.from_numpy(cls).float()\n regr = torch.from_numpy(regr).float()\n\n return m_img, cls, regr\n\nif __name__ == '__main__':\n xmin = 15\n xmax = 95\n for i in range(xmin//16+1,xmax//16+1):\n print(16*i-0.5)" ]
[ [ "numpy.array", "numpy.expand_dims", "torch.from_numpy", "numpy.random.randint" ] ]
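A quick sanity check for the horizontal-flip box remapping shared by both dataset classes above (illustration only; the width and box values are made up). Flipping img[:, ::-1, :] maps column x to w - x - 1, so xmin and xmax swap roles:

import numpy as np

w = 100
gtbox = np.array([[10, 5, 30, 25]])   # (xmin, ymin, xmax, ymax)
newx1 = w - gtbox[:, 2] - 1           # 100 - 30 - 1 = 69 -> new xmin
newx2 = w - gtbox[:, 0] - 1           # 100 - 10 - 1 = 89 -> new xmax
print(newx1, newx2)                   # [69] [89]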
rom1mouret/kak
[ "e6edb931d94f8a675ed6eb441a6b796dd8b35894" ]
[ "shoputils/shoputils/data_fitter.py" ]
[ "import yaml\nimport numpy as np\nimport re\nimport os\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .util import (\n to_gpu, split_chunk, slice_tensors,\n build_vocab, clean_strings, generate_uid,\n yaml_serializable, hash_strings\n)\n\n\nclass Net(nn.Module):\n def __init__(self, text_input_dim, text_embed_dim, text_output_dim,\n brand_dim, latent_dim):\n super(Net, self).__init__()\n\n self._latent_dim = latent_dim\n price_dim = 3\n price_embed_dim = 8\n other_dim = 4\n slope = 0.2\n\n hidden_dim = 2 * self._latent_dim\n self._dim = self._latent_dim + 2 * hidden_dim + price_embed_dim\n\n # product + model embeddings\n self._embedding = nn.EmbeddingBag(text_input_dim, text_embed_dim, mode='sum')\n\n # price embedding (sorta)\n self._price_proc = nn.Sequential(\n nn.Linear(price_dim, 128),\n nn.LeakyReLU(slope),\n nn.Linear(128, price_embed_dim)\n )\n\n # encoder\n self._encoder = nn.Sequential(\n nn.Linear(text_embed_dim + price_embed_dim + other_dim, 2048),\n nn.LeakyReLU(slope),\n nn.Linear(2048, hidden_dim),\n nn.LeakyReLU(slope),\n nn.Linear(hidden_dim, self._latent_dim),\n )\n\n # decoders\n self._txt_decoder = nn.Sequential(\n nn.Linear(latent_dim, 2048),\n nn.LeakyReLU(slope),\n nn.Dropout(0.25),\n nn.Linear(2048, text_output_dim)\n )\n\n self._brand_decoder = nn.Sequential(\n nn.Linear(latent_dim, hidden_dim),\n nn.LeakyReLU(slope),\n nn.Linear(hidden_dim, brand_dim),\n nn.LogSoftmax()\n )\n\n self._maker_decoder = nn.Sequential(\n nn.Linear(latent_dim, hidden_dim),\n nn.LeakyReLU(slope),\n nn.Linear(hidden_dim, brand_dim),\n nn.LogSoftmax()\n )\n\n for layer in (self._price_proc[0],\n self._encoder[0], self._encoder[2],\n self._txt_decoder[0],\n self._brand_decoder[0], self._maker_decoder[0]):\n nn.init.xavier_normal_(\n layer.weight,\n gain=nn.init.calculate_gain(\"leaky_relu\", param=slope))\n\n def dim(self):\n return self._dim\n\n def extract_features(self, batch):\n latent, price = self.latent_features(batch)\n b = F.linear(latent, self._brand_decoder[0].weight)\n m = F.linear(latent, self._maker_decoder[0].weight)\n\n return torch.cat([latent, price, b, m], dim=1)\n\n def latent_features(self, batch):\n # embeddings\n embedded = self._embedding(batch[\"text\"])\n\n # price pre-processing\n price = self._price_proc(batch[\"price\"])\n\n # encoder input\n inp = torch.cat([embedded, price, batch[\"missing\"]], dim=1)\n\n # encoder\n encoded = self._encoder(inp)\n\n return encoded, price\n\n def forward(self, batch):\n latent = self.latent_features(batch)[0]\n txt = self._txt_decoder(latent)\n brand = self._brand_decoder(latent)\n maker = self._maker_decoder(latent)\n\n return txt, brand, maker\n\n\nclass DataFitter:\n def __init__(self, static_params, gpu=-1, gpu_batch_size=16, out_dir=\"models/\"):\n self._gpu = gpu\n self._gpu_batch_size = gpu_batch_size\n trained = type(static_params) is not dict\n\n if trained:\n self._yaml_path = static_params\n with open(self._yaml_path, \"r\", encoding=\"utf-8\") as f:\n static_params = yaml.load(f)\n\n dirloc = os.path.dirname(self._yaml_path)\n self._torch_mdl_path = os.path.join(dirloc, static_params[\"torch_file\"])\n self._vocab_path = os.path.join(dirloc, static_params[\"vocab_file\"])\n else:\n prefix = \"fitter_\"\n uid = generate_uid()\n print(\"creating new model\", uid)\n self._torch_mdl_path = os.path.join(out_dir, \"%s%s.torch\" % (prefix, uid))\n self._yaml_path = os.path.join(out_dir, \"%s%s.yml\" % (prefix, uid))\n self._vocab_path = os.path.join(out_dir, \"vocab_%s.yml\" 
% (uid, ))\n\n self._static_params = static_params\n\n def net_factory():\n net = Net(\n text_input_dim=static_params[\"text_input_dim\"],\n text_embed_dim=static_params[\"text_embed_dim\"],\n text_output_dim=static_params[\"text_output_dim\"],\n brand_dim=static_params[\"brand_dim\"],\n latent_dim=static_params[\"latent_dim\"]\n )\n\n if self._gpu >= 0:\n net = net.cuda(self._gpu)\n\n return net\n\n self._net_factory = net_factory\n self._net = net_factory()\n self._bce_loss = nn.BCEWithLogitsLoss(reduction='sum')\n self._nll_loss = nn.NLLLoss(reduction='sum')\n\n self._foreign = re.compile(\"[a-z]+\")\n self._hangul = re.compile(\"[가-힣]\")\n self._number = re.compile(\"[0-9]+\")\n\n if trained:\n self.load(self._yaml_path)\n\n def __getitem__(self, key):\n return self._static_params[key]\n\n def train_preprocessing(self, chunk):\n # brand & maker\n vocab_dim = self._static_params[\"brand_dim\"]\n self._brand_vocab = build_vocab(chunk[\"brand\"], vocab_dim, shift=1)\n self._maker_vocab = build_vocab(chunk[\"maker\"], vocab_dim, shift=1)\n\n vocab = {\n \"brand\": self._brand_vocab,\n \"maker\": self._maker_vocab\n }\n\n with open(self._vocab_path, \"w\", encoding=\"utf-8\") as f:\n yaml.dump(vocab, f)\n\n # prices\n prices = chunk[\"price\"].copy().astype(np.float32)\n missing = prices == -1\n\n log_prices = np.log(1.01 + prices)\n log_prices[missing] = np.nan\n prices[missing] = np.nan\n\n self._logprice_median = np.nanmedian(log_prices)\n self._logprice_std = np.nanstd(log_prices)\n self._price_median = np.nanmedian(prices)\n self._price_std = np.nanstd(prices)\n\n print(\"price median\", self._price_median, \"price std\", self._price_std)\n\n def _tokenize(self, text):\n \"\"\" gets the text input ready for embedding \"\"\"\n text = text.lower()\n arr = list(self._hangul.findall(text))\n arr += list(self._foreign.findall(text))\n if self._number.search(text):\n arr.append(\"9\")\n\n return set(arr)\n\n def _text_output(self, sequences):\n vocab_dim = self._static_params[\"text_output_dim\"]\n X = torch.zeros(len(sequences), vocab_dim)\n for i, seq in enumerate(sequences):\n indices = hash_strings(seq, mod=vocab_dim, seed=1)\n if len(indices) > 0:\n X[[i] * len(indices), indices] = 1\n\n return X\n\n def _embedding_input(self, sequences):\n vocab_dim = self._static_params[\"text_input_dim\"]\n max_length = max(1, max(map(len, sequences)))\n inp = torch.zeros(len(sequences), max_length, dtype=torch.int64)\n for i, seq in enumerate(sequences):\n indices = hash_strings(seq, mod=vocab_dim-1, seed=0) + 1\n inp[i, :len(indices)] = torch.LongTensor(indices)\n\n return inp\n\n def _build_batch(self, chunk, with_targets=True, with_idf=True, gpu=-1):\n cols = {} # to store a copy of the chunk\n\n # decoding\n for col_name in (\"product\", \"model\"):\n cols[col_name] = (s.decode(\"utf-8\") for s in chunk[col_name])\n\n # tokenization\n sequences = []\n for product, model in zip(cols[\"product\"], cols[\"model\"]):\n sequences.append(self._tokenize(product) | self._tokenize(model))\n\n # text input\n text = self._embedding_input(sequences)\n\n # missing columns (with no decoding required)\n missing_product = map(bool, chunk[\"product\"])\n missing_model = map(bool, chunk[\"product\"])\n missing_brand = map(bool, chunk[\"brand\"])\n missing_maker = map(bool, chunk[\"maker\"])\n tranposed = zip(missing_product, missing_model, missing_brand, missing_maker)\n missing = torch.Tensor(list(tranposed))\n\n # prices\n prices = chunk[\"price\"]\n price_missing = prices == -1\n log_prices = (np.log(1.01 + 
prices) - self._logprice_median) / self._logprice_std\n log_prices[price_missing] = 0\n reg_prices = (prices - self._price_median) / self._price_std\n reg_prices[price_missing] = 0\n np_price = np.column_stack([price_missing.astype(np.float32), log_prices, reg_prices])\n price_feat = torch.from_numpy(np_price.astype(np.float32))\n\n if with_targets:\n # brand & makers\n for col_name in (\"brand\", \"maker\"):\n cols[col_name] = clean_strings((s.decode(\"utf-8\") for s in chunk[col_name]))\n\n # maker & brand\n makers = torch.LongTensor([self._maker_vocab.get(v, 0) for v in cols[\"maker\"]])\n brands = torch.LongTensor([self._brand_vocab.get(v, 0) for v in cols[\"brand\"]])\n\n # text\n if with_idf:\n y_text = self._text_output(sequences)\n else:\n y_text = None\n\n # move everything to GPU\n y = to_gpu(gpu, y_text, makers, brands)\n else:\n y = None\n\n batch = {\n \"text\": text,\n \"price\": price_feat,\n \"missing\": missing\n }\n\n batch = to_gpu(gpu, batch)[0]\n\n return batch, y\n\n def extract_features(self, chunk):\n batch, y = self._build_batch(chunk, with_targets=False, gpu=self._gpu)\n return self._net.extract_features(batch).data\n\n def prediction_err(self, chunk):\n batch, (y, brand, maker) = self._build_batch(chunk, with_targets=True, gpu=self._gpu)\n y_pred, brand_pred, maker_pred = self._net(batch)\n bce = nn.BCEWithLogitsLoss(reduction='none')\n nll = nn.NLLLoss(reduction='none')\n err1 = bce(y_pred, y).sum(dim=1).data\n err2 = nll(brand_pred, brand).data\n err3 = nll(maker_pred, maker).data\n\n return err1, err2, err3\n\n def find_err_thresholds(self, chunk):\n self._net.eval()\n minichunks = split_chunk(chunk, batch_size=self._gpu_batch_size)\n errs1, errs2, errs3 = [], [], []\n for minichunk in minichunks:\n err1, err2, err3 = self.prediction_err(minichunk)\n errs1 += err1.tolist()\n errs2 += err2.tolist()\n errs3 += err3.tolist()\n\n errs = np.array([errs1, errs2, errs3])\n\n thresholds = {}\n for p in (50, 75, 80, 85, 90, 92, 95, 96, 97, 98, 99):\n thresholds[p] = np.percentile(errs, p, axis=1).tolist()\n\n # save in yaml file\n with open(self._yaml_path, \"r\", encoding=\"utf-8\") as f:\n content = yaml.load(f)\n content[\"thresholds\"] = thresholds\n with open(self._yaml_path, \"w\", encoding=\"utf-8\") as f:\n yaml.dump(content, f, default_flow_style=False)\n\n self._net.train()\n\n return thresholds\n\n def dim(self):\n return self._net.dim()\n\n def prepare_hyperopt_dataset(self, train_set):\n self._train_set = self._build_batch(train_set)\n\n def set_holdout_val_set(self, val_set):\n chunks = split_chunk(val_set, batch_size=self._gpu_batch_size,\n max_rows=5000)\n print(\"holdout batch size:\", len(chunks) * self._gpu_batch_size)\n self._val_batches = list(map(self._build_batch, chunks))\n\n def train_and_eval(self, space):\n # copy the current net + new optimization hyperparameters\n net = self._net_factory()\n if os.path.exists(self._torch_mdl_path):\n net.load_state_dict(torch.load(self._torch_mdl_path))\n\n optim = self._create_optimizer(net, space)\n\n # training\n train_batch, (y, brand, maker) = self._train_set\n batch_size = int(space[\"batch_size\"])\n L = y.size(0)\n ordering = np.random.permutation(L) # gets us a meaningful variance to provide to Bayesian optim\n for k in range(0, L, batch_size):\n # forward\n loss = 0\n for i in range(k, min(L, k+batch_size), self._gpu_batch_size):\n indices = ordering[i:i+self._gpu_batch_size]\n batch, y_true, brand_true, maker_true = \\\n slice_tensors(indices, train_batch, y, brand, maker)\n batch, y_true, 
brand_true, maker_true = \\\n to_gpu(self._gpu, batch, y_true, brand_true, maker_true)\n\n # prediction\n y_pred, brand_pred, maker_pred = net(batch)\n\n # loss\n loss = loss + self._bce_loss(y_pred, y_true) + \\\n self._nll_loss(brand_pred, brand_true) + \\\n self._nll_loss(maker_pred, maker_true)\n\n # backward\n optim.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(net.parameters(), space[\"clipping\"])\n optim.step()\n\n # evaluation\n return self._validate(net.eval())[0]\n\n def reset_optimizer(self, space):\n self._optim = self._create_optimizer(self._net, space)\n self._space = space\n\n def train_on_batch(self, chunk):\n mini_chunks = split_chunk(chunk, batch_size=self._gpu_batch_size)\n\n # forward\n loss = 0\n for mini_chunk in mini_chunks:\n mini_batch, (y_true, maker_true, brand_true) = \\\n self._build_batch(mini_chunk, gpu=self._gpu)\n y_pred, maker_pred, brand_pred = self._net(mini_batch)\n loss = loss + self._bce_loss(y_pred, y_true) + \\\n self._nll_loss(maker_pred, maker_true) + \\\n self._nll_loss(brand_pred, brand_true)\n\n # backward\n self._optim.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self._net.parameters(), self._space[\"clipping\"])\n self._optim.step()\n\n def save(self, checkpoint, loss, report):\n torch.save(self._net.state_dict(), self._torch_mdl_path)\n\n metadata = {\n \"price_median\": self._price_median,\n \"price_std\": self._price_std,\n \"logprice_median\": self._logprice_median,\n \"logprice_std\": self._logprice_std,\n \"feat_extract_dim\": self._net.dim(),\n \"loss\": loss,\n \"checkpoint\": checkpoint,\n \"torch_file\": os.path.basename(self._torch_mdl_path),\n \"vocab_file\": os.path.basename(self._vocab_path),\n \"time\": time.strftime(\"%Y-%m-%d %H:%M\")\n }\n metadata.update(report)\n metadata.update(self._space)\n metadata.update(self._static_params)\n metadata = yaml_serializable(metadata)\n\n with open(self._yaml_path, \"w\") as f:\n yaml.dump(metadata, f, default_flow_style=False)\n\n return self._yaml_path\n\n def load(self, yaml_file):\n \"\"\" build the model structure before calling this method \"\"\"\n print(\"loading\", yaml_file)\n with open(yaml_file, \"r\") as f:\n metadata = yaml.load(f)\n self._space = metadata\n\n if \"thresholds\" in metadata:\n self._thresholds = metadata[\"thresholds\"]\n\n # price stuff\n self._price_std = metadata[\"price_std\"]\n self._price_median = metadata[\"price_median\"]\n self._logprice_std = metadata[\"logprice_std\"]\n self._logprice_median = metadata[\"logprice_median\"]\n\n # vocab\n with open(self._vocab_path, \"r\", encoding=\"utf-8\") as f:\n vocab = yaml.load(f)\n self._brand_vocab = vocab[\"brand\"]\n self._maker_vocab = vocab[\"maker\"]\n\n # torch network\n self._net.load_state_dict(torch.load(self._torch_mdl_path))\n\n if self._gpu >= 0:\n self._net = self._net.cuda(self._gpu)\n\n def thresholds(self):\n return self._thresholds\n\n def eval(self):\n self._net = self._net.eval()\n return self\n\n def validate(self):\n net = self._net.eval()\n val = self._validate(net)\n self._net.train()\n\n return val\n\n def _create_optimizer(self, net, space):\n optim = torch.optim.SGD(\n net.parameters(),\n lr=space[\"lr\"],\n momentum=space[\"momentum\"],\n weight_decay=0)\n\n return optim\n\n def _validate(self, net):\n loss = 0\n n = 0\n for batch, (y_true, maker_true, brand_true) in self._val_batches:\n batch, y_true, maker_true, brand_true = \\\n to_gpu(self._gpu, batch, y_true, maker_true, brand_true)\n y_pred, maker_pred, brand_pred = net(batch)\n loss += 
self._bce_loss(y_pred, y_true) + \\\n self._nll_loss(maker_pred, maker_true) + \\\n self._nll_loss(brand_pred, brand_true)\n n += y_pred.size(0)\n\n loss /= 3 * n\n\n return loss.item(), {}\n" ]
[ [ "torch.nn.EmbeddingBag", "torch.nn.NLLLoss", "torch.nn.init.calculate_gain", "torch.nn.Linear", "torch.load", "torch.nn.functional.linear", "numpy.nanmedian", "numpy.random.permutation", "numpy.nanstd", "torch.nn.LogSoftmax", "numpy.percentile", "numpy.log", "torch.nn.BCEWithLogitsLoss", "numpy.array", "torch.LongTensor", "torch.cat", "torch.nn.Dropout", "torch.nn.LeakyReLU" ] ]
pvalienteverde/MeetUpIntroMLySistemasRecomendacion
[ "50e24dfc5303b0d4a9edaf4ff634d25388351343" ]
[ "scripts/Introduccion/utiles.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef to_hot_encoding(datos,caracteristicas_categoricas): \n for cat in caracteristicas_categoricas:\n one_encoding=pd.get_dummies(datos[cat],prefix=cat)\n datos=pd.concat([datos,one_encoding],axis=1)\n del datos[cat] \n return datos\n\ndef mostrar_imagenes(datos,target=None,prediccion=None):\n fig = plt.figure(figsize=(15, 3))\n n,_=datos.shape\n for i in range(n):\n titulo=''\n if not target is None: \n titulo=\"T:{},\".format(target[i])\n if not prediccion is None:\n titulo=\"{}P:{}\".format(titulo,prediccion[i])\n\n ax = fig.add_subplot(1, n, 1 + i, xticks=[], yticks=[],title=titulo)\n ax.imshow(datos[i].reshape((8, 8)), cmap=plt.cm.binary)\n \n" ]
[ [ "matplotlib.pyplot.figure", "pandas.concat", "pandas.get_dummies" ] ]
Timsbim/Show-COVID-19-Data
[ "170109015eef03d0639fb48840aeb46133beb139" ]
[ "lib/showing.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom lib.basics import *\nfrom lib.prepping import get_base_data\n\n\n# Showing the data\n\n\ndef get_country_data_to_show(date_, plots, *countries, length=1000):\n \"\"\"Returns the data from day date_ for the categories and variants defined\n in the dictionary plots and the countries, all loaded in one dictionary\n \"\"\"\n tbl = pd.read_json(\n get_data_file_path(date_, file_format=\"json.gz\"),\n orient=\"table\",\n compression=\"gzip\",\n ).sort_index()\n\n data = dict()\n for country in countries:\n data[country] = {category: {} for category in plots}\n for category, variant, country in [\n (category, variant, country)\n for category in plots\n for variant in plots[category]\n for country in countries\n ]:\n data[country][category][variant] = tbl.loc[\n (category, variant), country\n ].tail(length)\n\n return data\n\n\ndef get_title_translation():\n \"\"\"Returns dictionary which translates shortcuts in text suitable for plot\n titles\n \"\"\"\n return json.load(get_settings_file_path(\"title_translation\").open(\"r\"))\n\n\ndef setup_ax(ax, days):\n \"\"\"Set up the axes for the plots\"\"\"\n\n # Months\n months = (\n \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\",\n \"Nov\", \"Dec\",\n )\n\n # Setting the viewable range of x-axis\n ax.set_xlim(-2, len(days) + 1)\n\n # Setting the ticks (positions and labels) mostly on x-axis\n\n # Minor ticks: Only 3 for each month, roughly the end of the 1., 2. and 3.\n # week\n minor_ticks = [i for i in range(len(days)) if days[i].day in (8, 16, 24)]\n\n # Minor labels: Day of minor tick, i.e. 8, 16 and 24\n minor_labels = [days[i].day for i in minor_ticks]\n\n # Major: Ticks = months end/beginning, label: short version of months name\n major_ticks = []\n major_labels = []\n month = days[0].month\n for i, day in enumerate(days[1:], start=1):\n # Detecting the beginning of a new month\n if day.month != month:\n major_ticks.append(i)\n month = day.month\n major_labels.append(months[month - 1])\n\n # Actually setting the prepared ticks/labels, including the label size\n ax.xaxis.set_ticks(minor_ticks, minor=True)\n ax.xaxis.set_ticklabels(minor_labels, minor=True)\n ax.xaxis.set_ticks(major_ticks, minor=False)\n ax.xaxis.set_ticklabels(major_labels, minor=False)\n ax.xaxis.set_tick_params(which=\"both\", labelsize=10)\n ax.yaxis.set_tick_params(which=\"both\", labelsize=12)\n\n # Setting the grid\n ax.grid(True, which=\"both\")\n ax.grid(which=\"major\", linestyle=\"dashed\", linewidth=2)\n ax.grid(which=\"minor\", linestyle=\"dashed\")\n\n # Setting the labels of the x-axis, including the font size\n ax.set_xlabel(\"day\", fontsize=14)\n\n\ndef show_countries(date_, *countries, length=1000):\n \"\"\"Creates a standard set of plots for every country provided by the\n argument countries (usually a list). 
The set contains:\n - Confirmed cases, cumulative and diffs (including the 1-week-moving\n average)\n - Deaths, cumulative and diffs (including the 1-week-moving average)\n - Active cases, cumulative and diffs (including the 1-week-moving\n average)\n The plots are available in single-plot files, files per category\n (containing 2 plots), and a file containing all 6 plots\n \"\"\"\n print_log(f\"Plotting countries: {str.join(', ', countries)} ...\")\n\n # Defining the plots that should be included\n plots = {\n \"confirmed\": [\"cum\", \"diff\", \"diff_ma1w\"],\n \"deaths\": [\"cum\", \"diff\", \"diff_ma1w\"],\n \"active\": [\"cum\", \"diff\", \"diff_ma1w\"],\n }\n categories = plots\n\n # Getting the title text bits\n trsl = get_title_translation()\n iso3_to_name = get_base_data(date_, columns=(\"iso3\", \"name\"))\n\n # Read data from files produced by prepare_data\n data = get_country_data_to_show(date_, plots, *countries, length=length)\n\n # Creating the plots for the selected countries\n title_font_size = 30\n for country in countries:\n # Creating the figure which includes all plots\n fig_all, axs_all = plt.subplots(3, 2, figsize=(40, 50))\n fig_all.suptitle(\n iso3_to_name[country], fontsize=title_font_size, fontweight=\"bold\"\n )\n\n for i, category in enumerate(categories):\n # Creating the figure for all plots per category\n fig_cat, axs_cat = plt.subplots(2, 1, figsize=(20, 25))\n fig_cat.suptitle(\n f\"{iso3_to_name[country]} - {trsl[category]}\",\n fontsize=title_font_size,\n fontweight=\"bold\",\n )\n\n for j, variant in enumerate([\"cum\", \"diff\"]):\n series = data[country][category][variant]\n days = list(series.index)\n\n # Creating the figure for single plot (category and variant)\n fig, axs = plt.subplots(figsize=(25, 16))\n fig.suptitle(\n f\"{iso3_to_name[country]} - \"\n f\"{trsl[category]} - {trsl[variant]}\",\n fontsize=title_font_size,\n fontweight=\"bold\",\n )\n for ax in [axs, axs_cat[j], axs_all[i][j]]:\n ax.set_title(\n f\"{trsl[category]} - {trsl[variant]}\", fontsize=20\n )\n setup_ax(ax, days)\n ax.plot(\n list(range(len(series.index))), series.values, \"bo\"\n )\n if variant == \"diff\":\n series_ma = data[country][category][\"diff_ma1w\"]\n ax.plot(\n list(range(len(series_ma.index))),\n series_ma.values,\n \"r-\",\n label=trsl[\"diff_ma1w\"],\n )\n ax.legend(fontsize=\"xx-large\")\n\n # Due to data corrections there are sometimes negative\n # diffs for confirmed cases, which should be always\n # non-negative. 
This can lead to distorted plots and is\n # therefore adjusted by setting the minimum value of the\n # y-axis to -25.\n if category == \"confirmed\" and variant == \"diff\":\n ax.set_ylim(bottom=-25)\n\n # Saving the figure for single plot\n fig.align_labels()\n fig.savefig(\n get_plot_file_path(date_, country, category, variant)\n )\n\n # Saving the figure with all plots per category\n fig_cat.align_labels()\n fig_cat.savefig(get_plot_file_path(date_, country, category))\n\n # Saving the figure with all plots\n fig_all.align_labels()\n fig_all.savefig(get_plot_file_path(date_, country))\n\n plt.close(\"all\")\n\n print_log(f\"Plots for {country} finished\")\n\n print_log(\"Plotting finished\")\n\n\ndef get_group_data_to_show(date_, plots, groups, length=1000):\n \"\"\"Returns the data from day dte for the categories and variants defined\n in the dictionary plots and the groups in list groups, loaded into a\n dictionary\n \"\"\"\n tbl = pd.read_json(\n get_data_file_path(date_, file_format=\"json.gz\"),\n orient=\"table\",\n compression=\"gzip\",\n ).sort_index()\n\n data = dict()\n for group in groups:\n data[group] = {category: {} for category in plots}\n for category, variant, group in [\n (category, variant, group)\n for category in plots\n for variant in plots[category]\n for group in groups\n ]:\n data[group][category][variant] = tbl.loc[\n (category, variant), groups[group]\n ].tail(length)\n\n return data\n\n\ndef show_groups(date_, groups, length=1000):\n \"\"\"Creates a standard set of plots for groups of countries provided by the\n argument groups (a dictionary). The set contains:\n - Confirmed cases per million, cumulative and diffs (including the\n 1-week-moving average)\n - Deaths per 100,000, cumulative and diffs (including the 1-week-moving\n average)\n - Active cases per million, cumulative and diffs (including the\n 1-week-moving average)\n The plots are available in single-plot files, files per category\n (containing 2 plots), and a file containing all 6 plots\n \"\"\"\n # Defining the plots that should be included\n plots = {\n \"confirmed\": [\"cum_rel_popmio\", \"diff_rel_popmio_ma1w\"],\n \"deaths\": [\"cum_rel_pop100k\", \"diff_rel_pop100k_ma1w\"],\n \"active\": [\"cum_rel_popmio\", \"diff_rel_popmio_ma1w\"],\n }\n trsl = get_title_translation()\n categories = plots\n\n # Reading data from files produced by prepare_data\n data = get_group_data_to_show(date_, plots, groups, length)\n\n title_font_size = 30\n for group in groups:\n print_log(\n f\"Plotting group {group} with countries \"\n f\"{str.join(', ', groups[group])} ...\"\n )\n\n # Creating list of countries in group\n countries = groups[group]\n\n # Creating the figure which includes all plots\n fig_all, axs_all = plt.subplots(3, 2, figsize=(40, 50))\n fig_all.suptitle(group, fontsize=title_font_size, fontweight=\"bold\")\n for i, category in enumerate(categories):\n variants = plots[category]\n\n # Creating the figure for all plots per category\n fig_cat, axs_cat = plt.subplots(2, 1, figsize=(20, 25))\n fig_cat.suptitle(\n f\"{group} - {trsl[category]}\",\n fontsize=title_font_size,\n fontweight=\"bold\",\n )\n for j, variant in enumerate(variants):\n # Creating the figure for single plot (category and variant)\n fig, axs = plt.subplots(figsize=(25, 16))\n fig.suptitle(\n f\"{group} - {trsl[category]} - {trsl[variant]}\",\n fontsize=title_font_size,\n fontweight=\"bold\",\n )\n for ax in [axs, axs_cat[j], axs_all[i][j]]:\n ax.set_title(\n f\"{trsl[category]} - {trsl[variant]}\", fontsize=20\n )\n days = 
list(data[group][category][variant].index)\n setup_ax(ax, days)\n ax.plot(\n list(range(len(days))),\n data[group][category][variant][countries],\n \"o\",\n )\n ax.legend(countries)\n\n # Due to data corrections there are sometimes negative\n # diffs for confirmed cases, which should be always\n # non-negative. This can lead to distorted plots and is\n # therefore adjusted by setting the minimum value of the\n # y-axis to -10.\n if (\n category == \"confirmed\"\n and variant == \"diff_rel_popmio_ma1w\"\n ):\n ax.set_ylim(bottom=-10)\n\n # Saving the figure with single plot\n fig.align_labels()\n fig.savefig(\n get_plot_file_path(date_, group, category, variant)\n )\n\n # Saving the figure with all plots per category\n fig_cat.align_labels()\n fig_cat.savefig(get_plot_file_path(date_, group, category))\n\n # Saving the figure with all plots\n fig_all.align_labels()\n fig_all.savefig(get_plot_file_path(date_, group))\n plt.close(\"all\")\n\n print_log(\"Plotting finished\")\n\n\ndef show_countries_beyond_threshold(\n date_, category, variant, threshold, *countries\n):\n \"\"\"Creates a plot for the variable category -> variant for the group of\n countries. Here the plots are \"normalized\": The series starts with the\n day the variable first exceeds the threshold. I.e., the x-axis just\n shows the number of days (past exceeding the threshold), not calendar\n days.\n \"\"\"\n # Fetching the relevant data and loading it into a DataFrame\n plots = {category: [variant]}\n data = get_country_data_to_show(date_, plots, *countries)\n tbl = pd.DataFrame()\n for country in countries:\n tbl.insert(\n loc=len(tbl.columns),\n column=country,\n value=data[country][category][variant],\n )\n\n # Initializing the plot\n fig, ax = plt.subplots(figsize=(20, 7.5))\n\n # Processing the series into the plot\n for country in tbl.columns:\n series = tbl[country]\n series.index = np.arange(len(series.index))\n try:\n # Adjust the series: New series starts with the first day it\n # exceeds the threshold\n start = series[series > threshold].index[0]\n series_new = pd.Series(series[start:].values)\n series_new.name = series.name\n # Adding new series to plot\n ax.plot(series_new, \".\")\n finally:\n # If all values of the series are below the threshold the series\n # isn't included in the plot (obviously)\n pass\n\n # Setting up the plot\n ax.grid(which=\"major\", linestyle=\"dashed\", linewidth=1)\n trsl = get_title_translation()\n ax.set_title(\n f\"{trsl[category]} - {trsl[variant]}: \"\n f\"Days beyond threshold ({threshold})\"\n )\n ax.set_xlabel(f\"days\")\n ax.set_ylabel(f\"{category}\")\n ax.legend(countries)\n plt.show()\n plt.close(\"all\")\n" ]
[ [ "pandas.Series", "pandas.DataFrame", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.close" ] ]
wclark3/machine-learning
[ "f4f09d6d1efa022d9c34647883e49ae8e2f1fe6c" ]
[ "final-project/old/gpu-tutorial/kfkd.py" ]
[ "# file kfkd.py\nimport os\n\nimport numpy as np\nfrom pandas.io.parsers import read_csv\nfrom sklearn.utils import shuffle\n\n# Implicit import needed in newer theano versions\n# https://groups.google.com/forum/#!msg/lasagne-users/gEtFrC8mkms/0oNCDbSKbTkJ\nimport lasagne.layers.cuda_convnet\n\nFTRAIN = '../data/training.csv'\nFTEST = '../data/test.csv'\n\n\ndef load(test=False, cols=None):\n \"\"\"Loads data from FTEST if *test* is True, otherwise from FTRAIN.\n Pass a list of *cols* if you're only interested in a subset of the\n target columns.\n \"\"\"\n fname = FTEST if test else FTRAIN\n df = read_csv(os.path.expanduser(fname)) # load pandas dataframe\n\n # The Image column has pixel values separated by space; convert\n # the values to numpy arrays:\n df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))\n\n if cols: # get a subset of columns\n df = df[list(cols) + ['Image']]\n\n print(df.count()) # prints the number of values for each column\n df = df.dropna() # drop all rows that have missing values in them\n\n X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]\n X = X.astype(np.float32)\n\n if not test: # only FTRAIN has any target columns\n y = df[df.columns[:-1]].values\n y = (y - 48) / 48 # scale target coordinates to [-1, 1]\n X, y = shuffle(X, y, random_state=42) # shuffle train data\n y = y.astype(np.float32)\n else:\n y = None\n\n return X, y\n\n\nfrom lasagne import layers\nfrom lasagne.updates import nesterov_momentum\nfrom nolearn.lasagne import NeuralNet\n\nnet1 = NeuralNet(\n layers=[ # three layers: one hidden layer\n ('input', layers.InputLayer),\n ('hidden', layers.DenseLayer),\n ('output', layers.DenseLayer),\n ],\n # layer parameters:\n input_shape=(128, 9216), # 128 images per batch times 96x96 input pixels\n hidden_num_units=100, # number of units in hidden layer\n output_nonlinearity=None, # output layer uses identity function\n output_num_units=30, # 30 target values\n\n # optimization method:\n update=nesterov_momentum,\n update_learning_rate=0.01,\n update_momentum=0.9,\n\n regression=True, # flag to indicate we're dealing with regression problem\n max_epochs=400, # we want to train this many epochs\n verbose=1,\n )\n\nX, y = load()\nnet1.fit(X, y)\n\ndef load2d(test=False, cols=None):\n X, y = load(test=test)\n X = X.reshape(-1, 1, 96, 96)\n return X, y\n\n\n# use the cuda-convnet implementations of conv and max-pool layer\nConv2DLayer = layers.cuda_convnet.Conv2DCCLayer\nMaxPool2DLayer = layers.cuda_convnet.MaxPool2DCCLayer\n\nnet2 = NeuralNet(\n layers=[\n ('input', layers.InputLayer),\n ('conv1', layers.Conv2DLayer),\n ('pool1', layers.MaxPool2DLayer),\n ('conv2', layers.Conv2DLayer),\n ('pool2', layers.MaxPool2DLayer),\n ('conv3', layers.Conv2DLayer),\n ('pool3', layers.MaxPool2DLayer),\n ('hidden4', layers.DenseLayer),\n ('hidden5', layers.DenseLayer),\n ('output', layers.DenseLayer),\n ],\n input_shape=(None, 1, 96, 96),\n conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),\n conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),\n conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),\n hidden4_num_units=500, hidden5_num_units=500,\n output_num_units=30, output_nonlinearity=None,\n\n update_learning_rate=0.01,\n update_momentum=0.9,\n\n regression=True,\n max_epochs=1000,\n verbose=1,\n )\n\n#X, y = load2d() # load 2-d data\n#net2.fit(X, y)\n\n# Training for 1000 epochs will take a while. 
We'll pickle the\n# trained model so that we can load it back later:\nimport cPickle as pickle\nwith open('net2.pickle', 'wb') as f:\n pickle.dump(net2, f, -1)\n" ]
[ [ "numpy.vstack", "numpy.fromstring", "sklearn.utils.shuffle" ] ]
nanfeng1101/DBQA
[ "56b0a320e4641f46e80db78039d4ca79e9037d7e" ]
[ "torch_models/qa_cnn.py" ]
[ "# -*- coding:utf-8 -*-\n__author__ = 'chenjun'\n\nimport torch\nimport torch.nn as nn\nfrom torch_models.layers import InteractLayer, BatchNormLayer, MLP, MLPDropout\n\n\nclass CNNModule(nn.Module):\n \"\"\"\n qa_cnn module.\n \"\"\"\n def __init__(self, feature_maps, filter_shape, pool_size, channels=1):\n \"\"\"\n qa_cnn module init.\n :param feature_maps: feature maps(filter_num) after convolution.\n :param filter_shape: filter shape for convolution.\n :param pool_size: pool size for max pooling.\n :param channels: in channels, default=1.\n \"\"\"\n super(CNNModule, self).__init__()\n self.cnn_layer = nn.Sequential(nn.Conv2d(channels, feature_maps, filter_shape),\n nn.ReLU(),\n nn.MaxPool2d(pool_size)\n )\n\n def forward(self, q_input, a_input):\n \"\"\"\n convolution + max_pool for q_input and a_input.\n :param q_input: q_input vec.\n :param a_input: a_inut vec.\n :return:\n \"\"\"\n q_out = self.cnn_layer(q_input)\n a_out = self.cnn_layer(a_input)\n return q_out, a_out\n\n\nclass InceptionModule(nn.Module):\n \"\"\"\n simple inception module.\n \"\"\"\n def __init__(self, img_h, img_w, filter_windows, filter_num):\n \"\"\"\n inception module init.\n :param img_h: sentence length\n :param img_w: embedding dim\n :param filter_windows: multi filter height\n :param filter_num: feature maps\n \"\"\"\n super(InceptionModule, self).__init__()\n self.img_h = img_h\n self.img_w = img_w\n self.filter_windows = filter_windows\n self.filter_num = filter_num\n self.num_feature_maps = len(filter_windows) * filter_num\n self.layers_num, self.filter_shapes, self.pool_sizes = self.param()\n for i, filter_shape, pool_size in zip(self.layers_num, self.filter_shapes, self.pool_sizes):\n self.add_module(name=\"cnn_layer_{}\".format(i), module=CNNModule(self.filter_num, filter_shape, pool_size))\n\n def param(self):\n \"\"\"\n get param(filter_shape and pool_size) for cnn module.\n :return:\n \"\"\"\n filter_shapes = []\n pool_sizes = []\n layers_num = []\n for i, filter_h in enumerate(self.filter_windows):\n filter_shapes.append((filter_h, self.img_w))\n pool_sizes.append((self.img_h - filter_h + 1, 1))\n layers_num.append(i)\n return layers_num, filter_shapes, pool_sizes\n\n def forward(self, q_input, a_input):\n \"\"\"\n concat outputs of multi-cnn-layer(conv+max_pool) with q_input vec and a_input vec.\n :param q_input: q_input vec\n :param a_input: a_input vec\n :return:\n \"\"\"\n q_output, a_output = [], []\n for cnn_layer in self.children():\n q_out, a_out = cnn_layer(q_input, a_input)\n q_output.append(q_out)\n a_output.append(a_out)\n q_vec = torch.cat(q_output, dim=1).view(-1, self.num_feature_maps) # batch * num_feature_maps\n a_vec = torch.cat(a_output, dim=1).view(-1, self.num_feature_maps)\n return q_vec, a_vec\n\n\nclass QACNNModel(nn.Module):\n \"\"\"\n cnn model for QA pair.\n \"\"\"\n def __init__(self, embedding, img_h, img_w, filter_windows, filter_num, n_in, n_hidden, n_out):\n \"\"\"\n model init.\n :param embedding: word embedding.\n :param img_h: sentence length.\n :param img_w: embedding dim.\n :param filter_windows: collection of filter height.\n :param filter_num: feature maps.\n :param n_in: input size for mlp\n :param n_hidden: hidden size for mlp\n :param n_out: out size for mlp\n \"\"\"\n super(QACNNModel, self).__init__()\n self.embedding = embedding\n self.img_h = img_h\n self.img_w = img_w\n self.filter_windows = filter_windows\n self.filter_num = filter_num\n self.input_size = n_in\n self.hidden_size = n_hidden\n self.out_size = n_out\n self.num_feature_maps = 
len(self.filter_windows) * self.filter_num\n self.inception_module_layers = InceptionModule(self.img_h, self.img_w, self.filter_windows, self.filter_num)\n self.interact_layer = InteractLayer(self.num_feature_maps, self.num_feature_maps, self.input_size)\n self.bn_layer = BatchNormLayer(self.input_size)\n self.mlp = MLPDropout(self.input_size, self.hidden_size, self.out_size)\n\n def forward(self, q_input, a_input, drop_rate):\n \"\"\"\n input -> embedding_layer -> multi_cnn_layer -> interact_layer -> batchnorm_layer -> mlp_layer\n :param q_input: question sentence vec\n :param a_input: answer sentence vec\n :param: drop_rate: dropout rate\n :return:\n \"\"\"\n q_input_emb = torch.unsqueeze(self.embedding(q_input), dim=1)\n a_input_emb = torch.unsqueeze(self.embedding(a_input), dim=1)\n q_vec, a_vec = self.inception_module_layers(q_input_emb, a_input_emb)\n qa_vec = self.interact_layer(q_vec, a_vec)\n bn_vec = self.bn_layer(qa_vec)\n prop, cate = self.mlp(bn_vec, drop_rate)\n return prop, cate\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.cat" ] ]
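A shape check for one CNNModule branch from the record above (illustration only; the sentence length, embedding width and filter sizes are hypothetical). The convolution over a (filter_h, img_w) window leaves an (img_h - filter_h + 1) x 1 map, so the (img_h - filter_h + 1, 1) max-pool computed in InceptionModule.param() reduces each branch to one value per feature map:

import torch
import torch.nn as nn

B, img_h, img_w, filter_h, filter_num = 4, 30, 300, 3, 100
branch = nn.Sequential(
    nn.Conv2d(1, filter_num, (filter_h, img_w)),   # same layout as CNNModule's cnn_layer
    nn.ReLU(),
    nn.MaxPool2d((img_h - filter_h + 1, 1)),
)
x = torch.randn(B, 1, img_h, img_w)                # (batch, channel, tokens, embed_dim)
print(branch(x).shape)                             # torch.Size([4, 100, 1, 1])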
haifeng-jin/SMAC3
[ "999d04408f084ec085844255b592e485dba241dd" ]
[ "examples/rf.py" ]
[ "import logging\nimport os\nimport inspect\n\nimport numpy as np\nfrom sklearn.metrics import make_scorer\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.datasets import load_boston\n\nfrom smac.configspace import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter, \\\n UniformFloatHyperparameter, UniformIntegerHyperparameter\n\nfrom smac.tae.execute_func import ExecuteTAFuncDict\nfrom smac.scenario.scenario import Scenario\nfrom smac.facade.smac_facade import SMAC\n\nboston = load_boston()\n\ndef rf_from_cfg(cfg, seed):\n \"\"\"\n Creates a random forest regressor from sklearn and fits the given data on it.\n This is the function-call we try to optimize. Chosen values are stored in\n the configuration (cfg).\n\n Parameters:\n -----------\n cfg: Configuration\n configuration chosen by smac\n seed: int or RandomState\n used to initialize the rf's random generator\n\n Returns:\n -----------\n np.mean(rmses): float\n mean of root mean square errors of random-forest test predictions\n per cv-fold\n \"\"\"\n rfr = RandomForestRegressor(\n n_estimators=cfg[\"num_trees\"],\n criterion=cfg[\"criterion\"],\n min_samples_split=cfg[\"min_samples_to_split\"],\n min_samples_leaf=cfg[\"min_samples_in_leaf\"],\n min_weight_fraction_leaf=cfg[\"min_weight_frac_leaf\"],\n max_features=cfg[\"max_features\"],\n max_leaf_nodes=cfg[\"max_leaf_nodes\"],\n bootstrap=cfg[\"do_bootstrapping\"],\n random_state=seed)\n\n def rmse(y, y_pred):\n return np.sqrt(np.mean((y_pred - y)**2))\n # Creating root mean square error for sklearns crossvalidation\n rmse_scorer = make_scorer(rmse, greater_is_better=False)\n score = cross_val_score(rfr, boston.data, boston.target, cv=11, scoring=rmse_scorer)\n return -1 * np.mean(score) # Because cross_validation sign-flips the score\n\n\nlogger = logging.getLogger(\"RF-example\")\nlogging.basicConfig(level=logging.INFO)\n#logging.basicConfig(level=logging.DEBUG) # Enable to show debug-output\nlogger.info(\"Running random forest example for SMAC. 
If you experience \"\n \"difficulties, try to decrease the memory-limit.\")\n\n# Build Configuration Space which defines all parameters and their ranges.\n# To illustrate different parameter types,\n# we use continuous, integer and categorical parameters.\ncs = ConfigurationSpace()\n\n# We can add single hyperparameters:\ndo_bootstrapping = CategoricalHyperparameter(\n \"do_bootstrapping\", [\"true\", \"false\"], default_value=\"true\")\ncs.add_hyperparameter(do_bootstrapping)\n\n# Or we can add multiple hyperparameters at once:\nnum_trees = UniformIntegerHyperparameter(\"num_trees\", 10, 50, default_value=10)\nmax_features = UniformIntegerHyperparameter(\"max_features\", 1, boston.data.shape[1], default_value=1)\nmin_weight_frac_leaf = UniformFloatHyperparameter(\"min_weight_frac_leaf\", 0.0, 0.5, default_value=0.0)\ncriterion = CategoricalHyperparameter(\"criterion\", [\"mse\", \"mae\"], default_value=\"mse\")\nmin_samples_to_split = UniformIntegerHyperparameter(\"min_samples_to_split\", 2, 20, default_value=2)\nmin_samples_in_leaf = UniformIntegerHyperparameter(\"min_samples_in_leaf\", 1, 20, default_value=1)\nmax_leaf_nodes = UniformIntegerHyperparameter(\"max_leaf_nodes\", 10, 1000, default_value=100)\n\ncs.add_hyperparameters([num_trees, min_weight_frac_leaf, criterion,\n max_features, min_samples_to_split, min_samples_in_leaf, max_leaf_nodes])\n\n# SMAC scenario oject\nscenario = Scenario({\"run_obj\": \"quality\", # we optimize quality (alternative runtime)\n \"runcount-limit\": 50, # maximum number of function evaluations\n \"cs\": cs, # configuration space\n \"deterministic\": \"true\",\n \"memory_limit\": 3072, # adapt this to reasonable value for your hardware\n })\n\n# To optimize, we pass the function to the SMAC-object\nsmac = SMAC(scenario=scenario, rng=np.random.RandomState(42),\n tae_runner=rf_from_cfg)\n\n# Example call of the function with default values\n# It returns: Status, Cost, Runtime, Additional Infos\ndef_value = smac.get_tae_runner().run(cs.get_default_configuration(), 1)[1]\nprint(\"Value for default configuration: %.2f\" % (def_value))\n\n# Start optimization\ntry:\n incumbent = smac.optimize()\nfinally:\n incumbent = smac.solver.incumbent\n\ninc_value = smac.get_tae_runner().run(incumbent, 1)[1]\nprint(\"Optimized Value: %.2f\" % (inc_value))\n" ]
[ [ "numpy.random.RandomState", "sklearn.metrics.make_scorer", "sklearn.datasets.load_boston", "sklearn.cross_validation.cross_val_score", "sklearn.ensemble.RandomForestRegressor", "numpy.mean" ] ]
igctty/pydy
[ "ea1b6902a6625e66d0f41d06e12bbd1e7096fdf9" ]
[ "src/dl_iris_data_processing.py" ]
[ "# coding: UTF-8\n\nimport numpy as np\nfrom sklearn import datasets\nimport chainer\nfrom chainer import Variable,Chain\nimport chainer.links as L\nimport chainer.functions as F\nimport chainer.optimizers as O\n\n# データ読み込み\niris_data = datasets.load_iris()\n# print(iris_data)\n\n# データの取り出し\nx = iris_data.data.astype(np.float32) # iris 花の特徴を表す4種のデータ\nt = iris_data.target # 品種を表す数値\nn = t.size # 品種を表す数値のサイズ\n# print(x)\n# print(t)\n# print(n)\n\n# 教師データの準備\nt_matrix = np.zeros(3*n).reshape(n, 3).astype(np.float32)\nfor i in range(n):\n t_matrix[i,t[i]] = 1.0\n\n# print(t_matrix)\n\n# 訓練用データとテスト用データ 半分ずつ\nindexes = np.arange(n)\nindexes_training = indexes[indexes%2 != 0]\nindexes_test = indexes[indexes%2 == 0]\n\n# print(indexes)\n# print(indexes_training)\n# print(indexes_test)\n\nx_training = x[indexes_training, : ] # 訓練用 入力\nt_training = t_matrix[indexes_training, : ] # 訓練用 正解\nx_test = x[indexes_test, : ] # テスト用 入力\nt_test = t[indexes_test] # テスト用 正解\n\n# print(x_training)\n# print(x_test)\n# print(t_training)\n# print(t_test)\n\n# Variable に変換\nx_training_v = Variable(x_training)\nt_training_v = Variable(t_training)\nx_test_v = Variable(x_test)\n\nprint(x_training_v)\nprint(t_training_v)\nprint(x_test_v)\n" ]
[ [ "numpy.arange", "numpy.zeros", "sklearn.datasets.load_iris" ] ]
IceCreamWW/ESPnet-informed-se
[ "38471b7749b7df0fadeae500cf8a050ac66872d2" ]
[ "espnet2/enh/espnet_enh_informed_model.py" ]
[ "from distutils.version import LooseVersion\nfrom functools import reduce\nfrom itertools import permutations\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Tuple\n\nimport torch\nfrom torch_complex.tensor import ComplexTensor\nfrom typeguard import check_argument_types\n\nfrom espnet2.enh.informed_encoder.abs_informed_encoder import AbsInformedEncoder\nfrom espnet2.enh.encoder.abs_encoder import AbsEncoder\nfrom espnet2.enh.encoder.conv_encoder import ConvEncoder\nfrom espnet2.enh.fusion.abs_fusion import AbsFusion\nfrom espnet2.enh.separator.abs_separator import AbsSeparator\nfrom espnet2.enh.decoder.abs_decoder import AbsDecoder\nfrom espnet2.torch_utils.device_funcs import force_gatherable\nfrom espnet2.train.abs_espnet_model import AbsESPnetModel\nfrom espnet.nets.pytorch_backend.transformer.label_smoothing_loss import (\n LabelSmoothingLoss, # noqa: H301\n)\nfrom espnet.nets.pytorch_backend.nets_utils import make_pad_mask, make_non_pad_mask\nfrom einops import rearrange\nimport copy\n\nimport logging\nimport pdb\n\n\nis_torch_1_3_plus = LooseVersion(torch.__version__) >= LooseVersion(\"1.3.0\")\nALL_LOSS_TYPES = (\n # mse_loss(predicted_mask, target_label)\n \"mask_mse\",\n # mse_loss(enhanced_magnitude_spectrum, target_magnitude_spectrum)\n \"magnitude\",\n # mse_loss(enhanced_complex_spectrum, target_complex_spectrum)\n \"spectrum\",\n # log_mse_loss(enhanced_complex_spectrum, target_complex_spectrum)\n \"spectrum_log\",\n # si_snr(enhanced_waveform, target_waveform)\n \"si_snr\",\n)\nEPS = torch.finfo(torch.get_default_dtype()).eps\n\n\nclass ESPnetEnhancementInformedModel(AbsESPnetModel):\n \"\"\"Speech enhancement or separation Frontend model\"\"\"\n\n def __init__(\n self,\n informed_encoder: AbsInformedEncoder,\n fusion: AbsFusion,\n encoder: AbsEncoder,\n separator: AbsSeparator,\n decoder: AbsDecoder,\n stft_consistency: bool = False,\n loss_type: str = \"mask_mse\",\n mask_type: Optional[str] = None,\n component_loss: bool = False,\n informed_ali_mtl: float = 0.0,\n informed_ali_mtl_lastn: int = 2,\n informed_ali_mtl_sil_scale: float = 1,\n disable_enh_loss: bool = False,\n expand_informed: bool = False,\n mask_before_fusion: bool = True,\n detached_fusion: bool = False,\n multi_grained: bool = False,\n ):\n assert check_argument_types()\n\n super().__init__()\n\n self.informed_encoder = informed_encoder\n self.encoder = encoder\n self.fusion = fusion\n self.separator = separator\n self.decoder = decoder\n self.num_spk = separator.num_spk\n self.num_noise_type = getattr(self.separator, \"num_noise_type\", 1)\n self.component_loss = component_loss\n self.informed_ali_mtl = informed_ali_mtl\n self.informed_ali_mtl_lastn = informed_ali_mtl_lastn\n self.informed_ali_mtl_sil_scale = informed_ali_mtl_sil_scale\n self.disable_enh_loss = disable_enh_loss\n self.expand_informed = expand_informed\n self.mask_before_fusion = mask_before_fusion\n self.detached_fusion = detached_fusion\n self.multi_grained = multi_grained\n\n if loss_type != \"si_snr\" and isinstance(encoder, ConvEncoder):\n raise TypeError(f\"{loss_type} is not supported with {type(ConvEncoder)}\")\n\n # get mask type for TF-domain models (only used when loss_type=\"mask_*\")\n self.mask_type = mask_type.upper() if mask_type else None\n # get loss type for model training\n self.loss_type = loss_type\n # whether to compute the TF-domain loss while enforcing STFT consistency\n self.stft_consistency = stft_consistency\n\n if stft_consistency and loss_type in [\"mask_mse\", \"si_snr\"]:\n raise 
ValueError(\n f\"stft_consistency will not work when '{loss_type}' loss is used\"\n )\n\n assert self.loss_type in ALL_LOSS_TYPES, self.loss_type\n # for multi-channel signal\n self.ref_channel = getattr(self.separator, \"ref_channel\", -1)\n\n\n if self.informed_ali_mtl != 0:\n self.criterion_align = LabelSmoothingLoss(\n size=self.informed_encoder.input_size,\n padding_idx=-1,\n smoothing=0,\n normalize_length=True,\n scales={1:self.informed_ali_mtl_sil_scale},\n )\n\n \n def make_post_from_att(self, informed, informed_lengths, attn):\n bsz = informed.shape[0]\n vocab_size = self.informed_encoder.input_size\n M = torch.zeros(bsz, max(informed_lengths), vocab_size)\n for b in range(bsz):\n M[b,torch.arange(informed_lengths[b]),informed[b]] = 1\n M = M.to(attn.device)\n post = torch.bmm(attn, M)\n return post\n\n def make_ali_from_kaldi_ali(self, kaldi_ali, frame_len, frame_shift, real_lens):\n assert (160 / frame_shift) == (160 // frame_shift), f\"frame_shift {frame_shift} cannot be divided by 160\"\n repeats = 160 // frame_shift\n ali_real = make_non_pad_mask(real_lens).type(real_lens.dtype) # sil = 1, padding = -1\n\n # start from half of 15ms (offset the first frame to center), 16 samples per ms \n start = round((7.5 * 16 - (frame_len - frame_shift) / 2) / frame_shift)\n ali_real[:,start:kaldi_ali.shape[1] * repeats + start] = rearrange(kaldi_ali.unsqueeze(-1).repeat(1,1,repeats), 'b d r -> b (d r)')\n ali_real = ali_real.to(kaldi_ali.device)\n return ali_real\n\n\n @staticmethod\n def _create_mask_label(mix_spec, ref_spec, mask_type=\"IAM\"):\n \"\"\"Create mask label.\n\n Args:\n mix_spec: ComplexTensor(B, T, F)\n ref_spec: List[ComplexTensor(B, T, F), ...]\n mask_type: str\n Returns:\n labels: List[Tensor(B, T, F), ...] or List[ComplexTensor(B, T, F), ...]\n \"\"\"\n\n # Must be upper case\n assert mask_type in [\n \"IBM\",\n \"IRM\",\n \"IAM\",\n \"PSM\",\n \"NPSM\",\n \"PSM^2\",\n ], f\"mask type {mask_type} not supported\"\n mask_label = []\n for r in ref_spec:\n mask = None\n if mask_type == \"IBM\":\n flags = [abs(r) >= abs(n) for n in ref_spec]\n mask = reduce(lambda x, y: x * y, flags)\n mask = mask.int()\n elif mask_type == \"IRM\":\n # TODO(Wangyou): need to fix this,\n # as noise referecens are provided separately\n mask = abs(r) / (sum(([abs(n) for n in ref_spec])) + EPS)\n elif mask_type == \"IAM\":\n mask = abs(r) / (abs(mix_spec) + EPS)\n mask = mask.clamp(min=0, max=1)\n elif mask_type == \"PSM\" or mask_type == \"NPSM\":\n phase_r = r / (abs(r) + EPS)\n phase_mix = mix_spec / (abs(mix_spec) + EPS)\n # cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)\n cos_theta = (\n phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag\n )\n mask = (abs(r) / (abs(mix_spec) + EPS)) * cos_theta\n mask = (\n mask.clamp(min=0, max=1)\n if mask_type == \"NPSM\"\n else mask.clamp(min=-1, max=1)\n )\n elif mask_type == \"PSM^2\":\n # This is for training beamforming masks\n phase_r = r / (abs(r) + EPS)\n phase_mix = mix_spec / (abs(mix_spec) + EPS)\n # cos(a - b) = cos(a)*cos(b) + sin(a)*sin(b)\n cos_theta = (\n phase_r.real * phase_mix.real + phase_r.imag * phase_mix.imag\n )\n mask = (abs(r).pow(2) / (abs(mix_spec).pow(2) + EPS)) * cos_theta\n mask = mask.clamp(min=-1, max=1)\n assert mask is not None, f\"mask type {mask_type} not supported\"\n mask_label.append(mask)\n return mask_label\n\n def forward(\n self,\n speech_mix: torch.Tensor,\n informed: torch.Tensor,\n speech_mix_lengths: torch.Tensor = None,\n informed_lengths: torch.Tensor = None,\n informed_ali_ref: torch.Tensor = 
None,\n **kwargs,\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:\n \"\"\"Frontend + Encoder + Decoder + Calc loss\n\n Args:\n speech_mix: (Batch, samples) or (Batch, samples, channels)\n speech_ref: (Batch, num_speaker, samples)\n or (Batch, num_speaker, samples, channels)\n speech_mix_lengths: (Batch,), default None for chunk interator,\n because the chunk-iterator does not have the\n speech_lengths returned. see in\n espnet2/iterators/chunk_iter_factory.py\n \"\"\"\n # clean speech signal of each speaker\n speech_ref = [\n kwargs[\"speech_ref{}\".format(spk + 1)] for spk in range(self.num_spk)\n ]\n # (Batch, num_speaker, samples) or (Batch, num_speaker, samples, channels)\n speech_ref = torch.stack(speech_ref, dim=1)\n\n if \"noise_ref1\" in kwargs:\n # noise signal (optional, required when using\n # frontend models with beamformering)\n noise_ref = [\n kwargs[\"noise_ref{}\".format(n + 1)] for n in range(self.num_noise_type)\n ]\n # (Batch, num_noise_type, samples) or\n # (Batch, num_noise_type, samples, channels)\n noise_ref = torch.stack(noise_ref, dim=1)\n else:\n noise_ref = None\n\n # dereverberated (noisy) signal\n # (optional, only used for frontend models with WPE)\n if \"dereverb_ref1\" in kwargs:\n # noise signal (optional, required when using\n # frontend models with beamformering)\n dereverb_speech_ref = [\n kwargs[\"dereverb_ref{}\".format(n + 1)]\n for n in range(self.num_spk)\n if \"dereverb_ref{}\".format(n + 1) in kwargs\n ]\n assert len(dereverb_speech_ref) in (1, self.num_spk), len(\n dereverb_speech_ref\n )\n # (Batch, N, samples) or (Batch, N, samples, channels)\n dereverb_speech_ref = torch.stack(dereverb_speech_ref, dim=1)\n else:\n dereverb_speech_ref = None\n\n batch_size = speech_mix.shape[0]\n speech_lengths = (\n speech_mix_lengths\n if speech_mix_lengths is not None\n else torch.ones(batch_size).int().fill_(speech_mix.shape[1])\n )\n assert speech_lengths.dim() == 1, speech_lengths.shape\n # Check that batch_size is unified\n assert speech_mix.shape[0] == speech_ref.shape[0] == speech_lengths.shape[0], (\n speech_mix.shape,\n speech_ref.shape,\n speech_lengths.shape,\n )\n\n # for data-parallel\n speech_ref = speech_ref[:, :, : speech_lengths.max()]\n speech_mix = speech_mix[:, : speech_lengths.max()]\n\n\n loss, speech_pre, others, out_lengths, perm = self._compute_loss(\n speech_mix,\n informed,\n speech_lengths,\n informed_lengths,\n speech_ref,\n dereverb_speech_ref=dereverb_speech_ref,\n noise_ref=noise_ref,\n informed_ali_ref=informed_ali_ref\n )\n\n # raise RuntimeError(\"out of memory\")\n\n # add stats for logging\n if self.loss_type != \"si_snr\":\n if self.training:\n si_snr = None\n else:\n speech_pre = [self.decoder(ps, speech_lengths)[0] for ps in speech_pre]\n speech_ref = torch.unbind(speech_ref, dim=1)\n if speech_ref[0].dim() == 3:\n # For si_snr loss, only select one channel as the reference\n speech_ref = [sr[..., self.ref_channel] for sr in speech_ref]\n # compute si-snr loss\n si_snr_loss, perm = self._permutation_loss(\n speech_ref, speech_pre, self.si_snr_loss, perm=perm\n )\n si_snr = -si_snr_loss.detach()\n\n stats = dict(\n si_snr=si_snr,\n loss=loss.detach(),\n )\n else:\n stats = dict(si_snr=-loss.detach(), loss=loss.detach())\n\n\n # informed align CE loss\n if self.informed_ali_mtl > 0:\n assert informed_ali_ref is not None, \"informed align ref is not available\"\n assert isinstance(self.encoder, ConvEncoder), \"informed align mtl support only ConvEncoder\"\n assert (160 / self.encoder.stride) == (160 // 
self.encoder.stride), f\"encoder stride {self.encoder.stride} cannot be divided by 160\"\n\n repeats = 160 // self.encoder.stride\n speech_flens = (speech_mix_lengths - self.encoder.kernel_size) // self.encoder.stride + 1\n informed_ali_ref_re = make_non_pad_mask(speech_flens).type(speech_flens.dtype) * 2 - 1 # sil = 1, padding = -1\n\n # start from half of 15ms (offset the first frame to center), 16 samples per ms \n start = round((7.5 * 16 - (self.encoder.kernel_size - self.encoder.stride) / 2) / self.encoder.stride)\n informed_ali_ref_re[:,start:informed_ali_ref.shape[1] * repeats + start] = rearrange(informed_ali_ref.unsqueeze(-1).repeat(1,1,repeats), 'b d r -> b (d r)')\n informed_ali_ref_re = informed_ali_ref_re.to(informed_ali_ref.device)\n\n loss_align = 0\n for idx in range(-1, -1 - self.informed_ali_mtl_lastn, -1):\n post = self.make_post_from_att(informed, informed_lengths, self.fusion.encoders[idx].src_attn.attn[:,0,:,:])\n loss_align += self.criterion_align(post, informed_ali_ref_re)\n loss_align /= self.informed_ali_mtl_lastn\n\n stats[\"loss_align\"] = loss_align.detach()\n\n if self.disable_enh_loss:\n loss = loss_align\n stats[\"loss\"] = loss.detach()\n del stats[\"si_snr\"]\n else:\n loss += loss_align * self.informed_ali_mtl\n stats[\"loss\"] = loss.detach()\n\n\n # force_gatherable: to-device and to-tensor if scalar for DataParallel\n loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)\n return loss, stats, weight\n\n def _compute_loss(\n self,\n speech_mix,\n informed,\n speech_lengths,\n informed_lengths,\n speech_ref,\n dereverb_speech_ref=None,\n noise_ref=None,\n cal_loss=True,\n informed_ali_ref=None\n ):\n \"\"\"Compute loss according to self.loss_type.\n\n Args:\n speech_mix: (Batch, samples) or (Batch, samples, channels)\n speech_lengths: (Batch,), default None for chunk interator,\n because the chunk-iterator does not have the\n speech_lengths returned. 
see in\n espnet2/iterators/chunk_iter_factory.py\n speech_ref: (Batch, num_speaker, samples)\n or (Batch, num_speaker, samples, channels)\n dereverb_speech_ref: (Batch, N, samples)\n or (Batch, num_speaker, samples, channels)\n noise_ref: (Batch, num_noise_type, samples)\n or (Batch, num_speaker, samples, channels)\n cal_loss: whether to calculate enh loss, defualt is True\n\n Returns:\n loss: (torch.Tensor) speech enhancement loss\n speech_pre: (List[torch.Tensor] or List[ComplexTensor])\n enhanced speech or spectrum(s)\n others: (OrderedDict) estimated masks or None\n output_lengths: (Batch,)\n perm: () best permutation\n \"\"\"\n\n # pdb.set_trace()\n speech_feature_mix, speech_flens = self.encoder(speech_mix, speech_lengths)\n if self.expand_informed:\n informed = self.make_ali_from_kaldi_ali(informed_ali_ref, self.encoder.kernel_size, self.encoder.stride, speech_flens)\n informed_lengths = speech_flens\n\n informed_feature, informed_flens = self.informed_encoder(informed, informed_lengths)\n\n if self.detached_fusion:\n assert self.mask_before_fusion, \"detached fusion must work together with mask_before_fusion\"\n detached_speech_feature_mix = speech_feature_mix.detach()\n feature_mix, flens, _ = self.fusion(detached_speech_feature_mix, informed_feature, speech_flens, informed_flens)\n else:\n feature_mix, flens, _ = self.fusion(speech_feature_mix, informed_feature, speech_flens, informed_flens)\n\n if self.mask_before_fusion:\n feature_pre, flens, others = self.separator(feature_mix, flens, speech_feature_mix)\n else:\n feature_pre, flens, others = self.separator(feature_mix, flens)\n\n\n if self.loss_type != \"si_snr\":\n spectrum_mix = feature_mix\n spectrum_pre = feature_pre\n # predict separated speech and masks\n if self.stft_consistency:\n # pseudo STFT -> time-domain -> STFT (compute loss)\n tmp_t_domain = [\n self.decoder(sp, speech_lengths)[0] for sp in spectrum_pre\n ]\n spectrum_pre = [\n self.encoder(sp, speech_lengths)[0] for sp in tmp_t_domain\n ]\n pass\n\n if spectrum_pre is not None and not isinstance(\n spectrum_pre[0], ComplexTensor\n ):\n spectrum_pre = [\n ComplexTensor(*torch.unbind(sp, dim=-1)) for sp in spectrum_pre\n ]\n\n if not cal_loss:\n loss, perm = None, None\n return loss, spectrum_pre, others, flens, perm\n\n # prepare reference speech and reference spectrum\n speech_ref = torch.unbind(speech_ref, dim=1)\n # List[ComplexTensor(Batch, T, F)] or List[ComplexTensor(Batch, T, C, F)]\n spectrum_ref = [self.encoder(sr, speech_lengths)[0] for sr in speech_ref]\n\n\n # compute TF masking loss\n if self.loss_type == \"magnitude\":\n # compute loss on magnitude spectrum\n assert spectrum_pre is not None\n magnitude_pre = [abs(ps + 1e-15) for ps in spectrum_pre]\n if spectrum_ref[0].dim() > magnitude_pre[0].dim():\n # only select one channel as the reference\n magnitude_ref = [\n abs(sr[..., self.ref_channel, :]) for sr in spectrum_ref\n ]\n else:\n magnitude_ref = [abs(sr) for sr in spectrum_ref]\n\n tf_loss, perm = self._permutation_loss(\n magnitude_ref, magnitude_pre, self.tf_mse_loss\n )\n elif self.loss_type.startswith(\"spectrum\"):\n # compute loss on complex spectrum\n if self.loss_type == \"spectrum\":\n loss_func = self.tf_mse_loss\n elif self.loss_type == \"spectrum_log\":\n loss_func = self.tf_log_mse_loss\n else:\n raise ValueError(\"Unsupported loss type: %s\" % self.loss_type)\n\n assert spectrum_pre is not None\n if spectrum_ref[0].dim() > spectrum_pre[0].dim():\n # only select one channel as the reference\n spectrum_ref = [sr[..., 
self.ref_channel, :] for sr in spectrum_ref]\n\n tf_loss, perm = self._permutation_loss(\n spectrum_ref, spectrum_pre, loss_func\n )\n elif self.loss_type.startswith(\"mask\"):\n if self.loss_type == \"mask_mse\":\n loss_func = self.tf_mse_loss\n else:\n raise ValueError(\"Unsupported loss type: %s\" % self.loss_type)\n\n assert others is not None\n mask_pre_ = [\n others[\"mask_spk{}\".format(spk + 1)] for spk in range(self.num_spk)\n ]\n\n # prepare ideal masks\n mask_ref = self._create_mask_label(\n spectrum_mix, spectrum_ref, mask_type=self.mask_type\n )\n\n # compute TF masking loss\n tf_loss, perm = self._permutation_loss(mask_ref, mask_pre_, loss_func)\n\n if \"mask_dereverb1\" in others:\n if dereverb_speech_ref is None:\n raise ValueError(\n \"No dereverberated reference for training!\\n\"\n 'Please specify \"--use_dereverb_ref true\" in run.sh'\n )\n\n mask_wpe_pre = [\n others[\"mask_dereverb{}\".format(spk + 1)]\n for spk in range(self.num_spk)\n if \"mask_dereverb{}\".format(spk + 1) in others\n ]\n assert len(mask_wpe_pre) == dereverb_speech_ref.size(1), (\n len(mask_wpe_pre),\n dereverb_speech_ref.size(1),\n )\n dereverb_speech_ref = torch.unbind(dereverb_speech_ref, dim=1)\n dereverb_spectrum_ref = [\n self.encoder(dr, speech_lengths)[0]\n for dr in dereverb_speech_ref\n ]\n dereverb_mask_ref = self._create_mask_label(\n spectrum_mix, dereverb_spectrum_ref, mask_type=self.mask_type\n )\n\n tf_dereverb_loss, perm_d = self._permutation_loss(\n dereverb_mask_ref, mask_wpe_pre, loss_func\n )\n tf_loss = tf_loss + tf_dereverb_loss\n\n if \"mask_noise1\" in others:\n if noise_ref is None:\n raise ValueError(\n \"No noise reference for training!\\n\"\n 'Please specify \"--use_noise_ref true\" in run.sh'\n )\n\n noise_ref = torch.unbind(noise_ref, dim=1)\n noise_spectrum_ref = [\n self.encoder(nr, speech_lengths)[0] for nr in noise_ref\n ]\n noise_mask_ref = self._create_mask_label(\n spectrum_mix, noise_spectrum_ref, mask_type=self.mask_type\n )\n\n mask_noise_pre = [\n others[\"mask_noise{}\".format(n + 1)]\n for n in range(self.num_noise_type)\n ]\n tf_noise_loss, perm_n = self._permutation_loss(\n noise_mask_ref, mask_noise_pre, loss_func\n )\n tf_loss = tf_loss + tf_noise_loss\n else:\n raise ValueError(\"Unsupported loss type: %s\" % self.loss_type)\n\n loss = tf_loss\n return loss, spectrum_pre, others, flens, perm\n\n else:\n speech_pre = [self.decoder(ps, speech_lengths)[0] for ps in feature_pre]\n if not cal_loss:\n loss, perm = None, None\n return loss, speech_pre, None, speech_lengths, perm\n\n # speech_pre: list[(batch, sample)]\n assert speech_pre[0].dim() == 2, speech_pre[0].dim()\n\n if speech_ref.dim() == 4:\n # For si_snr loss of multi-channel input,\n # only select one channel as the reference\n speech_ref = speech_ref[..., self.ref_channel]\n speech_ref = torch.unbind(speech_ref, dim=1)\n\n # compute si-snr loss\n si_snr_loss, perm = self._permutation_loss(\n speech_ref, speech_pre, self.si_snr_loss_zeromean_multi_grained if (self.multi_grained and self.training) else self.si_snr_loss_zeromean\n )\n loss = si_snr_loss\n\n return loss, speech_pre, None, speech_lengths, perm\n\n @staticmethod\n def tf_mse_loss(ref, inf):\n \"\"\"time-frequency MSE loss.\n\n Args:\n ref: (Batch, T, F) or (Batch, T, C, F)\n inf: (Batch, T, F) or (Batch, T, C, F)\n Returns:\n loss: (Batch,)\n \"\"\"\n assert ref.shape == inf.shape, (ref.shape, inf.shape)\n if not is_torch_1_3_plus:\n # in case of binary masks\n ref = ref.type(inf.dtype)\n diff = ref - inf\n if isinstance(diff, 
ComplexTensor):\n mseloss = diff.real ** 2 + diff.imag ** 2\n else:\n mseloss = diff ** 2\n if ref.dim() == 3:\n mseloss = mseloss.mean(dim=[1, 2])\n elif ref.dim() == 4:\n mseloss = mseloss.mean(dim=[1, 2, 3])\n else:\n raise ValueError(\n \"Invalid input shape: ref={}, inf={}\".format(ref.shape, inf.shape)\n )\n\n return mseloss\n\n @staticmethod\n def tf_log_mse_loss(ref, inf):\n \"\"\"time-frequency log-MSE loss.\n\n Args:\n ref: (Batch, T, F) or (Batch, T, C, F)\n inf: (Batch, T, F) or (Batch, T, C, F)\n Returns:\n loss: (Batch,)\n \"\"\"\n assert ref.shape == inf.shape, (ref.shape, inf.shape)\n if not is_torch_1_3_plus:\n # in case of binary masks\n ref = ref.type(inf.dtype)\n diff = ref - inf\n if isinstance(diff, ComplexTensor):\n log_mse_loss = diff.real ** 2 + diff.imag ** 2\n else:\n log_mse_loss = diff ** 2\n if ref.dim() == 3:\n log_mse_loss = torch.log10(log_mse_loss.sum(dim=[1, 2])) * 10\n elif ref.dim() == 4:\n log_mse_loss = torch.log10(log_mse_loss.sum(dim=[1, 2, 3])) * 10\n else:\n raise ValueError(\n \"Invalid input shape: ref={}, inf={}\".format(ref.shape, inf.shape)\n )\n\n return log_mse_loss\n\n @staticmethod\n def tf_l1_loss(ref, inf):\n \"\"\"time-frequency L1 loss.\n\n Args:\n ref: (Batch, T, F) or (Batch, T, C, F)\n inf: (Batch, T, F) or (Batch, T, C, F)\n Returns:\n loss: (Batch,)\n \"\"\"\n assert ref.shape == inf.shape, (ref.shape, inf.shape)\n if not is_torch_1_3_plus:\n # in case of binary masks\n ref = ref.type(inf.dtype)\n if isinstance(inf, ComplexTensor):\n l1loss = abs(ref - inf + EPS)\n else:\n l1loss = abs(ref - inf)\n if ref.dim() == 3:\n l1loss = l1loss.mean(dim=[1, 2])\n elif ref.dim() == 4:\n l1loss = l1loss.mean(dim=[1, 2, 3])\n else:\n raise ValueError(\n \"Invalid input shape: ref={}, inf={}\".format(ref.shape, inf.shape)\n )\n return l1loss\n\n @staticmethod\n def si_snr_loss(ref, inf):\n \"\"\"SI-SNR loss\n\n Args:\n ref: (Batch, samples)\n inf: (Batch, samples)\n Returns:\n loss: (Batch,)\n \"\"\"\n ref = ref / torch.norm(ref, p=2, dim=1, keepdim=True)\n inf = inf / torch.norm(inf, p=2, dim=1, keepdim=True)\n\n s_target = (ref * inf).sum(dim=1, keepdims=True) * ref\n e_noise = inf - s_target\n\n si_snr = 20 * (\n torch.log10(torch.norm(s_target, p=2, dim=1).clamp(min=EPS))\n - torch.log10(torch.norm(e_noise, p=2, dim=1).clamp(min=EPS))\n )\n return -si_snr\n\n @staticmethod\n def si_snr_loss_zeromean_multi_grained(ref, inf):\n \"\"\"SI-SNR loss with zero-mean in pre-processing.\n\n Args:\n ref: (Batch, samples)\n inf: (Batch, samples)\n Returns:\n loss: (Batch,)\n \"\"\"\n # logging.info(\"applying multi grained si snr\")\n\n assert ref.size() == inf.size()\n B, T = ref.size()\n\n base = 1000\n pair_wise_si_snr = 0\n cnt = 1\n\n while True:\n if base > T:\n break\n for start in range(0, T, base):\n end = start + base\n pair_wise_si_snr += ESPnetEnhancementInformedModel.si_snr_loss_zeromean(ref[:, start:end], inf[:, start:end])\n cnt += 1\n base *= 4\n pair_wise_si_snr += ESPnetEnhancementInformedModel.si_snr_loss_zeromean(ref, inf)\n return pair_wise_si_snr / cnt\n\n @staticmethod\n def si_snr_loss_zeromean(ref, inf):\n \"\"\"SI-SNR loss with zero-mean in pre-processing.\n\n Args:\n ref: (Batch, samples)\n inf: (Batch, samples)\n Returns:\n loss: (Batch,)\n \"\"\"\n assert ref.size() == inf.size()\n B, T = ref.size()\n # mask padding position along T\n\n # Step 1. 
Zero-mean norm\n mean_target = torch.sum(ref, dim=1, keepdim=True) / T\n mean_estimate = torch.sum(inf, dim=1, keepdim=True) / T\n zero_mean_target = ref - mean_target\n zero_mean_estimate = inf - mean_estimate\n\n # Step 2. SI-SNR with order\n # reshape to use broadcast\n s_target = zero_mean_target # [B, T]\n s_estimate = zero_mean_estimate # [B, T]\n # s_target = <s', s>s / ||s||^2\n pair_wise_dot = torch.sum(s_estimate * s_target, dim=1, keepdim=True) # [B, 1]\n s_target_energy = torch.sum(s_target ** 2, dim=1, keepdim=True) + EPS # [B, 1]\n pair_wise_proj = pair_wise_dot * s_target / s_target_energy # [B, T]\n # e_noise = s' - s_target\n e_noise = s_estimate - pair_wise_proj # [B, T]\n\n # SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)\n pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=1) / (\n torch.sum(e_noise ** 2, dim=1) + EPS\n )\n # print('pair_si_snr',pair_wise_si_snr[0,:])\n pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + EPS) # [B]\n # print(pair_wise_si_snr)\n\n return -1 * pair_wise_si_snr\n\n @staticmethod\n def _permutation_loss(ref, inf, criterion, perm=None):\n \"\"\"The basic permutation loss function.\n\n Args:\n ref (List[torch.Tensor]): [(batch, ...), ...] x n_spk\n inf (List[torch.Tensor]): [(batch, ...), ...]\n criterion (function): Loss function\n perm (torch.Tensor): specified permutation (batch, num_spk)\n Returns:\n loss (torch.Tensor): minimum loss with the best permutation (batch)\n perm (torch.Tensor): permutation for inf (batch, num_spk)\n e.g. tensor([[1, 0, 2], [0, 1, 2]])\n \"\"\"\n assert len(ref) == len(inf), (len(ref), len(inf))\n num_spk = len(ref)\n\n def pair_loss(permutation):\n return sum(\n [criterion(ref[s], inf[t]) for s, t in enumerate(permutation)]\n ) / len(permutation)\n\n if perm is None:\n device = ref[0].device\n all_permutations = list(permutations(range(num_spk)))\n losses = torch.stack([pair_loss(p) for p in all_permutations], dim=1)\n loss, perm = torch.min(losses, dim=1)\n perm = torch.index_select(\n torch.tensor(all_permutations, device=device, dtype=torch.long),\n 0,\n perm,\n )\n else:\n loss = torch.tensor(\n [\n torch.tensor(\n [\n criterion(\n ref[s][batch].unsqueeze(0), inf[t][batch].unsqueeze(0)\n )\n for s, t in enumerate(p)\n ]\n ).mean()\n for batch, p in enumerate(perm)\n ]\n )\n\n return loss.mean(), perm\n\n def collect_feats(\n self, speech_mix: torch.Tensor, speech_mix_lengths: torch.Tensor, **kwargs\n ) -> Dict[str, torch.Tensor]:\n # for data-parallel\n speech_mix = speech_mix[:, : speech_mix_lengths.max()]\n\n feats, feats_lengths = speech_mix, speech_mix_lengths\n return {\"feats\": feats, \"feats_lengths\": feats_lengths}\n" ]
[ [ "torch.sum", "torch.unbind", "torch.min", "torch.stack", "torch.ones", "torch.get_default_dtype", "torch.tensor", "torch.norm", "torch.arange", "torch.log10", "torch.bmm" ] ]
adiyen/codeday_sf_obj_detection_flask
[ "7b134e454303a79377b7b7ec54ada9c8dccf3f7a" ]
[ "object_detection.py" ]
[ "import matplotlib; matplotlib.use(\"Agg\")\n\nfrom imageai.Detection import ObjectDetection\nimport os, glob\n\ndef runner():\n execution_path = os.getcwd()\n\n detector = ObjectDetection()\n detector.setModelTypeAsRetinaNet()\n detector.setModelPath(os.path.join(execution_path , \"resnet50_coco_best_v2.0.1.h5\"))\n detector.loadModel()\n\n\n path = \"static/pics/\"\n files = os.listdir(path)\n\n oldest = files[-1]\n print(oldest)\n detections = detector.detectObjectsFromImage(input_image = os.path.join(execution_path , f\"static/pics/{oldest}\"), output_image_path = os.path.join(f\"static/pics/{oldest}\"))\n\n for obj in detections:\n# # if eachObject[\"name\"] == wanted_item:\n# # print(\"Found what you were looking for!\")\n# # print(eachObject[\"name\"] , \" : \" , eachObject[\"percentage_probability\"])\n# # else:\n print(obj[\"name\"] , \" : \" , obj[\"percentage_probability\"])\n\nrunner()\n\n# from imageai.Detection import VideoObjectDetection\n# import os\n# import cv2\n\n# execution_path = os.getcwd()\n\n# camera = cv2.VideoCapture(0)\n\n\n# detector = VideoObjectDetection()\n# detector.setModelTypeAsYOLOv3()\n# detector.setModelPath(os.path.join(execution_path , \"yolo.h5\"))\n# detector.loadModel()\n\n# video_path = detector.detectObjectsFromVideo(camera_input=camera,\n# output_file_path=os.path.join(execution_path, \"camera_detected_video\")\n# , frames_per_second=2, log_progress=True, minimum_percentage_probability=30)\n\n# cv2.imshow(video_path)\n\n# print(video_path)\n# def runner():\n\n# counter = 0\n# while True:\n# ret, img = camera.read()\n# counter+=1\n# cv2.waitKey(100)\n# detections = detector.detectObjectsFromVideo(camera_input = camera)\n# for obj in detections:\n# # cv2.imwrite(\"pics/\" + str(name) + \".\" + str(counter) + \".jpg\", img)\n# cv2.waitKey(100)\n# # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n\n# cv2.imshow(\"Face\", img)\n# # k = cv2.waitKey(1)\n# if counter > 50:\n# break\n\n# cam.release()\n# cv2.destroyAllWindows()\n\n# runner()" ]
[ [ "matplotlib.use" ] ]
tayuny/Chicago_Business_Prodictor
[ "c9076b93ddaacb619d4eefef830d3b7276174528" ]
[ "pipeline/evaluator.py" ]
[ "import numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.metrics import auc\r\nfrom sklearn.metrics import precision_recall_curve\r\nimport matplotlib.pyplot as plt\r\n\r\ndef compute_acc(y_true, y_scores, k):\r\n '''\r\n Compute accuracy score based on threshold\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an accuracy score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return accuracy_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef compute_f1(y_true, y_scores, k):\r\n '''\r\n Compute f1 score based on threshold\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an f1 score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return f1_score(y_true_sorted, preds_at_k)\r\n\r\ndef compute_auc_roc(y_true, y_scores, k):\r\n '''\r\n Compute area under Receiver Operator Characteristic Curve\r\n :param pred_scores: (np array) an array of predicted score\r\n :param threshold: (float) the threshold of labeling predicted results\r\n :param y_test: test set\r\n\r\n :return: (float) an auc_roc score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n\r\n return roc_auc_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef compute_auc(pred_scores, true_labels):\r\n '''\r\n Compute auc score\r\n :param pred_scores: an array of predicted scores\r\n :param true_labels: an array of true labels\r\n\r\n :return: area under curve score\r\n '''\r\n fpr, tpr, thresholds = roc_curve(true_labels, pred_scores, pos_label=2)\r\n return auc(fpr, tpr)\r\n\r\n\r\n# The following functions are referenced from:\r\n# https://github.com/rayidghani/magicloops/blob/master/mlfunctions.py\r\n\r\ndef joint_sort_descending(l1, l2):\r\n '''\r\n Sort two arrays together\r\n :param l1: numpy array\r\n :param l2: numpy array\r\n\r\n :return: two sorted arrays\r\n '''\r\n idx = np.argsort(l1)[::-1]\r\n return l1[idx], l2[idx]\r\n\r\n\r\ndef generate_binary_at_k(y_scores, k):\r\n '''\r\n predict labels based on thresholds\r\n :param y_scores: the predicted scores\r\n :param k: (int or float) threshold\r\n\r\n :return: predicted labels\r\n '''\r\n cutoff_index = int(len(y_scores) * (k / 100.0))\r\n predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]\r\n return predictions_binary\r\n\r\n\r\ndef precision_at_k(y_true, y_scores, k):\r\n '''\r\n Compute precision based on threshold (percentage)\r\n :param y_true: the true labels\r\n :param y_scores: the predicted labels\r\n :param k: (int or float) the threshold\r\n\r\n :return: (float) precision score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n return 
precision_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef recall_at_k(y_true, y_scores, k):\r\n '''\r\n Compute recall based on threshold (percentage)\r\n :param y_true: the true labels\r\n :param y_scores: the predicted labels\r\n :param k: (int or float) the threshold\r\n\r\n :return: (float) recall score\r\n '''\r\n y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))\r\n preds_at_k = generate_binary_at_k(y_scores_sorted, k)\r\n return recall_score(y_true_sorted, preds_at_k)\r\n\r\n\r\ndef plot_precision_recall_n(y_true, y_prob, name, save_name, output_type):\r\n #pdb.set_trace() \r\n y_score = y_prob\r\n precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true, y_score)\r\n precision_curve = precision_curve[:-1]\r\n recall_curve = recall_curve[:-1]\r\n pct_above_per_thresh = []\r\n number_scored = len(y_score)\r\n for value in pr_thresholds:\r\n num_above_thresh = len(y_score[y_score >= value])\r\n pct_above_thresh = num_above_thresh / float(number_scored)\r\n pct_above_per_thresh.append(pct_above_thresh)\r\n pct_above_per_thresh = np.array(pct_above_per_thresh)\r\n\r\n plt.clf()\r\n fig, ax1 = plt.subplots()\r\n ax1.plot(pct_above_per_thresh, precision_curve, 'b')\r\n ax1.set_xlabel('percent of population')\r\n ax1.set_ylabel('precision', color='b')\r\n ax2 = ax1.twinx()\r\n ax2.plot(pct_above_per_thresh, recall_curve, 'r')\r\n ax2.set_ylabel('recall', color='r')\r\n ax1.set_ylim([0, 1])\r\n ax1.set_ylim([0, 1])\r\n ax2.set_xlim([0, 1])\r\n\r\n plt.title(name)\r\n if (output_type == 'save'):\r\n plt.savefig(save_name)\r\n plt.close()\r\n elif (output_type == 'show'):\r\n plt.show()\r\n else:\r\n plt.show()\r\n\r\n\r\ndef plot_roc(name, save_name, probs, y_true, output_type):\r\n \r\n fpr, tpr, thresholds = roc_curve(y_true, probs)\r\n roc_auc = auc(fpr, tpr)\r\n plt.clf()\r\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\r\n plt.plot([0, 1], [0, 1], 'k--')\r\n plt.xlim([0.0, 1.05])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title(name)\r\n plt.legend(loc=\"lower right\")\r\n if (output_type == 'save'):\r\n plt.savefig(save_name, close=True)\r\n plt.close()\r\n elif (output_type == 'show'):\r\n plt.show()\r\n else:\r\n plt.show()\r\n\r\n" ]
[ [ "numpy.argsort", "sklearn.metrics.precision_score", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "sklearn.metrics.precision_recall_curve", "sklearn.metrics.roc_curve", "matplotlib.pyplot.savefig", "sklearn.metrics.auc", "matplotlib.pyplot.xlim", "sklearn.metrics.f1_score", "matplotlib.pyplot.title", "sklearn.metrics.recall_score", "matplotlib.pyplot.clf", "matplotlib.pyplot.subplots", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "matplotlib.pyplot.legend", "sklearn.metrics.roc_auc_score", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
ysx001/IIC
[ "e72eb0833785e867ded0a9bac47ce1d1f9f47b4b" ]
[ "iic/code/utils/segmentation/segmentation_eval.py" ]
[ "from __future__ import print_function\n\nimport sys\nfrom datetime import datetime\n\nimport torch\n\nfrom code.utils.cluster.cluster_eval import cluster_subheads_eval\nfrom code.utils.cluster.transforms import sobel_process\n\n\ndef segmentation_eval(config, net,\n mapping_assignment_dataloader,\n mapping_test_dataloader,\n sobel, using_IR=False, verbose=0, return_only=False):\n torch.cuda.empty_cache()\n net.eval()\n\n stats_dict = cluster_subheads_eval(config, net,\n mapping_assignment_dataloader=mapping_assignment_dataloader,\n mapping_test_dataloader=mapping_test_dataloader,\n sobel=sobel,\n using_IR=using_IR,\n get_data_fn=_segmentation_get_data,\n verbose=verbose)\n\n net.train()\n\n acc = stats_dict[\"best\"]\n is_best = (len(config.epoch_acc) > 0) and (acc > max(config.epoch_acc))\n\n torch.cuda.empty_cache()\n\n if not return_only:\n config.epoch_stats.append(stats_dict)\n config.epoch_acc.append(acc)\n config.epoch_avg_subhead_acc.append(stats_dict[\"avg\"])\n\n return is_best\n else:\n return stats_dict\n\n\ndef _segmentation_get_data(config, net, dataloader, sobel=False,\n using_IR=False, verbose=0):\n # returns (vectorised) cuda tensors for flat preds and targets\n # sister of _clustering_get_data\n\n assert (config.output_k <= 255)\n\n num_batches = len(dataloader)\n num_samples = 0\n\n # upper bound, will be less for last batch\n samples_per_batch = config.batch_sz * config.input_sz * config.input_sz\n\n if verbose > 0:\n print(\"started _segmentation_get_data %s\" % datetime.now())\n sys.stdout.flush()\n\n # vectorised\n flat_predss_all = [torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cpu() for _ in xrange(\n config.num_sub_heads)]\n flat_targets_all = torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cpu()\n mask_all = torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cpu()\n\n if verbose > 0:\n batch_start = datetime.now()\n all_start = batch_start\n print(\"starting batches %s\" % batch_start)\n\n for b_i, batch in enumerate(dataloader):\n\n imgs, flat_targets, mask = batch\n imgs = imgs.cpu()\n\n if sobel:\n imgs = sobel_process(imgs, config.include_rgb, using_IR=using_IR, cpu=True)\n\n with torch.no_grad():\n x_outs = net(imgs)\n\n assert (x_outs[0].shape[1] == config.output_k)\n assert (x_outs[0].shape[2] == config.input_sz and x_outs[0].shape[\n 3] == config.input_sz)\n\n # actual batch size\n actual_samples_curr = (\n flat_targets.shape[0] * config.input_sz * config.input_sz)\n num_samples += actual_samples_curr\n\n # vectorise: collapse from 2D to 1D\n start_i = b_i * samples_per_batch\n for i in xrange(config.num_sub_heads):\n x_outs_curr = x_outs[i]\n assert (not x_outs_curr.requires_grad)\n flat_preds_curr = torch.argmax(x_outs_curr, dim=1)\n flat_predss_all[i][\n start_i:(start_i + actual_samples_curr)] = flat_preds_curr.view(-1)\n\n flat_targets_all[\n start_i:(start_i + actual_samples_curr)] = flat_targets.view(-1)\n mask_all[start_i:(start_i + actual_samples_curr)] = mask.view(-1)\n\n if verbose > 0 and b_i < 3:\n batch_finish = datetime.now()\n print(\"finished batch %d, %s, took %s, of %d\" %\n (b_i, batch_finish, batch_finish - batch_start, num_batches))\n batch_start = batch_finish\n sys.stdout.flush()\n\n if verbose > 0:\n all_finish = datetime.now()\n print(\n \"finished all batches %s, took %s\" % (all_finish, all_finish - all_start))\n sys.stdout.flush()\n\n flat_predss_all = [flat_predss_all[i][:num_samples] for i in\n xrange(config.num_sub_heads)]\n flat_targets_all = 
flat_targets_all[:num_samples]\n mask_all = mask_all[:num_samples]\n\n flat_predss_all = [flat_predss_all[i].masked_select(mask=mask_all) for i in\n xrange(config.num_sub_heads)]\n flat_targets_all = flat_targets_all.masked_select(mask=mask_all)\n\n if verbose > 0:\n print(\"ended _segmentation_get_data %s\" % datetime.now())\n sys.stdout.flush()\n\n selected_samples = mask_all.sum()\n assert (len(flat_predss_all[0].shape) == 1 and\n len(flat_targets_all.shape) == 1)\n assert (flat_predss_all[0].shape[0] == selected_samples)\n assert (flat_targets_all.shape[0] == selected_samples)\n\n return flat_predss_all, flat_targets_all\n" ]
[ [ "torch.cuda.empty_cache", "torch.no_grad", "torch.zeros", "torch.argmax" ] ]
sa-mustafa/incubator-mxnet
[ "03654eeea3f3ab30dc43fabb7229945970a358b2" ]
[ "tests/python/unittest/test_gluon_data.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport tarfile\nimport unittest\nimport mxnet as mx\nimport numpy as np\nimport random\nfrom mxnet import gluon\nimport platform\nfrom common import setup_module, with_seed, teardown\nfrom mxnet.gluon.data import DataLoader\nimport mxnet.ndarray as nd\nfrom mxnet import context\nfrom mxnet.gluon.data.dataset import Dataset\nfrom mxnet.gluon.data.dataset import ArrayDataset\n\n@with_seed()\ndef test_array_dataset():\n X = np.random.uniform(size=(10, 20))\n Y = np.random.uniform(size=(10,))\n dataset = gluon.data.ArrayDataset(X, Y)\n loader = gluon.data.DataLoader(dataset, 2)\n for i, (x, y) in enumerate(loader):\n assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])\n assert mx.test_utils.almost_equal(y.asnumpy(), Y[i*2:(i+1)*2])\n\n dataset = gluon.data.ArrayDataset(X)\n loader = gluon.data.DataLoader(dataset, 2)\n\n for i, x in enumerate(loader):\n assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])\n\n\ndef prepare_record():\n if not os.path.isdir(\"data/test_images\"):\n os.makedirs('data/test_images')\n if not os.path.isdir(\"data/test_images/test_images\"):\n gluon.utils.download(\"http://data.mxnet.io/data/test_images.tar.gz\", \"data/test_images.tar.gz\")\n tarfile.open('data/test_images.tar.gz').extractall('data/test_images/')\n if not os.path.exists('data/test.rec'):\n imgs = os.listdir('data/test_images/test_images')\n record = mx.recordio.MXIndexedRecordIO('data/test.idx', 'data/test.rec', 'w')\n for i, img in enumerate(imgs):\n str_img = open('data/test_images/test_images/'+img, 'rb').read()\n s = mx.recordio.pack((0, i, i, 0), str_img)\n record.write_idx(i, s)\n return 'data/test.rec'\n\n\n@with_seed()\ndef test_recordimage_dataset():\n recfile = prepare_record()\n fn = lambda x, y : (x, y)\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(fn)\n loader = gluon.data.DataLoader(dataset, 1)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\ndef _dataset_transform_fn(x, y):\n \"\"\"Named transform function since lambda function cannot be pickled.\"\"\"\n return x, y\n\ndef _dataset_transform_first_fn(x):\n \"\"\"Named transform function since lambda function cannot be pickled.\"\"\"\n return x\n\n@with_seed()\ndef test_recordimage_dataset_with_data_loader_multiworker():\n recfile = prepare_record()\n dataset = gluon.data.vision.ImageRecordDataset(recfile)\n loader = gluon.data.DataLoader(dataset, 1, num_workers=5)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n # with transform\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(_dataset_transform_fn)\n loader = gluon.data.DataLoader(dataset, 1, 
num_workers=5)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n # with transform_first\n dataset = gluon.data.vision.ImageRecordDataset(recfile).transform_first(_dataset_transform_first_fn)\n loader = gluon.data.DataLoader(dataset, 1, num_workers=5)\n\n for i, (x, y) in enumerate(loader):\n assert x.shape[0] == 1 and x.shape[3] == 3\n assert y.asscalar() == i\n\n@with_seed()\ndef test_sampler():\n seq_sampler = gluon.data.SequentialSampler(10)\n assert list(seq_sampler) == list(range(10))\n rand_sampler = gluon.data.RandomSampler(10)\n assert sorted(list(rand_sampler)) == list(range(10))\n seq_batch_keep = gluon.data.BatchSampler(seq_sampler, 3, 'keep')\n assert sum(list(seq_batch_keep), []) == list(range(10))\n seq_batch_discard = gluon.data.BatchSampler(seq_sampler, 3, 'discard')\n assert sum(list(seq_batch_discard), []) == list(range(9))\n rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep')\n assert sorted(sum(list(rand_batch_keep), [])) == list(range(10))\n\n@with_seed()\ndef test_datasets():\n assert len(gluon.data.vision.MNIST(root='data/mnist')) == 60000\n assert len(gluon.data.vision.MNIST(root='data/mnist', train=False)) == 10000\n assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist')) == 60000\n assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist', train=False)) == 10000\n assert len(gluon.data.vision.CIFAR10(root='data/cifar10')) == 50000\n assert len(gluon.data.vision.CIFAR10(root='data/cifar10', train=False)) == 10000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100')) == 50000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True)) == 50000\n assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False)) == 10000\n\n@with_seed()\ndef test_image_folder_dataset():\n prepare_record()\n dataset = gluon.data.vision.ImageFolderDataset('data/test_images')\n assert dataset.synsets == ['test_images']\n assert len(dataset.items) == 16\n\n@with_seed()\ndef test_list_dataset():\n for num_worker in range(0, 3):\n data = mx.gluon.data.DataLoader([([1,2], 0), ([3, 4], 1)], batch_size=1, num_workers=num_worker)\n for d, l in data:\n pass\n\n\nclass Dataset(gluon.data.Dataset):\n def __len__(self):\n return 100\n def __getitem__(self, key):\n return mx.nd.full((10,), key)\n\n@with_seed()\ndef test_multi_worker():\n data = Dataset()\n for thread_pool in [True, False]:\n loader = gluon.data.DataLoader(data, batch_size=1, num_workers=5, thread_pool=thread_pool)\n for i, batch in enumerate(loader):\n assert (batch.asnumpy() == i).all()\n\n\n@with_seed()\ndef test_multi_worker_shape():\n for thread_pool in [True, False]:\n batch_size = 1024\n shape = (batch_size+1, 11, 12)\n\n data = ArrayDataset(np.ones(shape))\n loader = gluon.data.DataLoader(\n data, batch_size=batch_size, num_workers=5, last_batch='keep', thread_pool=thread_pool)\n for batch in loader:\n if shape[0] > batch_size:\n assert batch.shape == (batch_size, shape[1], shape[2])\n shape = (shape[0] - batch_size, shape[1], shape[2])\n else:\n assert batch.shape == shape\n\nclass _Dummy(Dataset):\n \"\"\"Dummy dataset for randomized shape arrays.\"\"\"\n def __init__(self, random_shape):\n self.random_shape = random_shape\n\n def __getitem__(self, idx):\n key = idx\n if self.random_shape:\n out = np.random.uniform(size=(random.randint(1000, 1100), 40))\n labels = np.random.uniform(size=(random.randint(10, 15)))\n else:\n out = np.random.uniform(size=(1000, 40))\n labels = 
np.random.uniform(size=(10))\n return key, out, labels\n\n def __len__(self):\n return 50\n\ndef _batchify_list(data):\n \"\"\"\n return list of ndarray without stack/concat/pad\n \"\"\"\n if isinstance(data, (tuple, list)):\n return list(data)\n if isinstance(data, mx.nd.NDArray):\n return [data]\n return data\n\ndef _batchify(data):\n \"\"\"\n Collate data into batch. Use shared memory for stacking.\n :param data: a list of array, with layout of 'NTC'.\n :return either x and x's unpadded lengths, or x, x's unpadded lengths, y and y's unpadded lengths\n if labels are not supplied.\n \"\"\"\n\n # input layout is NTC\n keys, inputs, labels = [item[0] for item in data], [item[1] for item in data], \\\n [item[2] for item in data]\n\n if len(data) > 1:\n max_data_len = max([seq.shape[0] for seq in inputs])\n max_labels_len = 0 if not labels else max([seq.shape[0] for seq in labels])\n else:\n max_data_len = inputs[0].shape[0]\n max_labels_len = 0 if not labels else labels[0].shape[0]\n\n x_lens = [item.shape[0] for item in inputs]\n y_lens = [item.shape[0] for item in labels]\n\n for i, seq in enumerate(inputs):\n pad_len = max_data_len - seq.shape[0]\n inputs[i] = np.pad(seq, ((0, pad_len), (0, 0)), 'constant', constant_values=0)\n labels[i] = np.pad(labels[i], (0, max_labels_len - labels[i].shape[0]),\n 'constant', constant_values=-1)\n\n inputs = np.asarray(inputs, dtype=np.float32)\n if labels is not None:\n labels = np.asarray(labels, dtype=np.float32)\n inputs = inputs.transpose((1, 0, 2))\n labels = labels.transpose((1, 0))\n\n return (nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(x_lens, ctx=context.Context('cpu_shared', 0))) \\\n if labels is None else (\n nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(x_lens, ctx=context.Context('cpu_shared', 0)),\n nd.array(labels, dtype=labels.dtype, ctx=context.Context('cpu_shared', 0)),\n nd.array(y_lens, ctx=context.Context('cpu_shared', 0)))\n\n@with_seed()\ndef test_multi_worker_forked_data_loader():\n data = _Dummy(False)\n loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2)\n for epoch in range(1):\n for i, data in enumerate(loader):\n pass\n\n data = _Dummy(True)\n loader = DataLoader(data, batch_size=40, batchify_fn=_batchify_list, num_workers=2)\n for epoch in range(1):\n for i, data in enumerate(loader):\n pass\n\n@with_seed()\ndef test_multi_worker_dataloader_release_pool():\n # will trigger too many open file if pool is not released properly\n if os.name == 'nt':\n print('Skip for windows since spawn on windows is too expensive.')\n return\n\n from sys import platform\n if platform == 'darwin':\n print('Skip for MacOS due to https://github.com/apache/incubator-mxnet/issues/17782')\n return\n\n for _ in range(10):\n A = np.random.rand(999, 2000)\n D = mx.gluon.data.DataLoader(A, batch_size=8, num_workers=8)\n the_iter = iter(D)\n next(the_iter)\n del the_iter\n del D\n\n\ndef test_dataloader_context():\n X = np.random.uniform(size=(10, 20))\n dataset = gluon.data.ArrayDataset(X)\n default_dev_id = 0\n custom_dev_id = 1\n\n # use non-pinned memory\n loader1 = gluon.data.DataLoader(dataset, 8)\n for _, x in enumerate(loader1):\n assert x.context == context.cpu(default_dev_id)\n\n # use pinned memory with default device id\n loader2 = gluon.data.DataLoader(dataset, 8, pin_memory=True)\n for _, x in enumerate(loader2):\n assert x.context == context.cpu_pinned(default_dev_id)\n\n # use pinned memory with custom device id\n loader3 = 
gluon.data.DataLoader(dataset, 8, pin_memory=True,\n pin_device_id=custom_dev_id)\n for _, x in enumerate(loader3):\n assert x.context == context.cpu_pinned(custom_dev_id)\n\ndef batchify(a):\n return a\n\ndef test_dataset_filter():\n length = 100\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n a_filtered = a.filter(lambda x: x % 10 == 0)\n assert(len(a_filtered) == 10)\n for idx, sample in enumerate(a_filtered):\n assert sample % 10 == 0\n a_xform_filtered = a.transform(lambda x: x + 1).filter(lambda x: x % 10 == 0)\n assert(len(a_xform_filtered) == 10)\n # the filtered data is already transformed\n for idx, sample in enumerate(a_xform_filtered):\n assert sample % 10 == 0\n\ndef test_dataset_shard():\n length = 9\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n shard_0 = a.shard(4, 0)\n shard_1 = a.shard(4, 1)\n shard_2 = a.shard(4, 2)\n shard_3 = a.shard(4, 3)\n assert len(shard_0) + len(shard_1) + len(shard_2) + len(shard_3) == length\n assert len(shard_0) == 3\n assert len(shard_1) == 2\n assert len(shard_2) == 2\n assert len(shard_3) == 2\n total = 0\n for shard in [shard_0, shard_1, shard_2, shard_3]:\n for idx, sample in enumerate(shard):\n total += sample\n assert total == sum(a)\n\ndef test_dataset_take():\n length = 100\n a = mx.gluon.data.SimpleDataset([i for i in range(length)])\n a_take_full = a.take(1000)\n assert len(a_take_full) == length\n a_take_full = a.take(None)\n assert len(a_take_full) == length\n count = 10\n a_take_10 = a.take(count)\n assert len(a_take_10) == count\n expected_total = sum([i for i in range(count)])\n total = 0\n for idx, sample in enumerate(a_take_10):\n assert sample < count\n total += sample\n assert total == expected_total\n\n a_xform_take_10 = a.transform(lambda x: x * 10).take(count)\n assert len(a_xform_take_10) == count\n expected_total = sum([i * 10 for i in range(count)])\n total = 0\n for idx, sample in enumerate(a_xform_take_10):\n assert sample < count * 10\n total += sample\n assert total == expected_total\n\ndef test_dataloader_scope():\n \"\"\"\n Bug: Gluon DataLoader terminates the process pool early while\n _MultiWorkerIter is operating on the pool.\n\n Tests that DataLoader is not garbage collected while the iterator is\n in use.\n \"\"\"\n args = {'num_workers': 1, 'batch_size': 2}\n dataset = nd.ones(5)\n iterator = iter(DataLoader(\n dataset,\n batchify_fn=batchify,\n **args\n )\n )\n\n item = next(iterator)\n\n assert item is not None\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n" ]
[ [ "numpy.random.uniform", "numpy.ones", "numpy.asarray", "numpy.random.rand", "numpy.pad" ] ]
greyzor/dash-worldmap-metrics
[ "6895544edba0ccc0a00df9b88da0a3936d11c695" ]
[ "helpers.py" ]
[ "\"\"\"\nPrecomputing, App Layout (Layers/Markers), Callbacks helpers.\n\"\"\"\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n# import dash_colorscales\n# import cufflinks as cf\nimport pandas as pd\nimport numpy as np\nimport re\nimport json\nimport dash_dangerously_set_inner_html\n\nfrom conf import *\nfrom colors import (\n _color_from_bin,\n _opacity_from_bin,\n _border_color_from_bin\n)\n\n## helpers:\ndef _extract_lng(arr):\n \"\"\" Extract average longitude from array of geo-coordinates in format: (lng,lat)\"\"\"\n return np.mean([item[0] for item in arr[0]])\n\ndef _extract_lat(arr):\n \"\"\" Extract average latitude from array of geo-coordinates in format: (lng,lat)\"\"\"\n return np.mean([item[1] for item in arr[0]])\n\ndef create_country_geoloc_dataframe(source):\n \"\"\"Create geolocation Dataframe from source raw data.\n\n :param source: raw source data.\n :type source: dict.\n\n :returns: a geolocation DataFrame.\n :rtype: pd.DataFrame\n \"\"\"\n df_geo = pd.DataFrame(source['features'])\n df_geo['Country'] = df_geo['properties'].apply(lambda d: d['name'])\n\n ## For each polygon in MultiPolygon, explode into a new row.\n df_geo['geo_type'] = df_geo['geometry'].apply(lambda d: d['type'])\n df_geo['coord'] = df_geo['geometry'].apply(lambda d: d['coordinates'])\n df_geo = df_geo[['type','geo_type','Country','coord']]\n x = df_geo[df_geo.geo_type=='MultiPolygon']['coord'].apply(pd.Series)\n x = x.merge(df_geo, left_index=True, right_index=True).drop('coord',axis=1).melt(id_vars=['type', 'geo_type','Country'], value_name = \"coord\")\n x = x.dropna().sort_values(['Country','variable'])\n\n ## Merge new exploded MultiPolygon with non-exploded Polygon\n df_geo[df_geo.geo_type=='Polygon']['variable'] = 0\n df_geo = pd.concat([x,df_geo[df_geo.geo_type=='Polygon']]).sort_values(['Country','variable'])\n\n ## Compute geo-coordinates\n df_geo['lng'] = df_geo['coord'].apply(_extract_lng)\n df_geo['lat'] = df_geo['coord'].apply(_extract_lat)\n\n df_geo = df_geo[['Country','lng','lat']]\n return df_geo\n\ndef generate_random_country_partitions(source, scale=SCALE):\n \"\"\"Create random country partitions from source raw data.\n\n :param source: raw source data.\n :type source: dict.\n\n :returns: a partitions of countries.\n :rtype: dict of partitions, each key is the bin idx,\n each value is the list of countries for the bin.\n \"\"\"\n global N_BINS\n\n data = [(d['properties']['name'], d['id']) for d in source['features']]\n df = pd.DataFrame(data)\n df['count'] = (np.random.rand(df.shape[0])*scale).astype(int)\n # return df\n\n df['bin'] = (df['count']/N_BINS).astype(int)\n partitions = df.groupby('bin')[0].apply(list).to_json()\n partitions = json.loads(partitions)\n return partitions\n\ndef compute_country_airquality_scores(source, fpath='./data/air_quality_country.csv'):\n \"\"\"Compute Air Quality scores from source raw data.\n\n :param source: raw source data.\n :type source: dict.\n :param fpath: data file path.\n :type fpath: str\n\n :returns: a partitions of countries, and dataframe of scores\n :rtype: tuple (dict of partitions, pd.DataFrame of bin scores per country)\n \"\"\"\n ## Countries list\n all_countries = [d['properties']['name'] for d in source['features']]\n\n ## Air quality data\n df = pd.read_csv(fpath, sep=',')\n df_latest = df[df.Year==2015]\n\n ## Cleanify data\n mapping = {\n 'Guinea-Bissau': 'Guinea Bissau',\n \"Cote d'Ivoire\": 'Ivory Coast',\n 'Serbia': 'Republic of Serbia',\n 
'Congo': 'Republic of the Congo',\n 'Russian Federation': 'Russia',\n 'Tanzania': 'United Republic of Tanzania',\n 'United States': 'United States of America',\n }\n def replace_country_name(x):\n \"\"\" Replace country name by its mapping using dict. \"\"\"\n if x in mapping.keys():\n return mapping[x]\n return x\n\n df_latest['Country'] = df_latest['Type'].apply(replace_country_name)\n\n ## Merge list of countries with Normalized Air quality estimate\n df_final = pd.DataFrame(all_countries, columns=['Country']).merge(df_latest[['Country', 'Exposure_Mean']])\n scaler = np.max(df_latest['Exposure_Mean'])*1.05\n df_final['Exposure_Norm'] = (100*df_final['Exposure_Mean']/scaler).astype(int)\n\n df_final['bin'] = (df_final['Exposure_Norm']/N_BINS).astype(int)\n partitions = df_final.groupby('bin')['Country'].apply(list).to_json()\n partitions = json.loads(partitions)\n return (partitions, df_final)\n\ndef build_mapbox_layers_for_countries(source, partitions, colors, layer_border_colors='white'):\n \"\"\"Build Mapbox layers struct.\n\n :param source: raw source data.\n :type source: dict.\n :param partitions: dict of partitions, key is bin, value is list of countries for bin.\n :type partitions: dict[list]\n :param colors: list of colors, one per layer.\n :type colors: list[str]\n :param layer_border_colors: borders color per layer.\n :type layer_border_colors: list[str] or str\n\n :returns: Mapbox layers inner struct.\n :rtype: list[dict], each dict being an inner map layer.\n \"\"\"\n first_symbol_id = None\n\n layers = []\n for _bin in partitions.keys():\n countries = partitions[_bin]\n\n _source = {}\n _source.setdefault('type', source['type'])\n _source['features'] = filter(\n lambda d: d['properties']['name'] in countries,\n source['features']\n )\n\n layer = dict(\n sourcetype='geojson',\n source=_source,\n type='fill',\n color=colors[int(_bin)],\n opacity=DEFAULT_OPACITY,\n # below=\"water\"\n # below=\"state-label-sm\",\n # below=\"mapbox\"\n )\n layers.append(layer)\n\n layer = dict(\n sourcetype='geojson',\n source=_source,\n type='line',\n color=layer_border_colors[int(_bin)],\n opacity=1.0,\n )\n layers.append(layer)\n\n return layers\n\ndef build_app_layout(app, data, layers, mapbox_access_token, default_style_value='custom'):\n \"\"\"Build Application Layout.\n\n :param app: dash app.\n :type app: dash.dash.Dash\n :param data: mapbox data inner struct.\n :type data: list[dict]\n :param layers: mapbox layers inner struct.\n :type layers: list[dict], each dict being an inner map layer.\n :param mapbox_access_token: mapbox access token.\n :type mapbox_access_token: str\n :param default_style_value: default style.\n :type default_style_value: str.\n\n :returns: app object with layout field updated.\n :rtype: dash.dash.Dash\n \"\"\"\n ## Main layout\n app.layout = html.Div(children=[\n\n html.Div([\n ## Header\n html.Div(\n [\n html.H4(\n 'World Map Metrics',\n style={'text-align':'center', 'display':'inline-block', 'margin':'20px 0px 20px 40px'}\n ),\n html.Div(\n # [\n # dash_dangerously_set_inner_html.DangerouslySetInnerHTML('''\n # <a class=\"github-button\" href=\"https://github.com/greyzor/dash-worldmap-metrics\" data-size=\"large\" data-show-count=\"true\" aria-label=\"Star greyzor/dash-worldmap-metrics on GitHub\">Star</a>\n # ''')\n # ],\n html.A(\n html.Button('Show on Github!'),\n href='https://github.com/greyzor/dash-worldmap-metrics',\n target='_blank'\n ),\n style={'float':'right', 'background-color':'white', 'margin':'20px 40px 20px 0px'},\n )\n ],\n 
style={'background-color':'#e51b79', 'color':'white'}\n ),\n\n ## Inputs and selection dropdowns\n html.Div(\n [\n html.Div(\n [\n dcc.Dropdown(\n id='metric-1-dropdown',\n options=[\n {'label': 'PM25 pollution exposure', 'value': 'PM25'},\n {'label': 'Other metric', 'value': 'OTHER'},\n ],\n value='PM25',\n ),\n ],\n className='three columns'\n ),\n html.Div(\n 'Exposure to PM25 air pollution for 2015, with data from: www.stateofglobalair.org',\n className='six columns',\n style={'font-weight':'bold', 'font-size':'16px'}\n ),\n html.Div(\n dcc.Dropdown(\n id='map-style-selector',\n options=[\n {'label': 'Style: Default', 'value': 'default'},\n {'label': 'Style: Custom', 'value': 'custom'},\n ],\n value=default_style_value,\n ),\n className='three columns'\n ),\n ],\n style={'background-color':'white', 'text-align':'center', 'padding':'1.5rem'},\n className='row'\n ),\n\n ## The Map\n dcc.Graph(\n id='world-map',\n figure=build_map_figure(\n data,\n None,\n mapbox_access_token,\n DEFAULT_COLORSCALE,\n map_style=VALUE_TO_MAPBOX_STYLE[default_style_value]\n ),\n style={'height':'80vh'}\n ),\n ], style={'height':'100%'}),\n ], className='twelve columns', style={'margin':0, 'height':'98vh', 'background-color':'white'})\n return app\n\ndef build_mapbox_geo_data(df_geo, text_col='description', markers=None):\n \"\"\"Build Mapbox geolocation inner data struct.\n\n :param df_geo: a geolocation DataFrame.\n :type df_geo: pd.DataFrame\n :param text_col: column name for text.\n :type text_col: str\n :param markers: markers to be displayed on map.\n :type markers: dict\n\n :returns: mapbox data inner struct.\n :rtype: list[dict]\n \"\"\"\n data = [\n dict(\n lat=df_geo['lat'],\n lon=df_geo['lng'],\n text=df_geo[text_col],\n type='scattermapbox',\n hoverinfo='text',\n selected = dict(marker = dict(opacity=1)),\n unselected = dict(marker = dict(opacity = 0)),\n # mode='markers+text',\n mode='markers+text',\n marker=markers,\n )\n ]\n return data\n\n\ndef build_map_figure(data, layers, mapbox_access_token, annot_colors, map_style='light'):\n \"\"\"Build Mapbox figure.\n\n :param data: mapbox data inner struct.\n :type data: list[dict]\n :param layers: mapbox layers inner struct.\n :type layers: list[dict], each dict being an inner map layer.\n :param mapbox_access_token: mapbox access token.\n :type mapbox_access_token: str\n :param annot_colors: annotation colors used to show a legend.\n :type annot_colors: list\n :param map_style: default map style.\n :type map_style: str.\n\n :returns: dash dcc.Graph figure field.\n :rtype: dict\n \"\"\"\n annotations = None\n if layers is not None and len(layers) > 0:\n annotations = [dict(\n showarrow=False,\n align='right',\n text='<b>PM25 level ranges:</b>',\n x=0.975,\n y=0.95,\n bgcolor='white'\n )]\n\n for k, color in enumerate(annot_colors):\n annotations.append(\n dict(\n arrowcolor = color,\n text='range: %s-%s'%(10*k, 10*(k+1)),\n x = 0.975,\n y = 0.90-0.3*k/N_BINS,\n ax = -90,\n ay = 0,\n arrowwidth=12,\n arrowhead=0,\n bgcolor = '#EFEFEE'\n )\n )\n\n return dict(\n data=data,\n layout=dict(\n mapbox=dict(\n layers=layers,\n accesstoken=mapbox_access_token,\n style=map_style,\n center=dict(\n lat=30, #38.72490,\n lon=-1.67571, #-95.61446,\n ),\n pitch=0,\n zoom=1.5,\n ),\n annotations=annotations,\n margin=dict(r=0, l=0, t=0, b=0),\n showlegend=False,\n # **{'height':'900px','min-height':'300px','max-height':'70vh'}\n )\n )\n\ndef build_app(app):\n \"\"\"From default dash.dash.Dash application, return build and customized app.\"\"\"\n ## load: source 
data\n with open('data/countries.geo.json') as f:\n source = json.load(f)\n\n df_geo = create_country_geoloc_dataframe(source)\n # partitions = generate_random_country_partitions(source, scale=SCALE)\n (partitions, df_scores) = compute_country_airquality_scores(source, fpath='./data/air_quality_country.csv')\n\n df_geo = df_geo.merge(df_scores[['Country','Exposure_Mean', 'bin']])\n df_geo['description'] = df_geo['Country']+': '+df_geo['Exposure_Mean'].astype(str)\n\n ## colors for markers (one per country), and borders (one color per layer)\n marker_colors = df_geo['bin'].apply(lambda idx: _color_from_bin(idx, N_BINS))\n layer_border_colors = [_border_color_from_bin(int(_bin), N_BINS) for _bin in partitions.keys()]\n\n markers = dict(\n size=25,\n color=marker_colors,\n # opacity=df_geo['bin'].apply(lambda idx: _opacity_from_bin(idx, N_BINS))\n opacity=1.\n )\n\n ## build: map data and layers\n layers = build_mapbox_layers_for_countries(\n source, partitions, DEFAULT_COLORSCALE,\n layer_border_colors=layer_border_colors\n )\n data = build_mapbox_geo_data(df_geo, text_col='description', markers=markers)\n\n ## build: layout\n app = build_app_layout(app, data, layers, MAPBOX_ACCESS_TOKEN, default_style_value='custom')\n\n ## styling: external\n app.css.append_css({'external_url': 'https://codepen.io/plotly/pen/EQZeaW.css'})\n\n ## callbacks\n def _change_map_style_callback(value):\n \"\"\" Callback to change map style, according to value.\"\"\"\n map_style = VALUE_TO_MAPBOX_STYLE[value]\n\n return build_map_figure(\n data,\n layers,\n MAPBOX_ACCESS_TOKEN,\n DEFAULT_COLORSCALE,\n map_style=map_style\n )\n\n app.callback(\n Output('world-map', 'figure'),\n [Input('map-style-selector', 'value')]\n )(_change_map_style_callback)\n\n return app" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "numpy.max", "pandas.concat", "numpy.random.rand", "numpy.mean" ] ]
MalikIdreesHasanKhan/NeMo
[ "984fd34921e81659c4594a22ab142311808b3bb7" ]
[ "nemo/collections/asr/models/clustering_diarizer.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport pickle as pkl\nimport shutil\nimport tarfile\nimport tempfile\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom typing import List, Optional\n\nimport torch\nfrom omegaconf import DictConfig, OmegaConf\nfrom pytorch_lightning.utilities import rank_zero_only\nfrom tqdm import tqdm\n\nfrom nemo.collections.asr.models.classification_models import EncDecClassificationModel\nfrom nemo.collections.asr.models.label_models import EncDecSpeakerLabelModel\nfrom nemo.collections.asr.parts.mixins.mixins import DiarizationMixin\nfrom nemo.collections.asr.parts.utils.speaker_utils import (\n audio_rttm_map,\n get_uniqname_from_filepath,\n perform_clustering,\n score_labels,\n segments_manifest_to_subsegments_manifest,\n write_rttm2manifest,\n)\nfrom nemo.collections.asr.parts.utils.vad_utils import (\n generate_overlap_vad_seq,\n generate_vad_segment_table,\n get_vad_stream_status,\n prepare_manifest,\n)\nfrom nemo.core.classes import Model\nfrom nemo.utils import logging, model_utils\n\ntry:\n from torch.cuda.amp import autocast\nexcept ImportError:\n from contextlib import contextmanager\n\n @contextmanager\n def autocast(enabled=None):\n yield\n\n\n__all__ = ['ClusteringDiarizer']\n\n_MODEL_CONFIG_YAML = \"model_config.yaml\"\n_VAD_MODEL = \"vad_model.nemo\"\n_SPEAKER_MODEL = \"speaker_model.nemo\"\n\n\ndef get_available_model_names(class_name):\n \"lists available pretrained model names from NGC\"\n available_models = class_name.list_available_models()\n return list(map(lambda x: x.pretrained_model_name, available_models))\n\n\nclass ClusteringDiarizer(Model, DiarizationMixin):\n \"\"\"\n Inference model Class for offline speaker diarization. \n This class handles required functionality for diarization : Speech Activity Detection, Segmentation, \n Extract Embeddings, Clustering, Resegmentation and Scoring. 
\n All the parameters are passed through config file \n \"\"\"\n\n def __init__(self, cfg: DictConfig):\n cfg = model_utils.convert_model_config_to_dict_config(cfg)\n # Convert config to support Hydra 1.0+ instantiation\n cfg = model_utils.maybe_update_config_version(cfg)\n self._cfg = cfg\n\n # Diarizer set up\n self._diarizer_params = self._cfg.diarizer\n\n # init vad model\n self.has_vad_model = False\n if not self._diarizer_params.oracle_vad:\n if self._cfg.diarizer.vad.model_path is not None:\n self._vad_params = self._cfg.diarizer.vad.parameters\n self._init_vad_model()\n\n # init speaker model\n self._init_speaker_model()\n self._speaker_params = self._cfg.diarizer.speaker_embeddings.parameters\n self._speaker_dir = os.path.join(self._diarizer_params.out_dir, 'speaker_outputs')\n shutil.rmtree(self._speaker_dir, ignore_errors=True)\n os.makedirs(self._speaker_dir)\n\n # Clustering params\n self._cluster_params = self._diarizer_params.clustering.parameters\n\n self._device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n @classmethod\n def list_available_models(cls):\n pass\n\n def _init_vad_model(self):\n \"\"\"\n Initialize vad model with model name or path passed through config\n \"\"\"\n model_path = self._cfg.diarizer.vad.model_path\n if model_path.endswith('.nemo'):\n self._vad_model = EncDecClassificationModel.restore_from(model_path)\n logging.info(\"VAD model loaded locally from {}\".format(model_path))\n else:\n if model_path not in get_available_model_names(EncDecClassificationModel):\n logging.warning(\n \"requested {} model name not available in pretrained models, instead\".format(model_path)\n )\n model_path = \"vad_telephony_marblenet\"\n logging.info(\"Loading pretrained {} model from NGC\".format(model_path))\n self._vad_model = EncDecClassificationModel.from_pretrained(model_name=model_path)\n\n self._vad_window_length_in_sec = self._vad_params.window_length_in_sec\n self._vad_shift_length_in_sec = self._vad_params.shift_length_in_sec\n self.has_vad_model = True\n\n def _init_speaker_model(self):\n \"\"\"\n Initialize speaker embedding model with model name or path passed through config\n \"\"\"\n model_path = self._cfg.diarizer.speaker_embeddings.model_path\n if model_path is not None and model_path.endswith('.nemo'):\n self._speaker_model = EncDecSpeakerLabelModel.restore_from(model_path)\n logging.info(\"Speaker Model restored locally from {}\".format(model_path))\n elif model_path.endswith('.ckpt'):\n self._speaker_model = EncDecSpeakerLabelModel.load_from_checkpoint(model_path)\n logging.info(\"Speaker Model restored locally from {}\".format(model_path))\n else:\n if model_path not in get_available_model_names(EncDecSpeakerLabelModel):\n logging.warning(\n \"requested {} model name not available in pretrained models, instead\".format(model_path)\n )\n model_path = \"ecapa_tdnn\"\n logging.info(\"Loading pretrained {} model from NGC\".format(model_path))\n self._speaker_model = EncDecSpeakerLabelModel.from_pretrained(model_name=model_path)\n\n def _setup_vad_test_data(self, manifest_vad_input):\n vad_dl_config = {\n 'manifest_filepath': manifest_vad_input,\n 'sample_rate': self._cfg.sample_rate,\n 'batch_size': self._cfg.get('batch_size'),\n 'vad_stream': True,\n 'labels': ['infer',],\n 'time_length': self._vad_window_length_in_sec,\n 'shift_length': self._vad_shift_length_in_sec,\n 'trim_silence': False,\n 'num_workers': self._cfg.num_workers,\n }\n self._vad_model.setup_test_data(test_data_config=vad_dl_config)\n\n def 
_setup_spkr_test_data(self, manifest_file):\n spk_dl_config = {\n 'manifest_filepath': manifest_file,\n 'sample_rate': self._cfg.sample_rate,\n 'batch_size': self._cfg.get('batch_size'),\n 'time_length': self._speaker_params.window_length_in_sec,\n 'shift_length': self._speaker_params.shift_length_in_sec,\n 'trim_silence': False,\n 'labels': None,\n 'task': \"diarization\",\n 'num_workers': self._cfg.num_workers,\n }\n self._speaker_model.setup_test_data(spk_dl_config)\n\n def _run_vad(self, manifest_file):\n \"\"\"\n Run voice activity detection. \n Get log probability of voice activity detection and smoothes using the post processing parameters. \n Using generated frame level predictions generated manifest file for later speaker embedding extraction.\n input:\n manifest_file (str) : Manifest file containing path to audio file and label as infer\n\n \"\"\"\n\n shutil.rmtree(self._vad_dir, ignore_errors=True)\n os.makedirs(self._vad_dir)\n\n self._vad_model = self._vad_model.to(self._device)\n self._vad_model.eval()\n\n time_unit = int(self._vad_window_length_in_sec / self._vad_shift_length_in_sec)\n trunc = int(time_unit / 2)\n trunc_l = time_unit - trunc\n all_len = 0\n data = []\n for line in open(manifest_file, 'r'):\n file = json.loads(line)['audio_filepath']\n data.append(get_uniqname_from_filepath(file))\n\n status = get_vad_stream_status(data)\n for i, test_batch in enumerate(tqdm(self._vad_model.test_dataloader())):\n test_batch = [x.to(self._device) for x in test_batch]\n with autocast():\n log_probs = self._vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])\n probs = torch.softmax(log_probs, dim=-1)\n pred = probs[:, 1]\n if status[i] == 'start':\n to_save = pred[:-trunc]\n elif status[i] == 'next':\n to_save = pred[trunc:-trunc_l]\n elif status[i] == 'end':\n to_save = pred[trunc_l:]\n else:\n to_save = pred\n all_len += len(to_save)\n outpath = os.path.join(self._vad_dir, data[i] + \".frame\")\n with open(outpath, \"a\") as fout:\n for f in range(len(to_save)):\n fout.write('{0:0.4f}\\n'.format(to_save[f]))\n del test_batch\n if status[i] == 'end' or status[i] == 'single':\n all_len = 0\n\n if not self._vad_params.smoothing:\n # Shift the window by 10ms to generate the frame and use the prediction of the window to represent the label for the frame;\n self.vad_pred_dir = self._vad_dir\n else:\n # Generate predictions with overlapping input segments. 
Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.\n # smoothing_method would be either in majority vote (median) or average (mean)\n logging.info(\"Generating predictions with overlapping input segments\")\n smoothing_pred_dir = generate_overlap_vad_seq(\n frame_pred_dir=self._vad_dir,\n smoothing_method=self._vad_params.smoothing,\n overlap=self._vad_params.overlap,\n seg_len=self._vad_window_length_in_sec,\n shift_len=self._vad_shift_length_in_sec,\n num_workers=self._cfg.num_workers,\n )\n self.vad_pred_dir = smoothing_pred_dir\n\n logging.info(\"Converting frame level prediction to speech/no-speech segment in start and end times format.\")\n\n table_out_dir = generate_vad_segment_table(\n vad_pred_dir=self.vad_pred_dir,\n postprocessing_params=self._vad_params,\n shift_len=self._vad_shift_length_in_sec,\n num_workers=self._cfg.num_workers,\n )\n AUDIO_VAD_RTTM_MAP = deepcopy(self.AUDIO_RTTM_MAP.copy())\n for key in AUDIO_VAD_RTTM_MAP:\n AUDIO_VAD_RTTM_MAP[key]['rttm_filepath'] = os.path.join(table_out_dir, key + \".txt\")\n\n write_rttm2manifest(AUDIO_VAD_RTTM_MAP, self._vad_out_file)\n self._speaker_manifest_path = self._vad_out_file\n\n def _run_segmentation(self):\n\n self.subsegments_manifest_path = os.path.join(self._speaker_dir, 'subsegments.json')\n self.subsegments_manifest_path = segments_manifest_to_subsegments_manifest(\n segments_manifest_file=self._speaker_manifest_path,\n subsegments_manifest_file=self.subsegments_manifest_path,\n window=self._speaker_params.window_length_in_sec,\n shift=self._speaker_params.shift_length_in_sec,\n )\n\n return None\n\n def _perform_speech_activity_detection(self):\n \"\"\"\n Checks for type of speech activity detection from config. Choices are NeMo VAD,\n external vad manifest and oracle VAD (generates speech activity labels from provided RTTM files)\n \"\"\"\n if self.has_vad_model:\n self._dont_auto_split = False\n self._split_duration = 50\n manifest_vad_input = self._diarizer_params.manifest_filepath\n\n if not self._dont_auto_split:\n logging.info(\"Split long audio file to avoid CUDA memory issue\")\n logging.debug(\"Try smaller split_duration if you still have CUDA memory issue\")\n config = {\n 'manifest_filepath': manifest_vad_input,\n 'time_length': self._vad_window_length_in_sec,\n 'split_duration': self._split_duration,\n 'num_workers': self._cfg.num_workers,\n }\n manifest_vad_input = prepare_manifest(config)\n else:\n logging.warning(\n \"If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it.\"\n )\n\n self._setup_vad_test_data(manifest_vad_input)\n self._run_vad(manifest_vad_input)\n\n elif self._diarizer_params.vad.external_vad_manifest is not None:\n self._speaker_manifest_path = self._diarizer_params.vad.external_vad_manifest\n elif self._diarizer_params.oracle_vad:\n self._speaker_manifest_path = os.path.join(self._speaker_dir, 'oracle_vad_manifest.json')\n self._speaker_manifest_path = write_rttm2manifest(self.AUDIO_RTTM_MAP, self._speaker_manifest_path)\n else:\n raise ValueError(\n \"Only one of diarizer.oracle_vad, vad.model_path or vad.external_vad_manifest must be passed\"\n )\n\n def _extract_embeddings(self, manifest_file):\n \"\"\"\n This method extracts speaker embeddings from segments passed through manifest_file\n Optionally you may save the intermediate speaker embeddings for debugging or any use. 
\n \"\"\"\n logging.info(\"Extracting embeddings for Diarization\")\n self._setup_spkr_test_data(manifest_file)\n self.embeddings = defaultdict(list)\n self._speaker_model = self._speaker_model.to(self._device)\n self._speaker_model.eval()\n self.time_stamps = {}\n\n all_embs = []\n for test_batch in tqdm(self._speaker_model.test_dataloader()):\n test_batch = [x.to(self._device) for x in test_batch]\n audio_signal, audio_signal_len, labels, slices = test_batch\n with autocast():\n _, embs = self._speaker_model.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n emb_shape = embs.shape[-1]\n embs = embs.view(-1, emb_shape)\n all_embs.extend(embs.cpu().detach().numpy())\n del test_batch\n\n with open(manifest_file, 'r') as manifest:\n for i, line in enumerate(manifest.readlines()):\n line = line.strip()\n dic = json.loads(line)\n uniq_name = get_uniqname_from_filepath(dic['audio_filepath'])\n self.embeddings[uniq_name].extend([all_embs[i]])\n if uniq_name not in self.time_stamps:\n self.time_stamps[uniq_name] = []\n start = dic['offset']\n end = start + dic['duration']\n stamp = '{:.3f} {:.3f} '.format(start, end)\n self.time_stamps[uniq_name].append(stamp)\n\n if self._speaker_params.save_embeddings:\n embedding_dir = os.path.join(self._speaker_dir, 'embeddings')\n if not os.path.exists(embedding_dir):\n os.makedirs(embedding_dir, exist_ok=True)\n\n prefix = get_uniqname_from_filepath(manifest_file)\n\n name = os.path.join(embedding_dir, prefix)\n self._embeddings_file = name + '_embeddings.pkl'\n pkl.dump(self.embeddings, open(self._embeddings_file, 'wb'))\n logging.info(\"Saved embedding files to {}\".format(embedding_dir))\n\n def path2audio_files_to_manifest(self, paths2audio_files, manifest_filepath):\n with open(manifest_filepath, 'w') as fp:\n for audio_file in paths2audio_files:\n audio_file = audio_file.strip()\n entry = {'audio_filepath': audio_file, 'offset': 0.0, 'duration': None, 'text': '-', 'label': 'infer'}\n fp.write(json.dumps(entry) + '\\n')\n\n def diarize(self, paths2audio_files: List[str] = None, batch_size: int = 0):\n \"\"\"\n Diarize files provided thorugh paths2audio_files or manifest file\n input:\n paths2audio_files (List[str]): list of paths to file containing audio file\n batch_size (int): batch_size considered for extraction of speaker embeddings and VAD computation\n \"\"\"\n\n self._out_dir = self._diarizer_params.out_dir\n if not os.path.exists(self._out_dir):\n os.mkdir(self._out_dir)\n\n self._vad_dir = os.path.join(self._out_dir, 'vad_outputs')\n self._vad_out_file = os.path.join(self._vad_dir, \"vad_out.json\")\n\n if batch_size:\n self._cfg.batch_size = batch_size\n\n if paths2audio_files:\n if type(paths2audio_files) is list:\n self._diarizer_params.manifest_filepath = os.path.json(self._out_dir, 'paths2audio_filepath.json')\n self.path2audio_files_to_manifest(paths2audio_files, self._diarizer_params.manifest_filepath)\n else:\n raise ValueError(\"paths2audio_files must be of type list of paths to file containing audio file\")\n\n self.AUDIO_RTTM_MAP = audio_rttm_map(self._diarizer_params.manifest_filepath)\n\n # Speech Activity Detection\n self._perform_speech_activity_detection()\n\n # Segmentation\n self._run_segmentation()\n\n # Embedding Extraction\n self._extract_embeddings(self.subsegments_manifest_path)\n\n out_rttm_dir = os.path.join(self._out_dir, 'pred_rttms')\n os.makedirs(out_rttm_dir, exist_ok=True)\n\n # Clustering\n all_reference, all_hypothesis = perform_clustering(\n embeddings=self.embeddings,\n 
time_stamps=self.time_stamps,\n AUDIO_RTTM_MAP=self.AUDIO_RTTM_MAP,\n out_rttm_dir=out_rttm_dir,\n clustering_params=self._cluster_params,\n )\n\n # TODO Resegmentation -> Coming Soon\n\n # Scoring\n score = score_labels(\n self.AUDIO_RTTM_MAP,\n all_reference,\n all_hypothesis,\n collar=self._diarizer_params.collar,\n ignore_overlap=self._diarizer_params.ignore_overlap,\n )\n\n logging.info(\"Outputs are saved in {} directory\".format(os.path.abspath(self._diarizer_params.out_dir)))\n return score\n\n @staticmethod\n def __make_nemo_file_from_folder(filename, source_dir):\n with tarfile.open(filename, \"w:gz\") as tar:\n tar.add(source_dir, arcname=\"./\")\n\n @rank_zero_only\n def save_to(self, save_path: str):\n \"\"\"\n Saves model instance (weights and configuration) into EFF archive or .\n You can use \"restore_from\" method to fully restore instance from .nemo file.\n\n .nemo file is an archive (tar.gz) with the following:\n model_config.yaml - model configuration in .yaml format. You can deserialize this into cfg argument for model's constructor\n model_wights.chpt - model checkpoint\n\n Args:\n save_path: Path to .nemo file where model instance should be saved\n \"\"\"\n\n # TODO: Why does this override the main save_to?\n\n with tempfile.TemporaryDirectory() as tmpdir:\n config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)\n spkr_model = os.path.join(tmpdir, _SPEAKER_MODEL)\n\n self.to_config_file(path2yaml_file=config_yaml)\n if self.has_vad_model:\n vad_model = os.path.join(tmpdir, _VAD_MODEL)\n self._vad_model.save_to(vad_model)\n self._speaker_model.save_to(spkr_model)\n self.__make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir)\n\n @staticmethod\n def __unpack_nemo_file(path2file: str, out_folder: str) -> str:\n if not os.path.exists(path2file):\n raise FileNotFoundError(f\"{path2file} does not exist\")\n tar = tarfile.open(path2file, \"r:gz\")\n tar.extractall(path=out_folder)\n tar.close()\n return out_folder\n\n @classmethod\n def restore_from(\n cls,\n restore_path: str,\n override_config_path: Optional[str] = None,\n map_location: Optional[torch.device] = None,\n strict: bool = False,\n ):\n # Get path where the command is executed - the artifacts will be \"retrieved\" there\n # (original .nemo behavior)\n cwd = os.getcwd()\n\n with tempfile.TemporaryDirectory() as tmpdir:\n try:\n cls.__unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)\n os.chdir(tmpdir)\n if override_config_path is None:\n config_yaml = os.path.join(tmpdir, _MODEL_CONFIG_YAML)\n else:\n config_yaml = override_config_path\n conf = OmegaConf.load(config_yaml)\n if os.path.exists(os.path.join(tmpdir, _VAD_MODEL)):\n conf.diarizer.vad.model_path = os.path.join(tmpdir, _VAD_MODEL)\n else:\n logging.info(\n f'Model {cls.__name__} does not contain a VAD model. A VAD model or manifest file with'\n f'speech segments need for diarization with this model'\n )\n\n conf.diarizer.speaker_embeddings.model_path = os.path.join(tmpdir, _SPEAKER_MODEL)\n conf.restore_map_location = map_location\n OmegaConf.set_struct(conf, True)\n instance = cls(cfg=conf)\n\n logging.info(f'Model {cls.__name__} was successfully restored from {restore_path}.')\n finally:\n os.chdir(cwd)\n\n return instance\n" ]
[ [ "torch.cuda.is_available", "torch.softmax", "torch.cuda.amp.autocast" ] ]
ZephyrZhuQi/vqa-maskrcnn-benchmark
[ "16f6c7c9a2e75e8877901e17d6536c108b66e694" ]
[ "maskrcnn_benchmark/modeling/detector/generalized_rcnn.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# Modified by Qi Zhu, November 2019\n\"\"\"\nImplements the Generalized R-CNN framework\n\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\n\nfrom ..backbone import build_backbone\nfrom ..rpn.rpn import build_rpn\nfrom ..roi_heads.roi_heads import build_roi_heads\n\n\nimport numpy as np\n\n\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Main class for Generalized R-CNN. Currently supports boxes and masks.\n It consists of three main parts:\n - backbone\n = rpn\n - heads: takes the features + the proposals from the RPN and computes\n detections / masks from it.\n - disable the functionality of rpn, use ground truth bounding box instead\n \"\"\"\n\n def __init__(self, cfg):\n super(GeneralizedRCNN, self).__init__()\n\n self.backbone = build_backbone(cfg)\n self.rpn = build_rpn(cfg)\n self.roi_heads = build_roi_heads(cfg)\n self.return_feats = cfg.MODEL.ROI_BOX_HEAD.RETURN_FC_FEATS\n\n def forward(self, images, targets=None):\n \"\"\"\n Arguments:\n images (list[Tensor] or ImageList): images to be processed\n targets (list[BoxList]): ground-truth boxes present in the image (optional)\n\n Returns:\n result (list[BoxList] or dict[Tensor]): the output from the model.\n During training, it returns a dict[Tensor] which contains the losses.\n During testing, it returns list[BoxList] contains additional fields\n like `scores`, `labels` and `mask` (for Mask R-CNN models).\n\n \"\"\"\n if self.training and targets is None:\n raise ValueError(\"In training mode, targets should be passed\")\n # images = to_image_list(images)\n features = self.backbone(images.tensors)\n # proposals, proposal_losses = self.rpn(images, features, targets)\n # use gt as proposals instead of rpn\n proposals = []\n for image_index in range(len(images.image_sizes)):\n image_size = images.image_sizes[image_index]\n image_width = image_size[1]\n image_height = image_size[0]\n image_bboxes = images.image_bboxes[image_index]\n # multiply height & width\n image_bboxes = np.asarray(image_bboxes, dtype='float32')\n image_bboxes[:,0] *= image_width\n image_bboxes[:,1] *= image_width\n image_bboxes[:,2] *= image_height\n image_bboxes[:,3] *= image_height\n # xxyy to xyxy\n image_bboxes = image_bboxes[:,[0,2,1,3]]\n b_row = image_bboxes.shape[0]\n b_col = image_bboxes.shape[1]\n pad_col = b_col\n pad_row = b_row if b_row<100 else 100\n bbox_temp = np.zeros((100,4))\n bbox_temp[:pad_row,:pad_col]= image_bboxes[:pad_row,:pad_col] \n bbox_temp = torch.from_numpy(bbox_temp) \n bbox_temp = bbox_temp.cuda()\n #print('bbox', bbox_temp)\n proposal = BoxList(bbox_temp, (image_width,image_height), mode=\"xyxy\")\n proposals.append(proposal)\n\t\t\n \n if self.roi_heads:\n x, result, detector_losses = self.roi_heads(features, proposals, targets)\n else:\n # RPN-only models don't have roi_heads\n x = features\n result = proposals\n detector_losses = {}\n\n if self.training:\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses\n\n if self.return_feats and not self.training:\n #print('result', result[0].bbox)\n return (x, result)\n\n return result\n" ]
[ [ "torch.from_numpy", "numpy.asarray", "numpy.zeros" ] ]
Kin-Zhang/LAV
[ "0a5068c0fad3ecc2f2616801c6d3b00bc0ff03f3" ]
[ "team_code/planner.py" ]
[ "import os\nfrom collections import deque\n\nimport numpy as np\nimport math\n\nclass RoutePlanner(object):\n \n EARTH_RADIUS = 6371e3 # 6371km\n\n def __init__(self, global_plan, curr_threshold=20, next_threshold=75, debug=False):\n self.route = deque()\n self.curr_threshold = curr_threshold\n self.next_threshold = next_threshold\n\n # Convert lat,lon to x,y\n cos_0 = 0.\n for gnss, _ in global_plan:\n cos_0 += gnss['lat'] * (math.pi / 180)\n cos_0 = cos_0 / (len(global_plan))\n self.cos_0 = cos_0\n \n for node in global_plan:\n gnss, cmd = node\n\n x, y = self.latlon_to_xy(gnss['lat'], gnss['lon'])\n self.route.append((x, y))\n\n self.debug = debug\n\n self.current_idx = 0\n self.checkpoint = self.route[0]\n\n def run_step(self, gnss):\n\n x, y = self.latlon_to_xy(gnss[0], gnss[1])\n \n wx, wy = np.array(self.checkpoint)\n curr_distance = np.linalg.norm([wx-x, wy-y])\n\n for i, (wx, wy) in enumerate(self.route):\n \n distance = np.linalg.norm([wx-x, wy-y])\n \n if distance < self.next_threshold and i - self.current_idx==1 and curr_distance < self.curr_threshold:\n self.checkpoint = [wx, wy]\n self.current_idx += 1\n break\n \n return np.array(self.checkpoint) - [x,y]\n\n\n def latlon_to_xy(self, lat, lon):\n\n x = self.EARTH_RADIUS * lat * (math.pi / 180)\n y = self.EARTH_RADIUS * lon * (math.pi / 180) * math.cos(self.cos_0)\n\n return x, y\n" ]
[ [ "numpy.array", "numpy.linalg.norm" ] ]
chengzee/disease_predict
[ "d7a3c57b710ab2e93d56c8d73aeaa21120d3e98c" ]
[ "orchid_DiseasePredict/datasets/Attention_in_several_test/LSTM64_LSTM64_LSTM64_LSTM64_A_LSTM64_LSTM64.py" ]
[ "import pandas as pd\nimport numpy as np\nimport csv\nfrom keras.layers import Dense, Lambda, dot, Activation, concatenate\nfrom keras.layers import Layer\nimport keras.backend as K\n\n# Parameters\n# -------------------------------------------------------------------------------------------------------------------\nbed = [631, 742, 701, 759, 765, 698]\nlookback_days = 3\ndatasInADay = 288\ninput_dim = 3\nsecondsInADay = 60*60*24 \n\n# 定義 attention 機制 (return_sequence=True)\nclass attention(Layer):\n def __init__(self,**kwargs):\n super(attention,self).__init__(**kwargs)\n def build(self,input_shape):\n self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n super(attention, self).build(input_shape)\n def call(self,x):\n et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n at=K.softmax(et)\n at=K.expand_dims(at,axis=-1)\n output=x*at\n return K.sum(output,axis=1, keepdims=True)\n def compute_output_shape(self,input_shape):\n return (input_shape)\n def get_config(self):\n return super(attention,self).get_config()\n\n\n# # 定義 attention 機制 (return_sequence=False)\n# class attention(Layer):\n# def __init__(self,**kwargs):\n# super(attention,self).__init__(**kwargs)\n# def build(self,input_shape):\n# self.W=self.add_weight(name=\"att_weight\",shape=(input_shape[-1],1),initializer=\"normal\")\n# self.b=self.add_weight(name=\"att_bias\",shape=(input_shape[1],1),initializer=\"zeros\") \n# super(attention, self).build(input_shape)\n# def call(self,x):\n# et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)\n# at=K.softmax(et)\n# at=K.expand_dims(at,axis=-1)\n# output=x*at\n# return K.sum(output,axis=1)\n# def compute_output_shape(self,input_shape):\n# return (input_shape[0],input_shape[-1])\n# def get_config(self):\n# return super(attention,self).get_config()\n\n# np.random.seed(1)\n# 讀取 「(統計近期三日)近期死亡csv」\ntargetRecent = pd.read_csv(\"targetRecent.csv\")\n# 轉為 numpy array\ntargetRecent_arr = np.array(targetRecent)\n# print(targetRecent_arr)\n# -------------------------------------------------------------------------------------------------------------------\n# 生成資料集\ndef generator_with_augmentation(inputdata, starttime, lookback, dead_recently, samp_list_1, samp_list_0, targ_list_1, targ_list_0): # 輸入資料 samp_list = []; 輸出結果 targ_list = []\n for i in range(datasInADay):\n rows = np.arange(i+starttime, i+starttime+lookback)\n if np.count_nonzero(inputdata[rows, 4] == 0) <= 316:\n if dead_recently == 1:\n samp_list_1.append(inputdata[rows, 1:4])\n targ_list_1.append(dead_recently)\n if dead_recently == 0:\n samp_list_0.append(inputdata[rows, 1:4])\n targ_list_0.append(dead_recently)\n return samp_list_1, samp_list_0, targ_list_1, targ_list_0\n\nsamples_1 = []\nsamples_0 = []\ntargets_1 = []\ntargets_0 = []\n\n# 測試結果csv建立\nwith open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n writer.writerow([\"第n次,LSTM64_64_64_64_A_LSTM64_64\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n\nfor n in range(len(targetRecent_arr)): # 近期死亡統計數量\n for m in range(len(bed)): # 試驗植床總共六床\n if targetRecent_arr[n, 2] == bed[m]:\n paddeddata_arr = np.array(pd.read_csv(\"addfeature9{}.csv\".format(m+1)))\n # print(\"BedPlant:{}\".format(m+1))\n # 
----------------------------------------------------------------------------------------------------------------------------------------\n # 平均值正規化 [-1, 1]\n data_min = np.min(paddeddata_arr[:, 1:4], axis=0)\n data_max = np.max(paddeddata_arr[:, 1:4], axis=0)\n data_mean = np.mean(paddeddata_arr[:, 1:4], axis=0)\n # print(data_min)\n # print(data_max)\n # print(data_mean)\n paddeddata_arr[:, 1:4] = (paddeddata_arr[:, 1:4]-data_mean)/(data_max-data_min)\n # ----------------------------------------------------------------------------------------------------------------------------------------\n where = np.searchsorted(paddeddata_arr[:, 0], targetRecent_arr[n, 0]-secondsInADay*lookback_days) # 604800 是七天的秒數; 432000 是五天的秒數; 259200 是三天的秒數\n # print(\"where:{}\".format(where))\n samples_1, samples_0, targets_1, targets_0 = generator_with_augmentation(paddeddata_arr, starttime=where, lookback=datasInADay*lookback_days, dead_recently=targetRecent_arr[n, 1], samp_list_1=samples_1, samp_list_0=samples_0, targ_list_1=targets_1, targ_list_0=targets_0)\n# 轉為 numpy array\nsamples_1_arr = np.array(samples_1)\nsamples_0_arr = np.array(samples_0)\ntargets_1_arr = np.array(targets_1)\ntargets_0_arr = np.array(targets_0)\nprint(\"samples_1_arr.shape:{}\".format(samples_1_arr.shape))\nprint(\"samples_0_arr.shape:{}\".format(samples_0_arr.shape))\nprint(\"targets_1_arr.shape:{}\".format(targets_1_arr.shape))\nprint(\"targets_0_arr.shape:{}\".format(targets_0_arr.shape))\n\nprint(np.count_nonzero(targets_1_arr==1))\nprint(np.count_nonzero(targets_0_arr==1))\n\n# # -------------------------------------------------------------------------------------------------------------------\n# # # train test split\nx_train_arr = np.concatenate((samples_1_arr[:int(len(samples_1_arr)*0.7)], samples_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\ny_train_arr = np.concatenate((targets_1_arr[:int(len(samples_1_arr)*0.7)], targets_0_arr[:int(len(samples_1_arr)*0.7)]), axis=0)\nx_test_arr = np.concatenate((samples_1_arr[int(len(samples_1_arr)*0.7):], samples_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\ny_test_arr = np.concatenate((targets_1_arr[int(len(samples_1_arr)*0.7):], targets_0_arr[int(len(samples_1_arr)*0.7):]), axis=0)\nprint(\"x_train_arr.shape:{}\".format(x_train_arr.shape))\nprint(\"y_train_arr.shape:{}\".format(y_train_arr.shape))\nprint(\"x_test_arr.shape:{}\".format(x_test_arr.shape))\nprint(\"y_test_arr.shape:{}\".format(y_test_arr.shape))\n\n# -------------------------------------------------------------------------------------------------------------------\n# tf.keras model\nfor t in range(10): # 做幾遍\n # LSTM 模型的訓練與驗證\n from keras.models import Sequential\n from keras import layers\n from keras.optimizers import RMSprop, Adam\n from keras.callbacks import ModelCheckpoint\n model = Sequential()\n model.add(layers.LSTM(64,\n input_shape=(datasInADay*lookback_days, input_dim), # (288*3, 3)\n return_sequences=True,\n # dropout=0.2\n ))\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(attention())\n model.add(layers.LSTM(64,\n return_sequences=True,\n ))\n model.add(layers.LSTM(64,\n return_sequences=False,\n ))\n model.add(layers.Dense(1, activation='sigmoid'))\n model.summary()\n model.compile(optimizer=Adam(),\n loss = 'binary_crossentropy',\n metrics=['accuracy'])\n# 
-------------------------------------------------------------------------------------------------------------------\n # checkpoint\n filepath=\"weights.best.hdf5\"\n checkpoint = ModelCheckpoint(filepath, \n monitor='val_accuracy', \n verbose=1, \n save_best_only=True,\n mode='max')\n callbacks_list = [checkpoint]\n # fit the model\n history = model.fit(x_train_arr, y_train_arr,\n epochs=200,\n batch_size=256,\n # validation_data=(x_val_arr, y_val_arr),\n validation_split=0.4, \n callbacks=callbacks_list,\n verbose=1)\n model.load_weights(\"weights.best.hdf5\")\n print(\"第{}次結果,選用最好的val_acc來對testSet做預測:\".format(t+1))\n test_score = model.evaluate(x_test_arr, y_test_arr)\n print(\"test_score:{}\".format(test_score))\n # 預測結果\n pred = model.predict(x_test_arr)\n TrueP = 0\n TrueN = 0\n FalseP = 0\n FalseN = 0 \n for pp in range(len(pred)):\n if(pred[pp]>0.5 and y_test_arr[pp]==1):\n TrueP += 1\n if(pred[pp]>0.5 and y_test_arr[pp]==0):\n FalseP += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==1):\n FalseN += 1\n if(pred[pp]<=0.5 and y_test_arr[pp]==0):\n TrueN += 1\n print(\"test數量:{}\".format(len(x_test_arr)))\n print(\"True_Positive:{}\".format(TrueP))\n print(\"True_Nagitive:{}\".format(TrueN))\n print(\"False_Positive:{}\".format(FalseP))\n print(\"False_Nagitive:{}\".format(FalseN))\n precision = TrueP/(TrueP+FalseP)\n recall = TrueP/(TrueP+FalseN)\n print(\"Precision:{}\".format(precision))\n print(\"Recall:{}\".format(recall))\n with open(\"predict_with_attention.csv\", 'a+') as predictcsv:\n writer = csv.writer(predictcsv)\n # writer.writerow([\"第n次\", \"test_acc\", \"True Positive\", \"True Negative\", \"False Positive\", \"False Negative\", \"Precision\", \"Recall\"])\n writer.writerow([t+1, test_score[1], TrueP, TrueN, FalseP, FalseN, precision, recall])" ]
[ [ "numpy.searchsorted", "pandas.read_csv", "numpy.count_nonzero", "numpy.arange", "numpy.max", "numpy.min", "numpy.array", "numpy.mean" ] ]
aemoser/PyWake
[ "889a2c10882195af21339e9bcf2ede0db9b58319" ]
[ "py_wake/utils/area_overlapping_factor.py" ]
[ "from numpy import newaxis as na\n\nimport numpy as np\n\n\nclass AreaOverlappingFactor():\n\n def overlapping_area_factor(self, wake_radius_ijlk, dw_ijlk, cw_ijlk, D_src_il, D_dst_ijl):\n \"\"\"Calculate overlapping factor\n\n Parameters\n ----------\n dw_jl : array_like\n down wind distance [m]\n cw_jl : array_like\n cross wind distance [m]\n D_src_l : array_like\n Diameter of source turbines [m]\n D_dst_jl : array_like or None\n Diameter of destination turbines [m]. If None destination is assumed to be a point\n\n Returns\n -------\n A_ol_factor_jl : array_like\n area overlaping factor\n \"\"\"\n\n if np.all(D_dst_ijl == 0) or D_dst_ijl is None:\n return wake_radius_ijlk > cw_ijlk\n else:\n if wake_radius_ijlk.ndim == 5:\n return self._cal_overlapping_area_factor(\n np.broadcast_to(wake_radius_ijlk, cw_ijlk.shape),\n np.broadcast_to(D_dst_ijl[..., na, na] / 2, cw_ijlk.shape),\n np.abs(cw_ijlk))\n else:\n return self._cal_overlapping_area_factor(wake_radius_ijlk,\n (D_dst_ijl[..., na] / 2),\n np.abs(cw_ijlk))\n\n def _cal_overlapping_area_factor(self, R1, R2, d):\n \"\"\" Calculate the overlapping area of two circles with radius R1 and\n R2, centers distanced d.\n\n The calculation formula can be found in Eq. (A1) of :\n [Ref] Feng J, Shen WZ, Solving the wind farm layout optimization\n problem using Random search algorithm, Renewable Energy 78 (2015)\n 182-192\n Note that however there are typos in Equation (A1), '2' before alpha\n and beta should be 1.\n\n Parameters\n ----------\n R1: array:float\n Radius of the first circle [m]\n\n R2: array:float\n Radius of the second circle [m]\n\n d: array:float\n Distance between two centers [m]\n\n Returns\n -------\n A_ol: array:float\n Overlapping area [m^2]\n \"\"\"\n # treat all input as array\n R1, R2, d = [np.asarray(a) for a in [R1, R2, d]]\n if R2.shape != R1.shape:\n R2 = np.zeros_like(R1) + R2\n if d.shape != R1.shape:\n d = np.zeros_like(R1) + d\n A_ol_f = np.zeros(np.maximum(R1.shape, R2.shape))\n p = (R1 + R2 + d) / 2.0\n\n # make sure R_big >= R_small\n Rmax = np.where(R1 < R2, R2, R1)\n Rmin = np.where(R1 < R2, R1, R2)\n\n # full wake cases\n index_fullwake = (d <= (Rmax - Rmin))\n A_ol_f[index_fullwake] = 1\n\n # partial wake cases\n mask = (d > (Rmax - Rmin)) & (d < (Rmin + Rmax))\n\n # in somecases cos_alpha or cos_beta can be larger than 1 or less than\n # -1.0, cause problem to arccos(), resulting nan values, here fix this\n # issue.\n def arccos_lim(x):\n return np.arccos(np.maximum(np.minimum(x, 1), -1))\n\n alpha = arccos_lim((Rmax[mask]**2.0 + d[mask]**2 - Rmin[mask]**2) /\n (2.0 * Rmax[mask] * d[mask]))\n\n beta = arccos_lim((Rmin[mask]**2.0 + d[mask]**2 - Rmax[mask]**2) /\n (2.0 * Rmin[mask] * d[mask]))\n\n A_triangle = np.sqrt(p[mask] * (p[mask] - Rmin[mask]) *\n (p[mask] - Rmax[mask]) * (p[mask] - d[mask]))\n\n A_ol_f[mask] = (alpha * Rmax[mask]**2 + beta * Rmin[mask]**2 -\n 2.0 * A_triangle) / (R2[mask]**2 * np.pi)\n\n return A_ol_f\n" ]
[ [ "numpy.zeros_like", "numpy.maximum", "numpy.abs", "numpy.asarray", "numpy.all", "numpy.broadcast_to", "numpy.sqrt", "numpy.where", "numpy.minimum" ] ]
keyboardbear/IzunaDSP
[ "2e4a93dc1e09775f23aebec70e2f51f8706a7635" ]
[ "izunadsp/parts/apply_eq.py" ]
[ "# External Libraries\nfrom essentia.standard import FFT, IFFT\nimport numpy as np\n\n# IzunaDSP\nfrom izunadsp import DSPPart, AudioSequence\n\n\nclass ApplyEQ(DSPPart):\n def __init__(self):\n super().__init__()\n self._eq = np.array([1])\n self.eq = [1]\n self.fft = FFT()\n self.ifft = IFFT()\n\n @property\n def eq(self):\n return self._eq\n\n @eq.setter\n def eq(self, value: list):\n group_size = 513 // len(value) + 1\n v = np.array(value).repeat(group_size)\n too_many = len(v) - 513\n for i in range(too_many):\n v = np.delete(v, i * (group_size - 1))\n\n self._eq = v\n\n def set_eq(self, eq: list):\n if not len or len(eq) > 512:\n raise ValueError(\"Expected a list of size 0 < n <= 512\")\n self.eq = eq\n\n def bands_to_eq_size(self, frame: np.array) -> np.array:\n frame *= self.eq\n return frame / 1000\n\n def transform(self, frame: np.ndarray) -> np.ndarray:\n fftified = self.fft(frame.copy())\n eq_applied = self.bands_to_eq_size(fftified)\n return self.ifft(eq_applied)\n\n def handle(self, audio: AudioSequence) -> AudioSequence:\n left, right = audio / 2\n\n new_left = []\n new_right = []\n\n for old, new in zip([left, right], [new_left, new_right]):\n for frame in old:\n new.append(frame.apply(self.transform, seq=True))\n\n return sum(new_left) * sum(new_right)\n" ]
[ [ "numpy.array", "numpy.delete" ] ]
leelabcnbc/keras-vis
[ "0af75c03b8eed2e488c122fcd3f535d27a4ede8f" ]
[ "vis/visualization/__init__.py" ]
[ "from __future__ import absolute_import\n\n\nfrom .activation_maximization import visualize_activation_with_losses\nfrom .activation_maximization import visualize_activation\n\nfrom .saliency import visualize_saliency_with_losses\nfrom .saliency import visualize_saliency\nfrom .saliency import visualize_cam_with_losses\nfrom .saliency import visualize_cam\n\nfrom tensorflow.keras import backend as K\n\n\ndef get_num_filters(layer):\n \"\"\"Determines the number of filters within the given `layer`.\n\n Args:\n layer: The keras layer to use.\n\n Returns:\n Total number of filters within `layer`.\n For `keras.layers.Dense` layer, this is the total number of outputs.\n \"\"\"\n # Handle layers with no channels.\n if K.ndim(layer.output) == 2:\n return K.int_shape(layer.output)[-1]\n\n channel_idx = 1 if K.image_data_format() == 'channels_first' else -1\n return K.int_shape(layer.output)[channel_idx]\n\n\ndef overlay(array1, array2, alpha=0.5):\n \"\"\"Overlays `array1` onto `array2` with `alpha` blending.\n\n Args:\n array1: The first numpy array.\n array2: The second numpy array.\n alpha: The alpha value of `array1` as overlayed onto `array2`. This value needs to be between [0, 1],\n with 0 being `array2` only to 1 being `array1` only (Default value = 0.5).\n\n Returns:\n The `array1`, overlayed with `array2` using `alpha` blending.\n \"\"\"\n if alpha < 0. or alpha > 1.:\n raise ValueError(\"`alpha` needs to be between [0, 1]\")\n if array1.shape != array2.shape:\n raise ValueError('`array1` and `array2` must have the same shapes')\n\n return (array1 * alpha + array2 * (1. - alpha)).astype(array1.dtype)\n" ]
[ [ "tensorflow.keras.backend.int_shape", "tensorflow.keras.backend.ndim", "tensorflow.keras.backend.image_data_format" ] ]
tomspur/scipy
[ "5309706537dbd96e0409f890a20fc6f5badfbac3" ]
[ "scipy/sparse/csc.py" ]
[ "\"\"\"Compressed Sparse Column matrix format\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = ['csc_matrix', 'isspmatrix_csc']\n\nfrom warnings import warn\n\nimport numpy as np\nfrom scipy.lib.six import xrange\n\nfrom .base import isspmatrix\nfrom .sparsetools import csc_tocsr\nfrom . import sparsetools\nfrom .sputils import upcast, isintlike, IndexMixin, get_index_dtype\n\nfrom .compressed import _cs_matrix\n\n\nclass csc_matrix(_cs_matrix, IndexMixin):\n \"\"\"\n Compressed Sparse Column matrix\n\n This can be instantiated in several ways:\n\n csc_matrix(D)\n with a dense matrix or rank-2 ndarray D\n\n csc_matrix(S)\n with another sparse matrix S (equivalent to S.tocsc())\n\n csc_matrix((M, N), [dtype])\n to construct an empty matrix with shape (M, N)\n dtype is optional, defaulting to dtype='d'.\n\n csc_matrix((data, ij), [shape=(M, N)])\n where ``data`` and ``ij`` satisfy the relationship\n ``a[ij[0, k], ij[1, k]] = data[k]``\n\n csc_matrix((data, indices, indptr), [shape=(M, N)])\n is the standard CSC representation where the row indices for\n column i are stored in ``indices[indptr[i]:indptr[i+1]]``\n and their corresponding values are stored in\n ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is\n not supplied, the matrix dimensions are inferred from\n the index arrays.\n\n Attributes\n ----------\n dtype : dtype\n Data type of the matrix\n shape : 2-tuple\n Shape of the matrix\n ndim : int\n Number of dimensions (this is always 2)\n nnz\n Number of nonzero elements\n data\n Data array of the matrix\n indices\n CSC format index array\n indptr\n CSC format index pointer array\n has_sorted_indices\n Whether indices are sorted\n\n Notes\n -----\n\n Sparse matrices can be used in arithmetic operations: they support\n addition, subtraction, multiplication, division, and matrix power.\n\n Advantages of the CSC format\n - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.\n - efficient column slicing\n - fast matrix vector products (CSR, BSR may be faster)\n\n Disadvantages of the CSC format\n - slow row slicing operations (consider CSR)\n - changes to the sparsity structure are expensive (consider LIL or DOK)\n\n\n Examples\n --------\n\n >>> from scipy.sparse import *\n >>> from scipy import *\n >>> csc_matrix( (3,4), dtype=int8 ).todense()\n matrix([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int8)\n\n >>> row = array([0,2,2,0,1,2])\n >>> col = array([0,0,1,2,2,2])\n >>> data = array([1,2,3,4,5,6])\n >>> csc_matrix( (data,(row,col)), shape=(3,3) ).todense()\n matrix([[1, 0, 4],\n [0, 0, 5],\n [2, 3, 6]])\n\n >>> indptr = array([0,2,3,6])\n >>> indices = array([0,2,2,0,1,2])\n >>> data = array([1,2,3,4,5,6])\n >>> csc_matrix( (data,indices,indptr), shape=(3,3) ).todense()\n matrix([[1, 0, 4],\n [0, 0, 5],\n [2, 3, 6]])\n\n \"\"\"\n\n def transpose(self, copy=False):\n from .csr import csr_matrix\n M,N = self.shape\n return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy)\n\n def __iter__(self):\n csr = self.tocsr()\n for r in xrange(self.shape[0]):\n yield csr[r,:]\n\n def tocsc(self, copy=False):\n if copy:\n return self.copy()\n else:\n return self\n\n def tocsr(self):\n M,N = self.shape\n idx_dtype = get_index_dtype((self.indptr, self.indices),\n maxval=max(self.nnz, N))\n indptr = np.empty(M + 1, dtype=idx_dtype)\n indices = np.empty(self.nnz, dtype=idx_dtype)\n data = np.empty(self.nnz, dtype=upcast(self.dtype))\n\n csc_tocsr(M, N,\n 
self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n self.data,\n indptr,\n indices,\n data)\n\n from .csr import csr_matrix\n A = csr_matrix((data, indices, indptr), shape=self.shape)\n A.has_sorted_indices = True\n return A\n\n def __getitem__(self, key):\n # Use CSR to implement fancy indexing.\n\n row, col = self._unpack_index(key)\n # Things that return submatrices. row or col is a int or slice.\n if (isinstance(row, slice) or isinstance(col, slice) or\n isintlike(row) or isintlike(col)):\n return self.T[col, row].T\n # Things that return a sequence of values.\n else:\n return self.T[col, row]\n\n def nonzero(self):\n # CSC can't use _cs_matrix's .nonzero method because it\n # returns the indices sorted for self transposed.\n\n # Get row and col indices, from _cs_matrix.tocoo\n major_dim, minor_dim = self._swap(self.shape)\n minor_indices = self.indices\n major_indices = np.empty(len(minor_indices), dtype=self.indptr.dtype)\n sparsetools.expandptr(major_dim, self.indptr, major_indices)\n row, col = self._swap((major_indices, minor_indices))\n\n # Sort them to be in C-style order\n ind = np.lexsort((col, row))\n row = row[ind]\n col = col[ind]\n\n return row, col\n\n nonzero.__doc__ = _cs_matrix.nonzero.__doc__\n\n def getrow(self, i):\n \"\"\"Returns a copy of row i of the matrix, as a (1 x n)\n CSR matrix (row vector).\n \"\"\"\n # transpose to use CSR code\n # we convert to CSR to maintain compatibility with old impl.\n # in spmatrix.getrow()\n return self.T.getcol(i).T.tocsr()\n\n def getcol(self, i):\n \"\"\"Returns a copy of column i of the matrix, as a (m x 1)\n CSC matrix (column vector).\n \"\"\"\n # transpose to use CSR code\n return self.T.getrow(i).T\n\n # these functions are used by the parent class (_cs_matrix)\n # to remove redudancy between csc_matrix and csr_matrix\n def _swap(self,x):\n \"\"\"swap the members of x if this is a column-oriented matrix\n \"\"\"\n return (x[1],x[0])\n\n\ndef isspmatrix_csc(x):\n return isinstance(x, csc_matrix)\n" ]
[ [ "numpy.lexsort", "scipy.lib.six.xrange", "numpy.empty" ] ]
otanan/STP
[ "85e6f6ff7bfe1bdd832dc5c2f32d0fdb084c55fa" ]
[ "stp/info.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Entropy and information theory related calculations.\n\n**Author: Jonathan Delgado**\n\n\"\"\"\n\n######################## Imports ########################\n\n\nimport numpy as np\n\nimport stp\n\n\n######################## Helper functions ########################\n\n\ndef _eps_filter(x):\n \"\"\" Checks if the value is within machine-epsilon of zero and maps it to \n zero if it is the case. Useful for removing negative values in entropies that should otherwise be zero.\n \n Args:\n x (float): value to be checked.\n \n Returns:\n (float): x if the value is not within machine epsilon of zero, 0 otherwise.\n \n \"\"\"\n return x if not np.isclose(x, 0, atol=9*10E-15) else 0\n \n\n######################## Entropy calculations ########################\n\n\ndef entropy(p):\n \"\"\" Calculates the Shannon entropy for a marginal distribution.\n\n Args:\n p (np.ndarray): the marginal distribution.\n\n Returns:\n (float): the entropy of p\n\n \"\"\"\n # Since zeros do not contribute to the Shannon entropy by definition, we \n # ignore them to avoid any errors/warnings.\n p = p[p != 0]\n\n H = -np.dot(p, np.log(p))\n # Filter against machine epsilon\n return _eps_filter(H)\n\n\ndef delta_entropy(R, p):\n \"\"\" Calculates the discrete time change in entropy using the entropy of p \n evolved with R, minus the entropy of p.\n \n Args:\n R (np.ndarray): the transition matrix.\n\n p (np.ndarray): the marginal distribution.\n \n Returns:\n (float): the change in entropy\n \n \"\"\"\n return entropy(step(R, p)) - entropy(p)\n\n\ndef relative_entropy(p, q):\n \"\"\" Calculates the Kullback-Leibler divergence, which is nonnegative and \n vanishes if and only if the distributions coincide.\n \n Args:\n p, q (np.ndarray): the probability distributions.\n \n Returns:\n (float): the relative entropy.\n \n \"\"\"\n if p.shape[0] != q.shape[0]:\n print('Dimensions of vectors are not equal. Cannot find relative entropy.')\n sys.exit()\n\n # Any values where p is zero are defined to be zero and hence do not\n # contribute to the relative entropy\n # By masking q as well we automatically skip the values that were supposed\n # to vanish with p avoiding any misalignment issues\n # Note that by masking q only where p is zero doesn't remove\n # any mismatching meaning it will still be infinite (as it should be)\n # in the case where q has a zero that p does not.\n p_filtered = p[p != 0]\n log_ratio = np.log(p_filtered / q[p != 0])\n\n return np.dot(p_filtered, log_ratio)\n\n\ndef entropy_production(matrix, p, discrete=True):\n \"\"\" Calculates the entropy production for either discrete or continuous \n time.\n \n Args:\n matrix (np.ndarray): the stochastic matrix, either a discrete time transition matrix or a continuous time rate matrix.\n\n p (np.ndarray): the marginal distribution\n\n Kwargs:\n discrete (bool): True if we are calculating the discrete time entropy production (nats), False if we are calculating it in continuous time (nats/time).\n \n Returns:\n (float/np.inf): the entropy production\n \n \"\"\"\n log_product = matrix * np.log( matrix / matrix.T )\n # The entropy term only exists in the case of discrete time\n # it vanishes when we calculate the continuous time EP,\n # by multiplying by the boolean we include it only when\n # necessary\n EP = np.dot(log_product.sum(axis=0), p) - (entropy(p) * discrete) \\\n - np.dot(stp.step(matrix, p), np.log(p))\n return EP\n\n\ndef entropy_flow(R, p):\n \"\"\" Calculates the discrete time entropy flow. 
This has not been \n generalized to handle the continuous time entropy flow yet.\n \n Args:\n R (np.ndarray): the discrete time transition matrix\n \n p (np.ndarray): the marginal distribution\n \n Returns:\n (float): the entropy flow\n \n \"\"\"\n # Vectorized calculation\n log_product = R * np.log( R / R.T )\n p_step = step(R, p)\n EF = -np.dot(log_product.sum(axis=0), p) + entropy(p_step) \\\n + np.dot(p_step, np.log(p))\n return EF\n\n\n######################## Entropy rates ########################\n\n\ndef entropy_rate(R):\n \"\"\" Calculates the asymptotic entropy rate for the provided transition \n matrix. If the matrix is time-inhomogeneous then we return a function that generates the entropy_rate as a function of n by calculating the systems limiting distribution for each n.\n \n Args:\n R (np.ndarray/function): the transition matrix.\n \n Returns:\n (float/function): the entropy velocity.\n \n \"\"\"\n if callable(R):\n return lambda n : entropy_rate(R(n))\n\n pst = stp.get_stationary_distribution(R, discrete=True)\n RProduct = (R * np.log(R)).sum(axis=0)\n\n return -np.dot(pst, RProduct)\n\n\n######################## Information Space Objects ########################\n\n\nclass InfoSpace:\n \"\"\" Information space. Holds collections of paths that traverse states in a \n state space as a matrix, and the probability of each of those paths. \n \n Provides functionality on this path space such as providing path entropies.\n \n Attributes:\n paths: the matrix of paths.\n\n probabilities: a list of probabilities each path.\n\n num_paths: the number of paths considered.\n\n path_length: the length of the paths considered.\n\n probabilities: a matrix where the (i,j)th element is the probability of observing the first j states of the ith path.\n\n entropies: a list of path entropies for each path\n\n total_probability: the sum of the probabilities of each path.\n\n \"\"\"\n\n def __init__(self, paths, p_matrix):\n \"\"\" Initializes the InfoSpace object.\n \n Args:\n paths (np.ndarray): a matrix of paths where the (i,j)th element corresponds to the jth symbol of the ith path.\n\n p_matrix (np.ndarray): a matrix of probabilities where the (i,j)th element corresponds to the probability of observing the ith path for the first j+1 (zero-indexing) symbols.\n \n \"\"\"\n self._paths = np.array(paths)\n \n # Matrix of probabilities corresponding to the probability for the path\n # at each moment.\n self._p_matrix = np.array(p_matrix)\n \n if self._p_matrix.size != 0:\n # The information space is not empty\n self._probabilities = self._p_matrix[:, -1]\n else:\n # There is zero probability here.\n self._probabilities = 0\n\n\n #------------- Properties -------------#\n\n\n @property\n def paths(self):\n return self._paths\n\n\n @property\n def num_paths(self):\n return self.paths.shape[0]\n\n\n @property\n def path_length(self):\n return self.paths.shape[1]\n \n\n @property\n def probabilities(self):\n return self._probabilities\n\n\n @property\n def entropies(self):\n \"\"\" Returns a list of path entropies for each corresponding path \n probability.\n\n \"\"\"\n try:\n return self._entropies\n except AttributeError:\n # It's never been calculated before\n self._entropies = -np.log(self.probabilities)\n\n return self._entropies\n\n\n @property\n def total_probability(self):\n try:\n return self.probabilities.sum()\n except AttributeError:\n # Space is empty\n return 0\n \n\n #------------- Static methods -------------#\n\n\n @staticmethod\n def shorten(infospace, path_length, 
return_index=False):\n \"\"\" Takes an Information Space and shortens it. Since unique paths of \n length n, may be degenerate when truncated to paths of length m < n, we need to check for degeneracies and filter them out in both paths and probabilities.\n \n Args:\n infospace (InfoSpace): the information space to shorten.\n\n path_length (int): the path length the information space should be shortened to.\n\n Kwargs:\n return_index (bool): returns the indices of the non-degenerate paths for the given path length using the original matrix. Useful for filtering other quantities of interest that may not be attached to this object. \n \n Returns:\n (InfoSpace): the shortened InfoSpace.\n \n \"\"\"\n if path_length < 1:\n raise ValueError(f'Invalid path length: {path_length}. Path length must be an integer greater than 0.')\n elif path_length > infospace.path_length:\n raise ValueError(f'Cannot shorten an InformationSpace from length: {infospace.path_length} -> {path_length}.')\n\n if infospace.paths.size == 0:\n # This is an empty information space\n return infospace if not return_index else (infospace, [])\n\n # Truncate the path matrix\n paths = infospace.paths[:, :path_length]\n # Return index will provide the path indices of the non-degenerate paths\n _, indices = np.unique(paths, axis=0, return_index=True) \n # Sort the indices\n indices = sorted(indices)\n # Filter out the paths. Not taken from np.unique to ensure the correct\n # ordering.\n paths = paths[indices, :]\n # Truncate the probability matrix\n p_matrix = infospace._p_matrix[:, :path_length]\n # Filter the probabilities matrix\n p_matrix = p_matrix[indices, :]\n\n infospace = InfoSpace(paths, p_matrix)\n return infospace if not return_index else infospace, indices\n\n\nclass PartitionedInfoSpace(InfoSpace):\n \"\"\" Partitioned Information Space. Constructs a typical set on an \n information space to partition it into a typical information space and an atypical one. \n\n Holds path probabilities, typical paths, atypical paths, atypical path probabilities and more. This object will use a provided (often sampled) path space to partition the space into a collection of typical and atypical paths depending on the dynamics provided. Will also track other quantities of interest such as the upper and lower bounds on the path probabilities required for the paths to be considered typical.\n \n Attributes:\n paths: the matrix of paths.\n\n probabilities: a list of probabilities each path.\n\n num_paths: the number of paths considered.\n\n path_length: the length of the paths considered.\n\n probabilities: a matrix where the (i,j)th element is the probability of observing the first j states of the ith path.\n\n entropies: a list of path entropies for each path.\n\n entropy_rates: a list of the entropy rates for each various path length. This will be the center of the epsilon-neighborhood for path entropies to qualify paths as typical for.\n\n epsilon: the widths of the neighborhood used for paths to be considered typical for each path length.\n\n upper/lower: the upper/lower bounds as measured in nats. 
This means that a path is typical if and only if its path entropy rate is within these bounds.\n\n typicalities: a matrix where the (i,j)th element is a boolean determining whether the ith path is typical after j+1 steps.\n\n ts: the typical set.\n\n ats: the atypical set.\n\n \"\"\"\n\n def __init__(self, entropy_rates, epsilon, paths=None, p_matrix=None, typical_space=None, atypical_space=None):\n \"\"\" Generates the PartitionedInfoSpace.\n \n Args:\n entropy_rates (np.ndarray): a list of the entropy rates for each various path length. This will be the center of the epsilon-neighborhood for path entropies to qualify paths as typical for.\n\n epsilon (np.ndarray): the widths of the neighborhood used for paths to be considered typical for each path length.\n\n Kwargs:\n paths (np.ndarray/None): the entire sampled path space, the union of the typical and atypical spaces. If not provided these spaces will be merged to generate it.\n\n p_matrix (np.ndarray/None): the entire matrix of probabilities for each path and each path length. If not provided, this will be generated by merging the p_matrix of the typical and atypical spaces.\n\n typical_space (InfoSpace/None): the typical set on this space. If None, partitions the provided path space.\n\n atypical_space (InfoSpace): the atypical set on this space. If None, partitions the provided path space.\n \n \"\"\"\n # Bool if the space simply needs to be partitioned\n must_partition = (paths is None) or (p_matrix is None)\n # Bool if the space simply needs to be merged since it's already been \n # partitioned into a typical and atypical space\n must_union = (typical_space is None) or (atypical_space is None)\n\n if must_partition and must_union:\n # We need either the paths AND the p_matrix or the tupical/atypical \n # spaces to partition/union the spaces respectively.\n raise TypeError('In sufficient information provided to partition/union the Information Space. 
We need either paths with their probabilities or the already partitioned spaces.')\n\n\n if must_partition:\n # Partition the paths and probability matrix into a typical and\n # atypical space\n\n # Need to generate the upper/lower bounds for the partitioning\n # of the spaces\n self._lower = entropy_rates - epsilon\n self._upper = entropy_rates + epsilon\n\n ts_paths = []; ts_p_matrix = []\n ats_paths = []; ats_p_matrix = []\n\n for path, path_index in enumerate(paths):\n path_prob = p_matrix[path_index]\n # The path entropy rate for direct comparison with the\n # upper/lower bounds\n path_entropy_rate = -np.log(path_prob[-1]) / path_length\n\n is_typical = (\n (self.lower[-1] <= path_entropy_rate)\n and (path_entropy_rate <= self._upper)\n )\n\n if is_typical:\n ts_paths.append(path)\n ts_p_matrix.append(path_prob)\n else:\n ats_paths.append(path)\n ats_p_matrix.append(path_prob)\n\n typical_space = InfoSpace(ts_paths, ts_p_matrix)\n atypical_space = InfoSpace(ats_paths, ats_p_matrix)\n\n elif must_union:\n # Union the path data\n ts_empty = (typical_space.paths.size == 0)\n ats_empty = (atypical_space.paths.size == 0)\n\n if not ts_empty and not ats_empty:\n # Both are nonempty\n paths = np.vstack( (typical_space.paths, atypical_space.paths) )\n p_matrix = np.vstack(\n (typical_space._p_matrix, atypical_space._p_matrix)\n )\n elif ts_empty:\n # Only the typical_space is empty\n paths = atypical_space.paths\n p_matrix = atypical_space._p_matrix\n else:\n # Only the atypical_space is empty\n paths = typical_space.paths\n p_matrix = typical_space._p_matrix\n\n ### Storing properties ###\n self._paths = paths\n self._p_matrix = p_matrix\n\n self._probabilities = self._p_matrix[:, -1]\n\n self._entropy_rates = entropy_rates\n\n # Generalize the epsilon to a path_length dependent epsilon for\n # potential generalizations in child classes.\n if isinstance(epsilon, list):\n epsilon = np.array(epsilon)\n if not isinstance(epsilon, np.ndarray):\n # We were only provided a float\n epsilon = np.full(self.path_length, epsilon)\n self._epsilon = epsilon\n\n self._ts = typical_space\n self._ats = atypical_space\n\n\n #------------- Properties -------------#\n\n\n @property\n def entropy_rates(self):\n return self._entropy_rates\n \n\n @property\n def epsilon(self):\n return self._epsilon\n\n\n @property\n def upper(self):\n try:\n return self._upper\n except AttributeError:\n # It's never been calculated before\n self._upper = self.entropy_rates + self.epsilon \n\n return self._upper\n\n\n @property\n def lower(self):\n try:\n return self._lower\n except AttributeError:\n # It's never been calculated before.\n self._lower = self.entropy_rates - self.epsilon\n\n return self._lower\n\n\n @property\n def typicalities(self):\n \"\"\" Returns the matrix of typicalities. \"\"\"\n try:\n return self._typicalities\n except AttributeError:\n # It's never been calculated before\n typicalities = []\n ns = np.arange(1, self.path_length + 1)\n\n path_entropy_rates = -np.log(self._p_matrix) / ns\n\n self._typicalities = (\n (self.lower <= path_entropy_rates)\n & (path_entropy_rates <= self.upper)\n )\n\n return self._typicalities\n\n\n @property\n def ats(self):\n return self._ats\n\n\n @property\n def ts(self):\n return self._ts\n \n \n #------------- Static methods -------------#\n\n\n @staticmethod\n def shorten(pinfospace, path_length, return_index=False):\n \"\"\" Takes a PartitionedInformationSpace and shortens it. 
Since unique \n paths of length n, may be degenerate when truncated to paths of length m < n, we need to check for degeneracies and filter them out in both paths and probabilities.\n \n Args:\n pinfospace (PartitionedInfoSpace): the partitioned information space to shorten.\n\n path_length (int): the path length the information space should be shortened to. \n\n Kwargs:\n return_index (bool): returns the indices of the non-degenerate paths for the given path length using the original matrix. Useful for filtering other quantities of interest that may not be attached to this object. \n \n Returns:\n (PartitionedInfoSpace): the shortened PartitionedInfoSpace.\n \n \"\"\"\n # Hold the current information space to access properties\n old_pinfospace = pinfospace\n # Call parent method\n # Paths and p_matrix will be handled here along with any other \n # properties shared with parent. Sorted indices of non-degenerate \n # paths will be calculated here too.\n pinfospace, indices = InfoSpace.shorten(old_pinfospace, path_length, return_index=True)\n \n # Finish the rest of this object's specific properties\n\n # Truncate the entropy_rates\n entropy_rates = old_pinfospace.entropy_rates[:path_length]\n\n # Truncate the epsilon\n epsilon = old_pinfospace.epsilon[:path_length]\n\n # Truncate the typicalities matrix\n # Necessary to re-partition the space.\n # Filter out the typicalities matrix\n typicalities = old_pinfospace.typicalities[indices, :path_length]\n\n ### Partitioning ###\n ts_paths, ts_p_matrix = [], []\n ats_paths, ats_p_matrix = [], []\n\n paths = pinfospace.paths\n p_matrix = pinfospace._p_matrix\n for path_index, is_typical in enumerate(typicalities[:, -1]):\n path = paths[path_index]\n probs = p_matrix[path_index]\n\n if is_typical:\n ts_paths.append(path)\n ts_p_matrix.append(probs)\n else:\n ats_paths.append(path)\n ats_p_matrix.append(probs)\n\n # The partitioned spaces\n ts = InfoSpace(ts_paths, ts_p_matrix)\n ats = InfoSpace(ats_paths, ats_p_matrix)\n\n pinfospace = PartitionedInfoSpace(entropy_rates=entropy_rates, epsilon=epsilon, paths=paths, p_matrix=p_matrix, typical_space=ts, atypical_space=ats)\n \n # Save the pre-generated property\n pinfospace._typicalities = typicalities\n\n return pinfospace if not return_index else pinfospace, indices\n\n\n @staticmethod\n def partition_space(R, p, paths, epsilon=0.5, return_p=False):\n \"\"\" Partitions a path space using the dynamics provided.\n\n Args:\n R (np.ndarray/function): the transition matrix, time-dependent if provided as a function.\n\n p (np.ndarray): the initial marginal distribution.\n\n paths (np.ndarray): the portion of the path space to use.\n \n Kwargs:\n epsilon (float/np.ndarray): the radius/radii of the epsilon neighborhood to consider paths to be typical within.\n\n return_p (bool): False, return only the PartitionedInfoSpace, True returns both the PartitionedInfoSpace and a list of the marginal vs time.\n \n Returns:\n (ParitionedInfoSpace/2-tuple): the PartitionedInfoSpace (PIS) or the PIS and a list of the marginal versus observation step if return_p is True.\n\n \"\"\"\n\n #------------- Data preparation -------------#\n\n # Convert the transition matrix to add time-dependence as a constant \n # matrix if a constant matrix was provided\n if not callable(R):\n # Not being saved as an attribute since this is not easily\n # recoverable by being saved to a file.\n # Emphasize saving properties that can be saved/loaded.\n oldR = R\n R = lambda n : oldR\n\n num_paths, path_length = paths.shape\n\n p_matrix = 
np.zeros(paths.shape)\n # Initialize the marginal distribution data\n for x, path in enumerate(paths):\n # Just equal to the initial marginal\n p_matrix[x, 0] = p[path[0]]\n \n # Used for the bounds\n entropy_rates = np.array([\n entropy_rate(R(i))\n for i in range(path_length)\n ])\n\n # The marginal versus time\n if return_p: p_vs_time = [p]\n\n #------------- Data gathering -------------#\n\n # bar = gui.ProgressBar(path_length * num_paths, width=300, title='Gathering data...')\n\n ### Quantities versus time ###\n for current_path_length in range(2, path_length + 1):\n # The data index\n i = current_path_length - 1\n # Since the marginals are zero-indexed as are the paths\n step_index = current_path_length - 2\n\n currentR = R(current_path_length - 1)\n # Propagate the marginal one step and save it separately\n # for quantities like the temporal coarse graining term\n pstep = stp.step(currentR, p)\n\n ### Path probability calculations ###\n for x, path in enumerate(paths):\n\n current_state = path[step_index]\n jump_state = path[step_index + 1]\n\n # Forward calculations\n # Recursive calculation to save time\n last_joint = p_matrix[x, i - 1]\n jump_prob = currentR[jump_state, current_state]\n p_matrix[x, i] = last_joint * jump_prob\n\n # If updated in each iteration, slows down the simulation \n # drastically\n # bar.update(amount=num_paths)\n\n if return_p: p_vs_time.append(pstep)\n # Finished data gathering for this iteration, propagate marginal\n # forward in time\n p = pstep\n\n # bar.finish()\n\n ### Partitioning ###\n upper = entropy_rates + epsilon\n lower = entropy_rates - epsilon\n\n ts_paths, ts_p_matrix = [], []\n ats_paths, ats_p_matrix = [], []\n\n path_entropy_rates = -np.log(p_matrix[:, -1]) / path_length\n\n # Identify the paths that are typical and atypical\n for path_index, path_entropy_rate in enumerate(path_entropy_rates):\n # Can't create typicality matrix since partitioning it will\n # break the ordering\n # Determines whether this path is ultimately typical\n is_typical = (\n (lower[-1] <= path_entropy_rate)\n and (path_entropy_rate <= upper[-1])\n )\n\n probs = p_matrix[path_index]\n\n if is_typical:\n ts_paths.append(path)\n ts_p_matrix.append(probs)\n else:\n ats_paths.append(path)\n ats_p_matrix.append(probs)\n\n # The partitioned spaces\n ts = InfoSpace(ts_paths, ts_p_matrix)\n ats = InfoSpace(ats_paths, ats_p_matrix)\n\n pinfospace = PartitionedInfoSpace(\n entropy_rates=entropy_rates,\n epsilon=epsilon,\n paths=paths,\n p_matrix=p_matrix,\n typical_space=ts,\n atypical_space=ats\n )\n\n # Set pre-calculated properties\n pinfospace._upper = upper\n pinfospace._lower = lower\n\n return (pinfospace, p_vs_time) if return_p else pinfospace\n\n\n######################## Entry ########################\n\n\ndef main():\n print('info.py')\n \n ### Testing ###\n p = stp.rand_p(3)\n R = stp.self_assembly_transition_matrix()\n paths = stp.complete_path_space(3, 4)\n pinfospace = PartitionedInfoSpace.partition_space(R, p, paths)\n print( f'pinfospace.total_probability: {pinfospace.total_probability}' )\n print(pinfospace.ats.num_paths)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.vstack", "numpy.zeros", "numpy.isclose", "numpy.arange", "numpy.log", "numpy.array", "numpy.dot", "numpy.full", "numpy.unique" ] ]
I--P/scipy
[ "ad02ef083824d195f04f267b141716e0f047197f" ]
[ "scipy/stats/_distn_infrastructure.py" ]
[ "#\n# Author: Travis Oliphant 2002-2011 with contributions from\n# SciPy Developers 2004-2011\n#\nfrom __future__ import division, print_function, absolute_import\n\nfrom scipy._lib.six import string_types, exec_\nfrom scipy._lib._util import getargspec_no_self as _getargspec\n\nimport sys\nimport keyword\nimport re\nimport types\nimport warnings\n\nfrom scipy.misc import doccer\nfrom ._distr_params import distcont, distdiscrete\nfrom scipy._lib._util import check_random_state, _lazywhere\nfrom scipy._lib._util import _valarray as valarray\n\nfrom scipy.special import (comb, chndtr, gammaln, entr, kl_div, xlogy, ive)\n\n# for root finding for discrete distribution ppf, and max likelihood estimation\nfrom scipy import optimize\n\n# for functions of continuous distributions (e.g. moments, entropy, cdf)\nfrom scipy import integrate\n\n# to approximate the pdf of a continuous distribution given its cdf\nfrom scipy.misc import derivative\n\nfrom numpy import (arange, putmask, ravel, take, ones, shape, ndarray,\n product, reshape, zeros, floor, logical_and, log, sqrt, exp)\n\nfrom numpy import (place, argsort, argmax, vectorize,\n asarray, nan, inf, isinf, NINF, empty)\n\nimport numpy as np\n\nfrom ._constants import _EPS, _XMAX\n\ntry:\n from new import instancemethod\nexcept ImportError:\n # Python 3\n def instancemethod(func, obj, cls):\n return types.MethodType(func, obj)\n\n\n# These are the docstring parts used for substitution in specific\n# distribution docstrings\n\ndocheaders = {'methods': \"\"\"\\nMethods\\n-------\\n\"\"\",\n 'notes': \"\"\"\\nNotes\\n-----\\n\"\"\",\n 'examples': \"\"\"\\nExamples\\n--------\\n\"\"\"}\n\n_doc_rvs = \"\"\"\\\n``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``\n Random variates.\n\"\"\"\n_doc_pdf = \"\"\"\\\n``pdf(x, %(shapes)s, loc=0, scale=1)``\n Probability density function.\n\"\"\"\n_doc_logpdf = \"\"\"\\\n``logpdf(x, %(shapes)s, loc=0, scale=1)``\n Log of the probability density function.\n\"\"\"\n_doc_pmf = \"\"\"\\\n``pmf(x, %(shapes)s, loc=0, scale=1)``\n Probability mass function.\n\"\"\"\n_doc_logpmf = \"\"\"\\\n``logpmf(x, %(shapes)s, loc=0, scale=1)``\n Log of the probability mass function.\n\"\"\"\n_doc_cdf = \"\"\"\\\n``cdf(x, %(shapes)s, loc=0, scale=1)``\n Cumulative density function.\n\"\"\"\n_doc_logcdf = \"\"\"\\\n``logcdf(x, %(shapes)s, loc=0, scale=1)``\n Log of the cumulative density function.\n\"\"\"\n_doc_sf = \"\"\"\\\n``sf(x, %(shapes)s, loc=0, scale=1)``\n Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).\n\"\"\"\n_doc_logsf = \"\"\"\\\n``logsf(x, %(shapes)s, loc=0, scale=1)``\n Log of the survival function.\n\"\"\"\n_doc_ppf = \"\"\"\\\n``ppf(q, %(shapes)s, loc=0, scale=1)``\n Percent point function (inverse of ``cdf`` --- percentiles).\n\"\"\"\n_doc_isf = \"\"\"\\\n``isf(q, %(shapes)s, loc=0, scale=1)``\n Inverse survival function (inverse of ``sf``).\n\"\"\"\n_doc_moment = \"\"\"\\\n``moment(n, %(shapes)s, loc=0, scale=1)``\n Non-central moment of order n\n\"\"\"\n_doc_stats = \"\"\"\\\n``stats(%(shapes)s, loc=0, scale=1, moments='mv')``\n Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').\n\"\"\"\n_doc_entropy = \"\"\"\\\n``entropy(%(shapes)s, loc=0, scale=1)``\n (Differential) entropy of the RV.\n\"\"\"\n_doc_fit = \"\"\"\\\n``fit(data, %(shapes)s, loc=0, scale=1)``\n Parameter estimates for generic data.\n\"\"\"\n_doc_expect = \"\"\"\\\n``expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``\n Expected value of a 
function (of one argument) with respect to the distribution.\n\"\"\"\n_doc_expect_discrete = \"\"\"\\\n``expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)``\n Expected value of a function (of one argument) with respect to the distribution.\n\"\"\"\n_doc_median = \"\"\"\\\n``median(%(shapes)s, loc=0, scale=1)``\n Median of the distribution.\n\"\"\"\n_doc_mean = \"\"\"\\\n``mean(%(shapes)s, loc=0, scale=1)``\n Mean of the distribution.\n\"\"\"\n_doc_var = \"\"\"\\\n``var(%(shapes)s, loc=0, scale=1)``\n Variance of the distribution.\n\"\"\"\n_doc_std = \"\"\"\\\n``std(%(shapes)s, loc=0, scale=1)``\n Standard deviation of the distribution.\n\"\"\"\n_doc_interval = \"\"\"\\\n``interval(alpha, %(shapes)s, loc=0, scale=1)``\n Endpoints of the range that contains alpha percent of the distribution\n\"\"\"\n_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,\n _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,\n _doc_logsf, _doc_ppf, _doc_isf, _doc_moment,\n _doc_stats, _doc_entropy, _doc_fit,\n _doc_expect, _doc_median,\n _doc_mean, _doc_var, _doc_std, _doc_interval])\n\n_doc_default_longsummary = \"\"\"\\\nAs an instance of the `rv_continuous` class, `%(name)s` object inherits from it\na collection of generic methods (see below for the full list),\nand completes them with details specific for this particular distribution.\n\"\"\"\n\n_doc_default_frozen_note = \"\"\"\nAlternatively, the object may be called (as a function) to fix the shape,\nlocation, and scale parameters returning a \"frozen\" continuous RV object:\n\nrv = %(name)s(%(shapes)s, loc=0, scale=1)\n - Frozen RV object with the same methods but holding the given shape,\n location, and scale fixed.\n\"\"\"\n_doc_default_example = \"\"\"\\\nExamples\n--------\n>>> from scipy.stats import %(name)s\n>>> import matplotlib.pyplot as plt\n>>> fig, ax = plt.subplots(1, 1)\n\nCalculate a few first moments:\n\n%(set_vals_stmt)s\n>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')\n\nDisplay the probability density function (``pdf``):\n\n>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),\n... %(name)s.ppf(0.99, %(shapes)s), 100)\n>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),\n... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')\n\nAlternatively, the distribution object can be called (as a function)\nto fix the shape, location and scale parameters. This returns a \"frozen\"\nRV object holding the given parameters fixed.\n\nFreeze the distribution and display the frozen ``pdf``:\n\n>>> rv = %(name)s(%(shapes)s)\n>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')\n\nCheck accuracy of ``cdf`` and ``ppf``:\n\n>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)\n>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))\nTrue\n\nGenerate random numbers:\n\n>>> r = %(name)s.rvs(%(shapes)s, size=1000)\n\nAnd compare the histogram:\n\n>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)\n>>> ax.legend(loc='best', frameon=False)\n>>> plt.show()\n\n\"\"\"\n\n_doc_default_locscale = \"\"\"\\\nThe probability density above is defined in the \"standardized\" form. 
To shift\nand/or scale the distribution use the ``loc`` and ``scale`` parameters.\nSpecifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically\nequivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with\n``y = (x - loc) / scale``.\n\"\"\"\n\n_doc_default = ''.join([_doc_default_longsummary,\n _doc_allmethods,\n '\\n',\n _doc_default_example])\n\n_doc_default_before_notes = ''.join([_doc_default_longsummary,\n _doc_allmethods])\n\ndocdict = {\n 'rvs': _doc_rvs,\n 'pdf': _doc_pdf,\n 'logpdf': _doc_logpdf,\n 'cdf': _doc_cdf,\n 'logcdf': _doc_logcdf,\n 'sf': _doc_sf,\n 'logsf': _doc_logsf,\n 'ppf': _doc_ppf,\n 'isf': _doc_isf,\n 'stats': _doc_stats,\n 'entropy': _doc_entropy,\n 'fit': _doc_fit,\n 'moment': _doc_moment,\n 'expect': _doc_expect,\n 'interval': _doc_interval,\n 'mean': _doc_mean,\n 'std': _doc_std,\n 'var': _doc_var,\n 'median': _doc_median,\n 'allmethods': _doc_allmethods,\n 'longsummary': _doc_default_longsummary,\n 'frozennote': _doc_default_frozen_note,\n 'example': _doc_default_example,\n 'default': _doc_default,\n 'before_notes': _doc_default_before_notes,\n 'after_notes': _doc_default_locscale\n}\n\n# Reuse common content between continuous and discrete docs, change some\n# minor bits.\ndocdict_discrete = docdict.copy()\n\ndocdict_discrete['pmf'] = _doc_pmf\ndocdict_discrete['logpmf'] = _doc_logpmf\ndocdict_discrete['expect'] = _doc_expect_discrete\n_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',\n 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',\n 'mean', 'var', 'std', 'interval']\nfor obj in _doc_disc_methods:\n docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')\ndocdict_discrete.pop('pdf')\ndocdict_discrete.pop('logpdf')\n\n_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])\ndocdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods\n\ndocdict_discrete['longsummary'] = _doc_default_longsummary.replace(\n 'rv_continuous', 'rv_discrete')\n\n_doc_default_frozen_note = \"\"\"\nAlternatively, the object may be called (as a function) to fix the shape and\nlocation parameters returning a \"frozen\" discrete RV object:\n\nrv = %(name)s(%(shapes)s, loc=0)\n - Frozen RV object with the same methods but holding the given shape and\n location fixed.\n\"\"\"\ndocdict_discrete['frozennote'] = _doc_default_frozen_note\n\n_doc_default_discrete_example = \"\"\"\\\nExamples\n--------\n>>> from scipy.stats import %(name)s\n>>> import matplotlib.pyplot as plt\n>>> fig, ax = plt.subplots(1, 1)\n\nCalculate a few first moments:\n\n%(set_vals_stmt)s\n>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')\n\nDisplay the probability mass function (``pmf``):\n\n>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),\n... %(name)s.ppf(0.99, %(shapes)s))\n>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')\n>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)\n\nAlternatively, the distribution object can be called (as a function)\nto fix the shape and location. This returns a \"frozen\" RV object holding\nthe given parameters fixed.\n\nFreeze the distribution and display the frozen ``pmf``:\n\n>>> rv = %(name)s(%(shapes)s)\n>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,\n... 
label='frozen pmf')\n>>> ax.legend(loc='best', frameon=False)\n>>> plt.show()\n\nCheck accuracy of ``cdf`` and ``ppf``:\n\n>>> prob = %(name)s.cdf(x, %(shapes)s)\n>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))\nTrue\n\nGenerate random numbers:\n\n>>> r = %(name)s.rvs(%(shapes)s, size=1000)\n\"\"\"\n\n\n_doc_default_discrete_locscale = \"\"\"\\\nThe probability mass function above is defined in the \"standardized\" form.\nTo shift distribution use the ``loc`` parameter.\nSpecifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically\nequivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.\n\"\"\"\n\ndocdict_discrete['example'] = _doc_default_discrete_example\ndocdict_discrete['after_notes'] = _doc_default_discrete_locscale\n\n_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],\n docdict_discrete['allmethods']])\ndocdict_discrete['before_notes'] = _doc_default_before_notes\n\n_doc_default_disc = ''.join([docdict_discrete['longsummary'],\n docdict_discrete['allmethods'],\n docdict_discrete['frozennote'],\n docdict_discrete['example']])\ndocdict_discrete['default'] = _doc_default_disc\n\n# clean up all the separate docstring elements, we do not need them anymore\nfor obj in [s for s in dir() if s.startswith('_doc_')]:\n exec('del ' + obj)\ndel obj\ntry:\n del s\nexcept NameError:\n # in Python 3, loop variables are not visible after the loop\n pass\n\n\ndef _moment(data, n, mu=None):\n if mu is None:\n mu = data.mean()\n return ((data - mu)**n).mean()\n\n\ndef _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):\n if (n == 0):\n return 1.0\n elif (n == 1):\n if mu is None:\n val = moment_func(1, *args)\n else:\n val = mu\n elif (n == 2):\n if mu2 is None or mu is None:\n val = moment_func(2, *args)\n else:\n val = mu2 + mu*mu\n elif (n == 3):\n if g1 is None or mu2 is None or mu is None:\n val = moment_func(3, *args)\n else:\n mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment\n val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment\n elif (n == 4):\n if g1 is None or g2 is None or mu2 is None or mu is None:\n val = moment_func(4, *args)\n else:\n mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment\n mu3 = g1*np.power(mu2, 1.5) # 3rd central moment\n val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu\n else:\n val = moment_func(n, *args)\n\n return val\n\n\ndef _skew(data):\n \"\"\"\n skew is third central moment / variance**(1.5)\n \"\"\"\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu)**2).mean()\n m3 = ((data - mu)**3).mean()\n return m3 / np.power(m2, 1.5)\n\n\ndef _kurtosis(data):\n \"\"\"\n kurtosis is fourth central moment / variance**2 - 3\n \"\"\"\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu)**2).mean()\n m4 = ((data - mu)**4).mean()\n return m4 / m2**2 - 3\n\n\n# Frozen RV class\nclass rv_frozen(object):\n\n def __init__(self, dist, *args, **kwds):\n self.args = args\n self.kwds = kwds\n\n # create a new instance\n self.dist = dist.__class__(**dist._updated_ctor_param())\n\n # a, b may be set in _argcheck, depending on *args, **kwds. 
Ouch.\n shapes, _, _ = self.dist._parse_args(*args, **kwds)\n self.dist._argcheck(*shapes)\n self.a, self.b = self.dist.a, self.dist.b\n\n @property\n def random_state(self):\n return self.dist._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self.dist._random_state = check_random_state(seed)\n\n def pdf(self, x): # raises AttributeError in frozen discrete distribution\n return self.dist.pdf(x, *self.args, **self.kwds)\n\n def logpdf(self, x):\n return self.dist.logpdf(x, *self.args, **self.kwds)\n\n def cdf(self, x):\n return self.dist.cdf(x, *self.args, **self.kwds)\n\n def logcdf(self, x):\n return self.dist.logcdf(x, *self.args, **self.kwds)\n\n def ppf(self, q):\n return self.dist.ppf(q, *self.args, **self.kwds)\n\n def isf(self, q):\n return self.dist.isf(q, *self.args, **self.kwds)\n\n def rvs(self, size=None, random_state=None):\n kwds = self.kwds.copy()\n kwds.update({'size': size, 'random_state': random_state})\n return self.dist.rvs(*self.args, **kwds)\n\n def sf(self, x):\n return self.dist.sf(x, *self.args, **self.kwds)\n\n def logsf(self, x):\n return self.dist.logsf(x, *self.args, **self.kwds)\n\n def stats(self, moments='mv'):\n kwds = self.kwds.copy()\n kwds.update({'moments': moments})\n return self.dist.stats(*self.args, **kwds)\n\n def median(self):\n return self.dist.median(*self.args, **self.kwds)\n\n def mean(self):\n return self.dist.mean(*self.args, **self.kwds)\n\n def var(self):\n return self.dist.var(*self.args, **self.kwds)\n\n def std(self):\n return self.dist.std(*self.args, **self.kwds)\n\n def moment(self, n):\n return self.dist.moment(n, *self.args, **self.kwds)\n\n def entropy(self):\n return self.dist.entropy(*self.args, **self.kwds)\n\n def pmf(self, k):\n return self.dist.pmf(k, *self.args, **self.kwds)\n\n def logpmf(self, k):\n return self.dist.logpmf(k, *self.args, **self.kwds)\n\n def interval(self, alpha):\n return self.dist.interval(alpha, *self.args, **self.kwds)\n\n def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):\n # expect method only accepts shape parameters as positional args\n # hence convert self.args, self.kwds, also loc/scale\n # See the .expect method docstrings for the meaning of\n # other parameters.\n a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)\n if isinstance(self.dist, rv_discrete):\n return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)\n else:\n return self.dist.expect(func, a, loc, scale, lb, ub,\n conditional, **kwds)\n\n\n# This should be rewritten\ndef argsreduce(cond, *args):\n \"\"\"Return the sequence of ravel(args[i]) where ravel(condition) is\n True in 1D.\n\n Examples\n --------\n >>> import numpy as np\n >>> rand = np.random.random_sample\n >>> A = rand((4, 5))\n >>> B = 2\n >>> C = rand((1, 5))\n >>> cond = np.ones(A.shape)\n >>> [A1, B1, C1] = argsreduce(cond, A, B, C)\n >>> B1.shape\n (20,)\n >>> cond[2,:] = 0\n >>> [A2, B2, C2] = argsreduce(cond, A, B, C)\n >>> B2.shape\n (15,)\n\n \"\"\"\n newargs = np.atleast_1d(*args)\n if not isinstance(newargs, list):\n newargs = [newargs, ]\n expand_arr = (cond == cond)\n return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]\n\n\nparse_arg_template = \"\"\"\ndef _parse_args(self, %(shape_arg_str)s %(locscale_in)s):\n return (%(shape_arg_str)s), %(locscale_out)s\n\ndef _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):\n return (%(shape_arg_str)s), %(locscale_out)s, size\n\ndef _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):\n return 
(%(shape_arg_str)s), %(locscale_out)s, moments\n\"\"\"\n\n\n# Both the continuous and discrete distributions depend on ncx2.\n# I think the function name ncx2 is an abbreviation for noncentral chi squared.\n\ndef _ncx2_log_pdf(x, df, nc):\n # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor\n # of exp(-xs*ns) into the ive function to improve numerical stability\n # at large values of xs. See also `rice.pdf`.\n df2 = df/2.0 - 1.0\n xs, ns = np.sqrt(x), np.sqrt(nc)\n res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2\n res += np.log(ive(df2, xs*ns) / 2.0)\n return res\n\n\ndef _ncx2_pdf(x, df, nc):\n return np.exp(_ncx2_log_pdf(x, df, nc))\n\n\ndef _ncx2_cdf(x, df, nc):\n return chndtr(x, df, nc)\n\n\nclass rv_generic(object):\n \"\"\"Class which encapsulates common functionality between rv_discrete\n and rv_continuous.\n\n \"\"\"\n def __init__(self, seed=None):\n super(rv_generic, self).__init__()\n\n # figure out if _stats signature has 'moments' keyword\n sign = _getargspec(self._stats)\n self._stats_has_moments = ((sign[2] is not None) or\n ('moments' in sign[0]))\n self._random_state = check_random_state(seed)\n\n @property\n def random_state(self):\n \"\"\" Get or set the RandomState object for generating random variates.\n\n This can be either None or an existing RandomState object.\n\n If None (or np.random), use the RandomState singleton used by np.random.\n If already a RandomState instance, use it.\n If an int, use a new RandomState instance seeded with seed.\n\n \"\"\"\n return self._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self._random_state = check_random_state(seed)\n\n def __getstate__(self):\n return self._updated_ctor_param(), self._random_state\n\n def __setstate__(self, state):\n ctor_param, r = state\n self.__init__(**ctor_param)\n self._random_state = r\n return self\n\n def _construct_argparser(\n self, meths_to_inspect, locscale_in, locscale_out):\n \"\"\"Construct the parser for the shape arguments.\n\n Generates the argument-parsing functions dynamically and attaches\n them to the instance.\n Is supposed to be called in __init__ of a class for each distribution.\n\n If self.shapes is a non-empty string, interprets it as a\n comma-separated list of shape parameters.\n\n Otherwise inspects the call signatures of `meths_to_inspect`\n and constructs the argument-parsing functions from these.\n In this case also sets `shapes` and `numargs`.\n \"\"\"\n\n if self.shapes:\n # sanitize the user-supplied shapes\n if not isinstance(self.shapes, string_types):\n raise TypeError('shapes must be a string.')\n\n shapes = self.shapes.replace(',', ' ').split()\n\n for field in shapes:\n if keyword.iskeyword(field):\n raise SyntaxError('keywords cannot be used as shapes.')\n if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):\n raise SyntaxError(\n 'shapes must be valid python identifiers')\n else:\n # find out the call signatures (_pdf, _cdf etc), deduce shape\n # arguments. 
Generic methods only have 'self, x', any further args\n # are shapes.\n shapes_list = []\n for meth in meths_to_inspect:\n shapes_args = _getargspec(meth) # NB: does not contain self\n args = shapes_args.args[1:] # peel off 'x', too\n\n if args:\n shapes_list.append(args)\n\n # *args or **kwargs are not allowed w/automatic shapes\n if shapes_args.varargs is not None:\n raise TypeError(\n '*args are not allowed w/out explicit shapes')\n if shapes_args.keywords is not None:\n raise TypeError(\n '**kwds are not allowed w/out explicit shapes')\n if shapes_args.defaults is not None:\n raise TypeError('defaults are not allowed for shapes')\n\n if shapes_list:\n shapes = shapes_list[0]\n\n # make sure the signatures are consistent\n for item in shapes_list:\n if item != shapes:\n raise TypeError('Shape arguments are inconsistent.')\n else:\n shapes = []\n\n # have the arguments, construct the method from template\n shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None\n dct = dict(shape_arg_str=shapes_str,\n locscale_in=locscale_in,\n locscale_out=locscale_out,\n )\n ns = {}\n exec_(parse_arg_template % dct, ns)\n # NB: attach to the instance, not class\n for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:\n setattr(self, name,\n instancemethod(ns[name], self, self.__class__)\n )\n\n self.shapes = ', '.join(shapes) if shapes else None\n if not hasattr(self, 'numargs'):\n # allows more general subclassing with *args\n self.numargs = len(shapes)\n\n def _construct_doc(self, docdict, shapes_vals=None):\n \"\"\"Construct the instance docstring with string substitutions.\"\"\"\n tempdict = docdict.copy()\n tempdict['name'] = self.name or 'distname'\n tempdict['shapes'] = self.shapes or ''\n\n if shapes_vals is None:\n shapes_vals = ()\n vals = ', '.join('%.3g' % val for val in shapes_vals)\n tempdict['vals'] = vals\n\n tempdict['shapes_'] = self.shapes or ''\n if self.shapes and self.numargs == 1:\n tempdict['shapes_'] += ','\n\n if self.shapes:\n tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)\n else:\n tempdict['set_vals_stmt'] = ''\n\n if self.shapes is None:\n # remove shapes from call parameters if there are none\n for item in ['default', 'before_notes']:\n tempdict[item] = tempdict[item].replace(\n \"\\n%(shapes)s : array_like\\n shape parameters\", \"\")\n for i in range(2):\n if self.shapes is None:\n # necessary because we use %(shapes)s in two forms (w w/o \", \")\n self.__doc__ = self.__doc__.replace(\"%(shapes)s, \", \"\")\n self.__doc__ = doccer.docformat(self.__doc__, tempdict)\n\n # correct for empty shapes\n self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')\n\n def _construct_default_doc(self, longname=None, extradoc=None,\n docdict=None, discrete='continuous'):\n \"\"\"Construct instance docstring from the default template.\"\"\"\n if longname is None:\n longname = 'A'\n if extradoc is None:\n extradoc = ''\n if extradoc.startswith('\\n\\n'):\n extradoc = extradoc[2:]\n self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),\n '\\n\\n%(before_notes)s\\n', docheaders['notes'],\n extradoc, '\\n%(example)s'])\n self._construct_doc(docdict)\n\n def freeze(self, *args, **kwds):\n \"\"\"Freeze the distribution for the given arguments.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution. 
Should include all\n the non-optional arguments, may include ``loc`` and ``scale``.\n\n Returns\n -------\n rv_frozen : rv_frozen instance\n The frozen distribution.\n\n \"\"\"\n return rv_frozen(self, *args, **kwds)\n\n def __call__(self, *args, **kwds):\n return self.freeze(*args, **kwds)\n __call__.__doc__ = freeze.__doc__\n\n # The actual calculation functions (no basic checking need be done)\n # If these are defined, the others won't be looked at.\n # Otherwise, the other set can be defined.\n def _stats(self, *args, **kwds):\n return None, None, None, None\n\n # Central moments\n def _munp(self, n, *args):\n # Silence floating point warnings from integration.\n olderr = np.seterr(all='ignore')\n vals = self.generic_moment(n, *args)\n np.seterr(**olderr)\n return vals\n\n ## These are the methods you must define (standard form functions)\n ## NB: generic _pdf, _logpdf, _cdf are different for\n ## rv_continuous and rv_discrete hence are defined in there\n def _argcheck(self, *args):\n \"\"\"Default check for correct values on args and keywords.\n\n Returns condition array of 1's where arguments are correct and\n 0's where they are not.\n\n \"\"\"\n cond = 1\n for arg in args:\n cond = logical_and(cond, (asarray(arg) > 0))\n return cond\n\n ##(return 1-d using self._size to get number)\n def _rvs(self, *args):\n ## Use basic inverse cdf algorithm for RV generation as default.\n U = self._random_state.random_sample(self._size)\n Y = self._ppf(U, *args)\n return Y\n\n def _logcdf(self, x, *args):\n return log(self._cdf(x, *args))\n\n def _sf(self, x, *args):\n return 1.0-self._cdf(x, *args)\n\n def _logsf(self, x, *args):\n return log(self._sf(x, *args))\n\n def _ppf(self, q, *args):\n return self._ppfvec(q, *args)\n\n def _isf(self, q, *args):\n return self._ppf(1.0-q, *args) # use correct _ppf for subclasses\n\n # These are actually called, and should not be overwritten if you\n # want to keep error checking.\n def rvs(self, *args, **kwds):\n \"\"\"\n Random variates of given type.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n scale : array_like, optional\n Scale parameter (default=1).\n size : int or tuple of ints, optional\n Defining number of random variates (default is 1).\n random_state : None or int or ``np.random.RandomState`` instance, optional\n If int or RandomState, use it for drawing the random variates.\n If None, rely on ``self.random_state``.\n Default is None.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of given `size`.\n\n \"\"\"\n discrete = kwds.pop('discrete', None)\n rndm = kwds.pop('random_state', None)\n args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n cond = logical_and(self._argcheck(*args), (scale >= 0))\n if not np.all(cond):\n raise ValueError(\"Domain error in arguments.\")\n\n # self._size is total size of all output values\n self._size = product(size, axis=0)\n if self._size is not None and self._size > 1:\n size = np.array(size, ndmin=1)\n\n if np.all(scale == 0):\n return loc*ones(size, 'd')\n\n # extra gymnastics needed for a custom random_state\n if rndm is not None:\n random_state_saved = self._random_state\n self._random_state = check_random_state(rndm)\n\n vals = self._rvs(*args)\n if self._size is not None:\n vals = reshape(vals, size)\n\n vals = vals * scale + loc\n\n # do not forget to restore the _random_state\n if rndm is not None:\n self._random_state = random_state_saved\n\n # Cast to int if discrete\n if discrete:\n if np.isscalar(vals):\n vals = int(vals)\n else:\n vals = vals.astype(int)\n\n return vals\n\n def stats(self, *args, **kwds):\n \"\"\"\n Some statistics of the given RV.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional (continuous RVs only)\n scale parameter (default=1)\n moments : str, optional\n composed of letters ['mvsk'] defining which moments to compute:\n 'm' = mean,\n 'v' = variance,\n 's' = (Fisher's) skew,\n 'k' = (Fisher's) kurtosis.\n (default is 'mv')\n\n Returns\n -------\n stats : sequence\n of requested moments.\n\n \"\"\"\n args, loc, scale, moments = self._parse_args_stats(*args, **kwds)\n # scale = 1 by construction for discrete RVs\n loc, scale = map(asarray, (loc, scale))\n args = tuple(map(asarray, args))\n cond = self._argcheck(*args) & (scale > 0) & (loc == loc)\n output = []\n default = valarray(shape(cond), self.badvalue)\n\n # Use only entries that are valid in calculation\n if np.any(cond):\n goodargs = argsreduce(cond, *(args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n\n if self._stats_has_moments:\n mu, mu2, g1, g2 = self._stats(*goodargs,\n **{'moments': moments})\n else:\n mu, mu2, g1, g2 = self._stats(*goodargs)\n if g1 is None:\n mu3 = None\n else:\n if mu2 is None:\n mu2 = self._munp(2, *goodargs)\n if g2 is None:\n # (mu2**1.5) breaks down for nan and inf\n mu3 = g1 * np.power(mu2, 1.5)\n\n if 'm' in moments:\n if mu is None:\n mu = self._munp(1, *goodargs)\n out0 = default.copy()\n place(out0, cond, mu * scale + loc)\n output.append(out0)\n\n if 'v' in moments:\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n mu2 = mu2p - mu * mu\n if np.isinf(mu):\n #if mean is inf then var is also inf\n mu2 = np.inf\n out0 = default.copy()\n place(out0, cond, mu2 * scale * scale)\n output.append(out0)\n\n if 's' in moments:\n if g1 is None:\n mu3p = self._munp(3, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n mu2 = mu2p - mu * mu\n mu3 = mu3p - 3 * mu * mu2 - mu**3\n g1 = mu3 / np.power(mu2, 1.5)\n out0 = default.copy()\n place(out0, cond, g1)\n output.append(out0)\n\n if 'k' in moments:\n if g2 is None:\n mu4p = self._munp(4, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n mu2 = mu2p - mu * mu\n if mu3 is None:\n mu3p = self._munp(3, *goodargs)\n mu3 = mu3p - 3 * mu * mu2 - mu**3\n mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4\n g2 = mu4 / mu2**2.0 - 3.0\n out0 = default.copy()\n place(out0, cond, g2)\n output.append(out0)\n else: # no valid args\n output = []\n for _ in moments:\n out0 = default.copy()\n output.append(out0)\n\n if len(output) == 1:\n return output[0]\n else:\n return tuple(output)\n\n def entropy(self, *args, **kwds):\n \"\"\"\n Differential entropy of the RV.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n scale : array_like, optional (continuous distributions only).\n Scale parameter (default=1).\n\n Notes\n -----\n Entropy is defined base `e`:\n\n >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))\n >>> np.allclose(drv.entropy(), np.log(2.0))\n True\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n # NB: for discrete distributions scale=1 by construction in _parse_args\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n output = zeros(shape(cond0), 'd')\n place(output, (1-cond0), self.badvalue)\n goodargs = argsreduce(cond0, *args)\n # np.vectorize doesn't work when numargs == 0 in numpy 1.6.2. Once the\n # lowest supported numpy version is >= 1.7.0, this special case can be\n # removed (see gh-4314).\n if self.numargs == 0:\n place(output, cond0, self._entropy() + log(scale))\n else:\n place(output, cond0, self.vecentropy(*goodargs) + log(scale))\n return output\n\n def moment(self, n, *args, **kwds):\n \"\"\"\n n-th order non-central moment of distribution.\n\n Parameters\n ----------\n n : int, n >= 1\n Order of moment.\n arg1, arg2, arg3,... : float\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n if not (self._argcheck(*args) and (scale > 0)):\n return nan\n if (floor(n) != n):\n raise ValueError(\"Moment must be an integer.\")\n if (n < 0):\n raise ValueError(\"Moment must be positive.\")\n mu, mu2, g1, g2 = None, None, None, None\n if (n > 0) and (n < 5):\n if self._stats_has_moments:\n mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}\n else:\n mdict = {}\n mu, mu2, g1, g2 = self._stats(*args, **mdict)\n val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)\n\n # Convert to transformed X = L + S*Y\n # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)\n if loc == 0:\n return scale**n * val\n else:\n result = 0\n fac = float(scale) / float(loc)\n for k in range(n):\n valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)\n result += comb(n, k, exact=True)*(fac**k) * valk\n result += fac**n * val\n return result * loc**n\n\n def median(self, *args, **kwds):\n \"\"\"\n Median of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n Location parameter, Default is 0.\n scale : array_like, optional\n Scale parameter, Default is 1.\n\n Returns\n -------\n median : float\n The median of the distribution.\n\n See Also\n --------\n stats.distributions.rv_discrete.ppf\n Inverse of the CDF\n\n \"\"\"\n return self.ppf(0.5, *args, **kwds)\n\n def mean(self, *args, **kwds):\n \"\"\"\n Mean of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n mean : float\n the mean of the distribution\n\n \"\"\"\n kwds['moments'] = 'm'\n res = self.stats(*args, **kwds)\n if isinstance(res, ndarray) and res.ndim == 0:\n return res[()]\n return res\n\n def var(self, *args, **kwds):\n \"\"\"\n Variance of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n var : float\n the variance of the distribution\n\n \"\"\"\n kwds['moments'] = 'v'\n res = self.stats(*args, **kwds)\n if isinstance(res, ndarray) and res.ndim == 0:\n return res[()]\n return res\n\n def std(self, *args, **kwds):\n \"\"\"\n Standard deviation of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n std : float\n standard deviation of the distribution\n\n \"\"\"\n kwds['moments'] = 'v'\n res = sqrt(self.stats(*args, **kwds))\n return res\n\n def interval(self, alpha, *args, **kwds):\n \"\"\"\n Confidence interval with equal areas around the median.\n\n Parameters\n ----------\n alpha : array_like of float\n Probability that an rv will be drawn from the returned range.\n Each value should be in the range [0, 1].\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter, Default is 0.\n scale : array_like, optional\n scale parameter, Default is 1.\n\n Returns\n -------\n a, b : ndarray of float\n end-points of range that contain ``100 * alpha %`` of the rv's\n possible values.\n\n \"\"\"\n alpha = asarray(alpha)\n if np.any((alpha > 1) | (alpha < 0)):\n raise ValueError(\"alpha must be between 0 and 1 inclusive\")\n q1 = (1.0-alpha)/2\n q2 = (1.0+alpha)/2\n a = self.ppf(q1, *args, **kwds)\n b = self.ppf(q2, *args, **kwds)\n return a, b\n\n\n## continuous random variables: implement maybe later\n##\n## hf --- Hazard Function (PDF / SF)\n## chf --- Cumulative hazard function (-log(SF))\n## psf --- Probability sparsity function (reciprocal of the pdf) in\n## units of percent-point-function (as a function of q).\n## Also, the derivative of the percent-point function.\n\nclass rv_continuous(rv_generic):\n \"\"\"\n A generic continuous random variable class meant for subclassing.\n\n `rv_continuous` is a base class to construct specific distribution classes\n and instances for continuous random variables. 
It cannot be used\n directly as a distribution.\n\n Parameters\n ----------\n momtype : int, optional\n The type of generic moment calculation to use: 0 for pdf, 1 (default)\n for ppf.\n a : float, optional\n Lower bound of the support of the distribution, default is minus\n infinity.\n b : float, optional\n Upper bound of the support of the distribution, default is plus\n infinity.\n xtol : float, optional\n The tolerance for fixed point calculation for generic ppf.\n badvalue : float, optional\n The value in a result arrays that indicates a value that for which\n some argument restriction is violated, default is np.nan.\n name : str, optional\n The name of the instance. This string is used to construct the default\n example for distributions.\n longname : str, optional\n This string is used as part of the first line of the docstring returned\n when a subclass has no docstring of its own. Note: `longname` exists\n for backwards compatibility, do not use for new subclasses.\n shapes : str, optional\n The shape of the distribution. For example ``\"m, n\"`` for a\n distribution that takes two integers as the two shape arguments for all\n its methods. If not provided, shape parameters will be inferred from\n the signature of the private methods, ``_pdf`` and ``_cdf`` of the\n instance.\n extradoc : str, optional, deprecated\n This string is used as the last part of the docstring returned when a\n subclass has no docstring of its own. Note: `extradoc` exists for\n backwards compatibility, do not use for new subclasses.\n seed : None or int or ``numpy.random.RandomState`` instance, optional\n This parameter defines the RandomState object to use for drawing\n random variates.\n If None (or np.random), the global np.random state is used.\n If integer, it is used to seed the local RandomState instance.\n Default is None.\n\n Methods\n -------\n rvs\n pdf\n logpdf\n cdf\n logcdf\n sf\n logsf\n ppf\n isf\n moment\n stats\n entropy\n expect\n median\n mean\n std\n var\n interval\n __call__\n fit\n fit_loc_scale\n nnlf\n\n Notes\n -----\n Public methods of an instance of a distribution class (e.g., ``pdf``,\n ``cdf``) check their arguments and pass valid arguments to private,\n computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid\n if it is within the support of a distribution, ``self.a <= x <= self.b``.\n Whether a shape parameter is valid is decided by an ``_argcheck`` method\n (which defaults to checking that its arguments are strictly positive.)\n\n **Subclassing**\n\n New random variables can be defined by subclassing the `rv_continuous` class\n and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized\n to location 0 and scale 1).\n\n If positive argument checking is not correct for your RV\n then you will also need to re-define the ``_argcheck`` method.\n\n Correct, but potentially slow defaults exist for the remaining\n methods but for speed and/or accuracy you can over-ride::\n\n _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf\n\n Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.\n\n **Methods that can be overwritten by subclasses**\n ::\n\n _rvs\n _pdf\n _cdf\n _sf\n _ppf\n _isf\n _stats\n _munp\n _entropy\n _argcheck\n\n There are additional (internal and private) generic methods that can\n be useful for cross-checking and for debugging, but might work in all\n cases when directly called.\n\n A note on ``shapes``: subclasses need not specify them explicitly. 
In this\n case, `shapes` will be automatically deduced from the signatures of the\n overridden methods (`pdf`, `cdf` etc).\n If, for some reason, you prefer to avoid relying on introspection, you can\n specify ``shapes`` explicitly as an argument to the instance constructor.\n\n\n **Frozen Distributions**\n\n Normally, you must provide shape parameters (and, optionally, location and\n scale parameters to each call of a method of a distribution.\n\n Alternatively, the object may be called (as a function) to fix the shape,\n location, and scale parameters returning a \"frozen\" continuous RV object:\n\n rv = generic(<shape(s)>, loc=0, scale=1)\n frozen RV object with the same methods but holding the given shape,\n location, and scale fixed\n\n **Statistics**\n\n Statistics are computed using numerical integration by default.\n For speed you can redefine this using ``_stats``:\n\n - take shape parameters and return mu, mu2, g1, g2\n - If you can't compute one of these, return it as None\n - Can also be defined with a keyword argument ``moments``, which is a\n string composed of \"m\", \"v\", \"s\", and/or \"k\".\n Only the components appearing in string should be computed and\n returned in the order \"m\", \"v\", \"s\", or \"k\" with missing values\n returned as None.\n\n Alternatively, you can override ``_munp``, which takes ``n`` and shape\n parameters and returns the n-th non-central moment of the distribution.\n\n Examples\n --------\n To create a new Gaussian distribution, we would do the following:\n\n >>> from scipy.stats import rv_continuous\n >>> class gaussian_gen(rv_continuous):\n ... \"Gaussian distribution\"\n ... def _pdf(self, x):\n ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)\n >>> gaussian = gaussian_gen(name='gaussian')\n\n ``scipy.stats`` distributions are *instances*, so here we subclass\n `rv_continuous` and create an instance. With this, we now have\n a fully functional distribution with all relevant methods automagically\n generated by the framework.\n\n Note that above we defined a standard normal distribution, with zero mean\n and unit variance. 
Shifting and scaling of the distribution can be done\n by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``\n essentially computes ``y = (x - loc) / scale`` and\n ``gaussian._pdf(y) / scale``.\n\n \"\"\"\n def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,\n badvalue=None, name=None, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_continuous, self).__init__(seed)\n\n # save the ctor parameters, cf generic freeze\n self._ctor_param = dict(\n momtype=momtype, a=a, b=b, xtol=xtol,\n badvalue=badvalue, name=name, longname=longname,\n shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n if name is None:\n name = 'Distribution'\n self.badvalue = badvalue\n self.name = name\n self.a = a\n self.b = b\n if a is None:\n self.a = -inf\n if b is None:\n self.b = inf\n self.xtol = xtol\n self._size = 1\n self.moment_type = momtype\n self.shapes = shapes\n self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],\n locscale_in='loc=0, scale=1',\n locscale_out='loc, scale')\n\n # nin correction\n self._ppfvec = vectorize(self._ppf_single, otypes='d')\n self._ppfvec.nin = self.numargs + 1\n self.vecentropy = vectorize(self._entropy, otypes='d')\n self._cdfvec = vectorize(self._cdf_single, otypes='d')\n self._cdfvec.nin = self.numargs + 1\n\n self.extradoc = extradoc\n if momtype == 0:\n self.generic_moment = vectorize(self._mom0_sc, otypes='d')\n else:\n self.generic_moment = vectorize(self._mom1_sc, otypes='d')\n # Because of the *args argument of _mom0_sc, vectorize cannot count the\n # number of arguments correctly.\n self.generic_moment.nin = self.numargs + 1\n\n if longname is None:\n if name[0] in ['aeiouAEIOU']:\n hstr = \"An \"\n else:\n hstr = \"A \"\n longname = hstr + name\n\n if sys.flags.optimize < 2:\n # Skip adding docstrings if interpreter is run with -OO\n if self.__doc__ is None:\n self._construct_default_doc(longname=longname,\n extradoc=extradoc,\n docdict=docdict,\n discrete='continuous')\n else:\n dct = dict(distcont)\n self._construct_doc(docdict, dct.get(self.name))\n\n def _updated_ctor_param(self):\n \"\"\" Return the current version of _ctor_param, possibly updated by user.\n\n Used by freezing and pickling.\n Keep this in sync with the signature of __init__.\n \"\"\"\n dct = self._ctor_param.copy()\n dct['a'] = self.a\n dct['b'] = self.b\n dct['xtol'] = self.xtol\n dct['badvalue'] = self.badvalue\n dct['name'] = self.name\n dct['shapes'] = self.shapes\n dct['extradoc'] = self.extradoc\n return dct\n\n def _ppf_to_solve(self, x, q, *args):\n return self.cdf(*(x, )+args)-q\n\n def _ppf_single(self, q, *args):\n left = right = None\n if self.a > -np.inf:\n left = self.a\n if self.b < np.inf:\n right = self.b\n\n factor = 10.\n if not left: # i.e. self.a = -inf\n left = -1.*factor\n while self._ppf_to_solve(left, q, *args) > 0.:\n right = left\n left *= factor\n # left is now such that cdf(left) < q\n if not right: # i.e. 
self.b = inf\n right = factor\n while self._ppf_to_solve(right, q, *args) < 0.:\n left = right\n right *= factor\n # right is now such that cdf(right) > q\n\n return optimize.brentq(self._ppf_to_solve,\n left, right, args=(q,)+args, xtol=self.xtol)\n\n # moment from definition\n def _mom_integ0(self, x, m, *args):\n return x**m * self.pdf(x, *args)\n\n def _mom0_sc(self, m, *args):\n return integrate.quad(self._mom_integ0, self.a, self.b,\n args=(m,)+args)[0]\n\n # moment calculated using ppf\n def _mom_integ1(self, q, m, *args):\n return (self.ppf(q, *args))**m\n\n def _mom1_sc(self, m, *args):\n return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]\n\n def _pdf(self, x, *args):\n return derivative(self._cdf, x, dx=1e-5, args=args, order=5)\n\n ## Could also define any of these\n def _logpdf(self, x, *args):\n return log(self._pdf(x, *args))\n\n def _cdf_single(self, x, *args):\n return integrate.quad(self._pdf, self.a, x, args=args)[0]\n\n def _cdf(self, x, *args):\n return self._cdfvec(x, *args)\n\n ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined\n ## in rv_generic\n\n def pdf(self, x, *args, **kwds):\n \"\"\"\n Probability density function at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n putmask(output, (1-cond0)+np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args+(scale,)))\n scale, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._pdf(*goodargs) / scale)\n if output.ndim == 0:\n return output[()]\n return output\n\n def logpdf(self, x, *args, **kwds):\n \"\"\"\n Log of the probability density function at x of the given RV.\n\n This uses a more numerically accurate calculation if available.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logpdf : array_like\n Log of the probability density function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n putmask(output, (1-cond0)+np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args+(scale,)))\n scale, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._logpdf(*goodargs) - log(scale))\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, x, *args, **kwds):\n \"\"\"\n Cumulative distribution function of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n cdf : ndarray\n Cumulative distribution function evaluated at `x`\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x > self.a) & (x < self.b)\n cond2 = (x >= self.b) & cond0\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._cdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logcdf(self, x, *args, **kwds):\n \"\"\"\n Log of the cumulative distribution function at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logcdf : array_like\n Log of the cumulative distribution function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x > self.a) & (x < self.b)\n cond2 = (x >= self.b) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._logcdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def sf(self, x, *args, **kwds):\n \"\"\"\n Survival function (1 - `cdf`) at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n sf : array_like\n Survival function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x > self.a) & (x < self.b)\n cond2 = cond0 & (x <= self.a)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._sf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logsf(self, x, *args, **kwds):\n \"\"\"\n Log of the survival function of the given RV.\n\n Returns the log of the \"survival function,\" defined as (1 - `cdf`),\n evaluated at `x`.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logsf : ndarray\n Log of the survival function evaluated at `x`.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = (scale > 0) & (x > self.a) & (x < self.b)\n cond2 = cond0 & (x <= self.a)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of `cdf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n lower tail probability\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : array_like\n quantile corresponding to the lower tail probability q.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 0)\n cond3 = cond0 & (q == 1)\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue)\n\n lower_bound = self.a * scale + loc\n upper_bound = self.b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n place(output, cond, self._ppf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function (inverse of `sf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n upper tail probability\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : ndarray or scalar\n Quantile corresponding to the upper tail probability q.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 1)\n cond3 = cond0 & (q == 0)\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue)\n\n lower_bound = self.a * scale + loc\n upper_bound = self.b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n place(output, cond, self._isf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def _nnlf(self, x, *args):\n return -np.sum(self._logpdf(x, *args), axis=0)\n\n def nnlf(self, theta, x):\n '''Return negative loglikelihood function.\n\n Notes\n -----\n This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the\n parameters (including loc and scale).\n '''\n try:\n loc = theta[-2]\n scale = theta[-1]\n args = tuple(theta[:-2])\n except IndexError:\n raise ValueError(\"Not enough input arguments.\")\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = asarray((x-loc) / scale)\n cond0 = (x <= self.a) | (self.b <= x)\n if np.any(cond0):\n return inf\n else:\n N = len(x)\n return self._nnlf(x, *args) + N * log(scale)\n\n def _penalized_nnlf(self, theta, x):\n ''' Return negative loglikelihood function,\n i.e., - sum (log pdf(x, theta), axis=0)\n where theta are the parameters (including loc and scale)\n '''\n try:\n loc = theta[-2]\n scale = theta[-1]\n args = tuple(theta[:-2])\n except IndexError:\n raise ValueError(\"Not enough input arguments.\")\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = asarray((x-loc) / scale)\n\n loginf = log(_XMAX)\n\n if np.isneginf(self.a).all() and np.isinf(self.b).all():\n Nbad = 0\n else:\n cond0 = (x <= self.a) | (self.b <= x)\n Nbad = np.sum(cond0)\n if Nbad > 0:\n x = argsreduce(~cond0, x)[0]\n\n N = len(x)\n return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf\n\n # return starting point for fit (shape arguments + loc + scale)\n def _fitstart(self, data, args=None):\n if args is None:\n args = (1.0,)*self.numargs\n loc, scale = self._fit_loc_scale_support(data, *args)\n return args + (loc, scale)\n\n # Return the (possibly reduced) function to optimize in order to find MLE\n # estimates for the .fit method\n def _reduce_func(self, args, kwds):\n # First of all, convert fshapes params to fnum: eg for stats.beta,\n # shapes='a, b'. 
To fix `a`, can specify either `f1` or `fa`.\n # Convert the latter into the former.\n if self.shapes:\n shapes = self.shapes.replace(',', ' ').split()\n for j, s in enumerate(shapes):\n val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)\n if val is not None:\n key = 'f%d' % j\n if key in kwds:\n raise ValueError(\"Duplicate entry for %s.\" % key)\n else:\n kwds[key] = val\n\n args = list(args)\n Nargs = len(args)\n fixedn = []\n names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']\n x0 = []\n for n, key in enumerate(names):\n if key in kwds:\n fixedn.append(n)\n args[n] = kwds.pop(key)\n else:\n x0.append(args[n])\n\n if len(fixedn) == 0:\n func = self._penalized_nnlf\n restore = None\n else:\n if len(fixedn) == Nargs:\n raise ValueError(\n \"All parameters fixed. There is nothing to optimize.\")\n\n def restore(args, theta):\n # Replace with theta for all numbers not in fixedn\n # This allows the non-fixed values to vary, but\n # we still call self.nnlf with all parameters.\n i = 0\n for n in range(Nargs):\n if n not in fixedn:\n args[n] = theta[i]\n i += 1\n return args\n\n def func(theta, x):\n newtheta = restore(args[:], theta)\n return self._penalized_nnlf(newtheta, x)\n\n return x0, func, restore, args\n\n def fit(self, data, *args, **kwds):\n \"\"\"\n Return MLEs for shape, location, and scale parameters from data.\n\n MLE stands for Maximum Likelihood Estimate. Starting estimates for\n the fit are given by input arguments; for any arguments not provided\n with starting estimates, ``self._fitstart(data)`` is called to generate\n such.\n\n One can hold some parameters fixed to specific values by passing in\n keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)\n and ``floc`` and ``fscale`` (for location and scale parameters,\n respectively).\n\n Parameters\n ----------\n data : array_like\n Data to use in calculating the MLEs.\n args : floats, optional\n Starting value(s) for any shape-characterizing arguments (those not\n provided will be determined by a call to ``_fitstart(data)``).\n No default value.\n kwds : floats, optional\n Starting values for the location and scale parameters; no default.\n Special keyword arguments are recognized as holding certain\n parameters fixed:\n\n - f0...fn : hold respective shape parameters fixed.\n Alternatively, shape parameters to fix can be specified by name.\n For example, if ``self.shapes == \"a, b\"``, ``fa``and ``fix_a``\n are equivalent to ``f0``, and ``fb`` and ``fix_b`` are\n equivalent to ``f1``.\n\n - floc : hold location parameter fixed to specified value.\n\n - fscale : hold scale parameter fixed to specified value.\n\n - optimizer : The optimizer to use. The optimizer must take ``func``,\n and starting position as the first two arguments,\n plus ``args`` (for extra arguments to pass to the\n function to be optimized) and ``disp=0`` to suppress\n output as keyword arguments.\n\n Returns\n -------\n shape, loc, scale : tuple of floats\n MLEs for any shape statistics, followed by those for location and\n scale.\n\n Notes\n -----\n This fit is computed by maximizing a log-likelihood function, with\n penalty applied for samples outside of range of the distribution. 
The\n returned answer is not guaranteed to be the globally optimal MLE, it\n may only be locally optimal, or the optimization may fail altogether.\n\n\n Examples\n --------\n\n Generate some data to fit: draw random variates from the `beta`\n distribution\n\n >>> from scipy.stats import beta\n >>> a, b = 1., 2.\n >>> x = beta.rvs(a, b, size=1000)\n\n Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):\n\n >>> a1, b1, loc1, scale1 = beta.fit(x)\n\n We can also use some prior knowledge about the dataset: let's keep\n ``loc`` and ``scale`` fixed:\n\n >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)\n >>> loc1, scale1\n (0, 1)\n\n We can also keep shape parameters fixed by using ``f``-keywords. To\n keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,\n equivalently, ``fa=1``:\n\n >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)\n >>> a1\n 1\n\n \"\"\"\n Narg = len(args)\n if Narg > self.numargs:\n raise TypeError(\"Too many input arguments.\")\n\n start = [None]*2\n if (Narg < self.numargs) or not ('loc' in kwds and\n 'scale' in kwds):\n # get distribution specific starting locations\n start = self._fitstart(data)\n args += start[Narg:-2]\n loc = kwds.pop('loc', start[-2])\n scale = kwds.pop('scale', start[-1])\n args += (loc, scale)\n x0, func, restore, args = self._reduce_func(args, kwds)\n\n optimizer = kwds.pop('optimizer', optimize.fmin)\n # convert string to function in scipy.optimize\n if not callable(optimizer) and isinstance(optimizer, string_types):\n if not optimizer.startswith('fmin_'):\n optimizer = \"fmin_\"+optimizer\n if optimizer == 'fmin_':\n optimizer = 'fmin'\n try:\n optimizer = getattr(optimize, optimizer)\n except AttributeError:\n raise ValueError(\"%s is not a valid optimizer\" % optimizer)\n\n # by now kwds must be empty, since everybody took what they needed\n if kwds:\n raise TypeError(\"Unknown arguments: %s.\" % kwds)\n\n vals = optimizer(func, x0, args=(ravel(data),), disp=0)\n if restore is not None:\n vals = restore(args, vals)\n vals = tuple(vals)\n return vals\n\n def _fit_loc_scale_support(self, data, *args):\n \"\"\"\n Estimate loc and scale parameters from data accounting for support.\n\n Parameters\n ----------\n data : array_like\n Data to fit.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n\n Returns\n -------\n Lhat : float\n Estimated location parameter for the data.\n Shat : float\n Estimated scale parameter for the data.\n\n \"\"\"\n data = np.asarray(data)\n\n # Estimate location and scale according to the method of moments.\n loc_hat, scale_hat = self.fit_loc_scale(data, *args)\n\n # Compute the support according to the shape parameters.\n self._argcheck(*args)\n a, b = self.a, self.b\n support_width = b - a\n\n # If the support is empty then return the moment-based estimates.\n if support_width <= 0:\n return loc_hat, scale_hat\n\n # Compute the proposed support according to the loc and scale estimates.\n a_hat = loc_hat + a * scale_hat\n b_hat = loc_hat + b * scale_hat\n\n # Use the moment-based estimates if they are compatible with the data.\n data_a = np.min(data)\n data_b = np.max(data)\n if a_hat < data_a and data_b < b_hat:\n return loc_hat, scale_hat\n\n # Otherwise find other estimates that are compatible with the data.\n data_width = data_b - data_a\n rel_margin = 0.1\n margin = data_width * rel_margin\n\n # For a finite interval, both the location and scale\n # should have interesting values.\n if support_width < np.inf:\n loc_hat = (data_a - a) - margin\n scale_hat = (data_width + 2 * margin) / support_width\n return loc_hat, scale_hat\n\n # For a one-sided interval, use only an interesting location parameter.\n if a > -np.inf:\n return (data_a - a) - margin, 1\n elif b < np.inf:\n return (data_b - b) + margin, 1\n else:\n raise RuntimeError\n\n def fit_loc_scale(self, data, *args):\n \"\"\"\n Estimate loc and scale parameters from data using 1st and 2nd moments.\n\n Parameters\n ----------\n data : array_like\n Data to fit.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n\n Returns\n -------\n Lhat : float\n Estimated location parameter for the data.\n Shat : float\n Estimated scale parameter for the data.\n\n \"\"\"\n mu, mu2 = self.stats(*args, **{'moments': 'mv'})\n tmp = asarray(data)\n muhat = tmp.mean()\n mu2hat = tmp.var()\n Shat = sqrt(mu2hat / mu2)\n Lhat = muhat - Shat*mu\n if not np.isfinite(Lhat):\n Lhat = 0\n if not (np.isfinite(Shat) and (0 < Shat)):\n Shat = 1\n return Lhat, Shat\n\n def _entropy(self, *args):\n def integ(x):\n val = self._pdf(x, *args)\n return entr(val)\n\n # upper limit is often inf, so suppress warnings when integrating\n olderr = np.seterr(over='ignore')\n h = integrate.quad(integ, self.a, self.b)[0]\n np.seterr(**olderr)\n\n if not np.isnan(h):\n return h\n else:\n # try with different limits if integration problems\n low, upp = self.ppf([1e-10, 1. - 1e-10], *args)\n if np.isinf(self.b):\n upper = upp\n else:\n upper = self.b\n if np.isinf(self.a):\n lower = low\n else:\n lower = self.a\n return integrate.quad(integ, lower, upper)[0]\n\n def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,\n conditional=False, **kwds):\n \"\"\"Calculate expected value of a function with respect to the\n distribution.\n\n The expected value of a function ``f(x)`` with respect to a\n distribution ``dist`` is defined as::\n\n ubound\n E[x] = Integral(f(x) * dist.pdf(x))\n lbound\n\n Parameters\n ----------\n func : callable, optional\n Function for which integral is calculated. 
Takes only one argument.\n The default is the identity mapping f(x) = x.\n args : tuple, optional\n Shape parameters of the distribution.\n loc : float, optional\n Location parameter (default=0).\n scale : float, optional\n Scale parameter (default=1).\n lb, ub : scalar, optional\n Lower and upper bound for integration. Default is set to the\n support of the distribution.\n conditional : bool, optional\n If True, the integral is corrected by the conditional probability\n of the integration interval. The return value is the expectation\n of the function, conditional on being in the given interval.\n Default is False.\n\n Additional keyword arguments are passed to the integration routine.\n\n Returns\n -------\n expect : float\n The calculated expected value.\n\n Notes\n -----\n The integration behavior of this function is inherited from\n `integrate.quad`.\n\n \"\"\"\n lockwds = {'loc': loc,\n 'scale': scale}\n self._argcheck(*args)\n if func is None:\n def fun(x, *args):\n return x * self.pdf(x, *args, **lockwds)\n else:\n def fun(x, *args):\n return func(x) * self.pdf(x, *args, **lockwds)\n if lb is None:\n lb = loc + self.a * scale\n if ub is None:\n ub = loc + self.b * scale\n if conditional:\n invfac = (self.sf(lb, *args, **lockwds)\n - self.sf(ub, *args, **lockwds))\n else:\n invfac = 1.0\n kwds['args'] = args\n # Silence floating point warnings from integration.\n olderr = np.seterr(all='ignore')\n vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac\n np.seterr(**olderr)\n return vals\n\n\n## Handlers for generic case where xk and pk are given\n## The _drv prefix probably means discrete random variable.\n\ndef _drv_pmf(self, xk, *args):\n try:\n return self.P[xk]\n except KeyError:\n return 0.0\n\n\ndef _drv_cdf(self, xk, *args):\n indx = argmax((self.xk > xk), axis=-1)-1\n return self.F[self.xk[indx]]\n\n\ndef _drv_ppf(self, q, *args):\n indx = argmax((self.qvals >= q), axis=-1)\n return self.Finv[self.qvals[indx]]\n\n\ndef _drv_nonzero(self, k, *args):\n return 1\n\n\ndef _drv_moment(self, n, *args):\n n = asarray(n)\n return np.sum(self.xk**n[np.newaxis, ...] 
* self.pk, axis=0)\n\n\ndef _drv_moment_gen(self, t, *args):\n t = asarray(t)\n return np.sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)\n\n\ndef _drv2_moment(self, n, *args):\n \"\"\"Non-central moment of discrete distribution.\"\"\"\n def fun(x):\n return np.power(x, n) * self._pmf(x, *args)\n return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc)\n\n\ndef _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm\n b = self.b\n a = self.a\n if isinf(b): # Be sure ending point is > q\n b = int(max(100*q, 10))\n while 1:\n if b >= self.b:\n qb = 1.0\n break\n qb = self._cdf(b, *args)\n if (qb < q):\n b += 10\n else:\n break\n else:\n qb = 1.0\n if isinf(a): # be sure starting point < q\n a = int(min(-100*q, -10))\n while 1:\n if a <= self.a:\n qb = 0.0\n break\n qa = self._cdf(a, *args)\n if (qa > q):\n a -= 10\n else:\n break\n else:\n qa = self._cdf(a, *args)\n\n while 1:\n if (qa == q):\n return a\n if (qb == q):\n return b\n if b <= a+1:\n # testcase: return wrong number at lower index\n # python -c \"from scipy.stats import zipf;print zipf.ppf(0.01, 2)\" wrong\n # python -c \"from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)\"\n # python -c \"from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)\"\n if qa > q:\n return a\n else:\n return b\n c = int((a+b)/2.0)\n qc = self._cdf(c, *args)\n if (qc < q):\n if a != c:\n a = c\n else:\n raise RuntimeError('updating stopped, endless loop')\n qa = qc\n elif (qc > q):\n if b != c:\n b = c\n else:\n raise RuntimeError('updating stopped, endless loop')\n qb = qc\n else:\n return c\n\n\ndef entropy(pk, qk=None, base=None):\n \"\"\"Calculate the entropy of a distribution for given probability values.\n\n If only probabilities `pk` are given, the entropy is calculated as\n ``S = -sum(pk * log(pk), axis=0)``.\n\n If `qk` is not None, then compute the Kullback-Leibler divergence\n ``S = sum(pk * log(pk / qk), axis=0)``.\n\n This routine will normalize `pk` and `qk` if they don't sum to 1.\n\n Parameters\n ----------\n pk : sequence\n Defines the (discrete) distribution. ``pk[i]`` is the (possibly\n unnormalized) probability of event ``i``.\n qk : sequence, optional\n Sequence against which the relative entropy is computed. Should be in\n the same format as `pk`.\n base : float, optional\n The logarithmic base to use, defaults to ``e`` (natural logarithm).\n\n Returns\n -------\n S : float\n The calculated entropy.\n\n \"\"\"\n pk = asarray(pk)\n pk = 1.0*pk / np.sum(pk, axis=0)\n if qk is None:\n vec = entr(pk)\n else:\n qk = asarray(qk)\n if len(qk) != len(pk):\n raise ValueError(\"qk and pk must have same length.\")\n qk = 1.0*qk / np.sum(qk, axis=0)\n vec = kl_div(pk, qk)\n S = np.sum(vec, axis=0)\n if base is not None:\n S /= log(base)\n return S\n\n\n# Must over-ride one of _pmf or _cdf or pass in\n# x_k, p(x_k) lists in initialization\n\nclass rv_discrete(rv_generic):\n \"\"\"\n A generic discrete random variable class meant for subclassing.\n\n `rv_discrete` is a base class to construct specific distribution classes\n and instances for discrete random variables. 
It can also be used\n to construct an arbitrary distribution defined by a list of support\n points and corresponding probabilities.\n\n Parameters\n ----------\n a : float, optional\n Lower bound of the support of the distribution, default: 0\n b : float, optional\n Upper bound of the support of the distribution, default: plus infinity\n moment_tol : float, optional\n The tolerance for the generic calculation of moments.\n values : tuple of two array_like, optional\n ``(xk, pk)`` where ``xk`` are integers with non-zero\n probabilities ``pk`` with ``sum(pk) = 1``.\n inc : integer, optional\n Increment for the support of the distribution.\n Default is 1. (other values have not been tested)\n badvalue : float, optional\n The value in a result arrays that indicates a value that for which\n some argument restriction is violated, default is np.nan.\n name : str, optional\n The name of the instance. This string is used to construct the default\n example for distributions.\n longname : str, optional\n This string is used as part of the first line of the docstring returned\n when a subclass has no docstring of its own. Note: `longname` exists\n for backwards compatibility, do not use for new subclasses.\n shapes : str, optional\n The shape of the distribution. For example \"m, n\" for a distribution\n that takes two integers as the two shape arguments for all its methods\n If not provided, shape parameters will be inferred from\n the signatures of the private methods, ``_pmf`` and ``_cdf`` of\n the instance.\n extradoc : str, optional\n This string is used as the last part of the docstring returned when a\n subclass has no docstring of its own. Note: `extradoc` exists for\n backwards compatibility, do not use for new subclasses.\n seed : None or int or ``numpy.random.RandomState`` instance, optional\n This parameter defines the RandomState object to use for drawing\n random variates.\n If None, the global np.random state is used.\n If integer, it is used to seed the local RandomState instance.\n Default is None.\n\n Methods\n -------\n rvs\n pmf\n logpmf\n cdf\n logcdf\n sf\n logsf\n ppf\n isf\n moment\n stats\n entropy\n expect\n median\n mean\n std\n var\n interval\n __call__\n\n\n Notes\n -----\n\n This class is similar to `rv_continuous`, the main differences being:\n\n - the support of the distribution is a set of integers\n - instead of the probability density function, ``pdf`` (and the\n corresponding private ``_pdf``), this class defines the\n *probability mass function*, `pmf` (and the corresponding\n private ``_pmf``.)\n - scale parameter is not defined.\n\n To create a new discrete distribution, we would do the following:\n\n >>> from scipy.stats import rv_discrete\n >>> class poisson_gen(rv_discrete):\n ... \"Poisson distribution\"\n ... def _pmf(self, k, mu):\n ... return exp(-mu) * mu**k / factorial(k)\n\n and create an instance::\n\n >>> poisson = poisson_gen(name=\"poisson\")\n\n Note that above we defined the Poisson distribution in the standard form.\n Shifting the distribution can be done by providing the ``loc`` parameter\n to the methods of the instance. 
For example, ``poisson.pmf(x, mu, loc)``\n delegates the work to ``poisson._pmf(x-loc, mu)``.\n\n **Discrete distributions from a list of probabilities**\n\n Alternatively, you can construct an arbitrary discrete rv defined\n on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the\n ``values`` keyword argument to the `rv_discrete` constructor.\n\n Examples\n --------\n\n Custom made discrete distribution:\n\n >>> from scipy import stats\n >>> xk = np.arange(7)\n >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)\n >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))\n >>>\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots(1, 1)\n >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')\n >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)\n >>> plt.show()\n\n Random number generation:\n\n >>> R = custm.rvs(size=100)\n\n \"\"\"\n\n def __init__(self, a=0, b=inf, name=None, badvalue=None,\n moment_tol=1e-8, values=None, inc=1, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_discrete, self).__init__(seed)\n\n # cf generic freeze\n self._ctor_param = dict(\n a=a, b=b, name=name, badvalue=badvalue,\n moment_tol=moment_tol, values=values, inc=inc,\n longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n if name is None:\n name = 'Distribution'\n self.badvalue = badvalue\n self.a = a\n self.b = b\n self.name = name\n self.moment_tol = moment_tol\n self.inc = inc\n self._cdfvec = vectorize(self._cdf_single, otypes='d')\n self.return_integers = 1\n self.vecentropy = vectorize(self._entropy)\n self.shapes = shapes\n self.extradoc = extradoc\n\n if values is not None:\n self.xk, self.pk = values\n self.return_integers = 0\n indx = argsort(ravel(self.xk))\n self.xk = take(ravel(self.xk), indx, 0)\n self.pk = take(ravel(self.pk), indx, 0)\n self.a = self.xk[0]\n self.b = self.xk[-1]\n self.P = dict(zip(self.xk, self.pk))\n self.qvals = np.cumsum(self.pk, axis=0)\n self.F = dict(zip(self.xk, self.qvals))\n decreasing_keys = sorted(self.F.keys(), reverse=True)\n self.Finv = dict((self.F[k], k) for k in decreasing_keys)\n self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),\n self, rv_discrete)\n self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),\n self, rv_discrete)\n self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),\n self, rv_discrete)\n self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)\n self.generic_moment = instancemethod(_drv_moment,\n self, rv_discrete)\n self.moment_gen = instancemethod(_drv_moment_gen,\n self, rv_discrete)\n\n self.shapes = ' ' # bypass inspection\n self._construct_argparser(meths_to_inspect=[self._pmf],\n locscale_in='loc=0',\n # scale=1 for discrete RVs\n locscale_out='loc, 1')\n else:\n self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],\n locscale_in='loc=0',\n # scale=1 for discrete RVs\n locscale_out='loc, 1')\n\n # nin correction needs to be after we know numargs\n # correct nin for generic moment vectorization\n _vec_generic_moment = vectorize(_drv2_moment, otypes='d')\n _vec_generic_moment.nin = self.numargs + 2\n self.generic_moment = instancemethod(_vec_generic_moment,\n self, rv_discrete)\n\n # correct nin for ppf vectorization\n _vppf = vectorize(_drv2_ppfsingle, otypes='d')\n _vppf.nin = self.numargs + 2 # +1 is for self\n self._ppfvec = instancemethod(_vppf,\n self, rv_discrete)\n\n # now that self.numargs is defined, we can adjust nin\n self._cdfvec.nin = self.numargs + 1\n\n # generate docstring for 
subclass instances\n if longname is None:\n if name[0] in ['aeiouAEIOU']:\n hstr = \"An \"\n else:\n hstr = \"A \"\n longname = hstr + name\n\n if sys.flags.optimize < 2:\n # Skip adding docstrings if interpreter is run with -OO\n if self.__doc__ is None:\n self._construct_default_doc(longname=longname,\n extradoc=extradoc,\n docdict=docdict_discrete,\n discrete='discrete')\n else:\n dct = dict(distdiscrete)\n self._construct_doc(docdict_discrete, dct.get(self.name))\n\n #discrete RV do not have the scale parameter, remove it\n self.__doc__ = self.__doc__.replace(\n '\\n scale : array_like, '\n 'optional\\n scale parameter (default=1)', '')\n\n def _updated_ctor_param(self):\n \"\"\" Return the current version of _ctor_param, possibly updated by user.\n\n Used by freezing and pickling.\n Keep this in sync with the signature of __init__.\n \"\"\"\n dct = self._ctor_param.copy()\n dct['a'] = self.a\n dct['b'] = self.b\n dct['badvalue'] = self.badvalue\n dct['moment_tol'] = self.moment_tol\n dct['inc'] = self.inc\n dct['name'] = self.name\n dct['shapes'] = self.shapes\n dct['extradoc'] = self.extradoc\n return dct\n\n def _nonzero(self, k, *args):\n return floor(k) == k\n\n def _pmf(self, k, *args):\n return self._cdf(k, *args) - self._cdf(k-1, *args)\n\n def _logpmf(self, k, *args):\n return log(self._pmf(k, *args))\n\n def _cdf_single(self, k, *args):\n m = arange(int(self.a), k+1)\n return np.sum(self._pmf(m, *args), axis=0)\n\n def _cdf(self, x, *args):\n k = floor(x)\n return self._cdfvec(k, *args)\n\n # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic\n\n def rvs(self, *args, **kwargs):\n \"\"\"\n Random variates of given type.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n size : int or tuple of ints, optional\n Defining number of random variates (Default is 1). Note that `size`\n has to be given as keyword, not as positional argument.\n random_state : None or int or ``np.random.RandomState`` instance, optional\n If int or RandomState, use it for drawing the random variates.\n If None, rely on ``self.random_state``.\n Default is None.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of given `size`.\n\n \"\"\"\n kwargs['discrete'] = True\n return super(rv_discrete, self).rvs(*args, **kwargs)\n\n def pmf(self, k, *args, **kwds):\n \"\"\"\n Probability mass function at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n pmf : array_like\n Probability mass function evaluated at k\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logpmf(self, k, *args, **kwds):\n \"\"\"\n Log of the probability mass function at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter. Default is 0.\n\n Returns\n -------\n logpmf : array_like\n Log of the probability mass function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logpmf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, k, *args, **kwds):\n \"\"\"\n Cumulative distribution function of the given RV.\n\n Parameters\n ----------\n k : array_like, int\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n cdf : ndarray\n Cumulative distribution function evaluated at `k`.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k < self.b)\n cond2 = (k >= self.b)\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2*(cond0 == cond0), 1.0)\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logcdf(self, k, *args, **kwds):\n \"\"\"\n Log of the cumulative distribution function at k of the given RV.\n\n Parameters\n ----------\n k : array_like, int\n Quantiles.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n logcdf : array_like\n Log of the cumulative distribution function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k < self.b)\n cond2 = (k >= self.b)\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2*(cond0 == cond0), 0.0)\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logcdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def sf(self, k, *args, **kwds):\n \"\"\"\n Survival function (1 - `cdf`) at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n sf : array_like\n Survival function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray(k-loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k < self.b)\n cond2 = (k < self.a) & cond0\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._sf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logsf(self, k, *args, **kwds):\n \"\"\"\n Log of the survival function of the given RV.\n\n Returns the log of the \"survival function,\" defined as 1 - `cdf`,\n evaluated at `k`.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n logsf : ndarray\n Log of the survival function evaluated at `k`.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray(k-loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= self.a) & (k < self.b)\n cond2 = (k < self.a) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of `cdf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n Lower tail probability.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n k : array_like\n Quantile corresponding to the lower tail probability, q.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n q, loc = map(asarray, (q, loc))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue, typecode='d')\n # output type 'd' to handle nin and inf\n place(output, (q == 0)*(cond == cond), self.a-1)\n place(output, cond2, self.b)\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._ppf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function (inverse of `sf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n Upper tail probability.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n k : ndarray or scalar\n Quantile corresponding to the upper tail probability, q.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n q, loc = map(asarray, (q, loc))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n\n # same problem as with ppf; copied from ppf and changed\n output = valarray(shape(cond), value=self.badvalue, typecode='d')\n # output type 'd' to handle nin and inf\n place(output, (q == 0)*(cond == cond), self.b)\n place(output, cond2, self.a-1)\n\n # call place only if at least 1 valid argument\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n # PB same as ticket 766\n place(output, cond, self._isf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n def _entropy(self, *args):\n if hasattr(self, 'pk'):\n return entropy(self.pk)\n else:\n return _expect(lambda x: entr(self.pmf(x, *args)),\n self.a, self.b, self.ppf(0.5, *args), self.inc)\n\n def expect(self, func=None, args=(), loc=0, lb=None, ub=None,\n conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):\n \"\"\"\n Calculate expected value of a function with respect to the distribution\n for discrete distribution.\n\n Parameters\n ----------\n func : callable, optional\n Function for which the expectation value is calculated.\n Takes only one argument.\n The default is the identity mapping f(k) = k.\n args : tuple, optional\n Shape parameters of the distribution.\n loc : float, optional\n Location parameter.\n Default is 0.\n lb, ub : int, optional\n Lower and upper bound for the summation, default is set to the\n support of the distribution, inclusive (``ul <= k <= ub``).\n conditional : bool, optional\n If true then the expectation is corrected by the conditional\n probability of the summation interval. 
The return value is the\n expectation of the function, `func`, conditional on being in\n the given interval (k such that ``ul <= k <= ub``).\n Default is False.\n maxcount : int, optional\n Maximal number of terms to evaluate (to avoid an endless loop for\n an infinite sum). Default is 1000.\n tolerance : float, optional\n Absolute tolerance for the summation. Default is 1e-10.\n chunksize : int, optional\n Iterate over the support of a distributions in chunks of this size.\n Default is 32.\n\n Returns\n -------\n expect : float\n Expected value.\n\n Notes\n -----\n For heavy-tailed distributions, the expected value may or may not exist,\n depending on the function, `func`. If it does exist, but the sum converges\n slowly, the accuracy of the result may be rather low. For instance, for\n ``zipf(4)``, accuracy for mean, variance in example is only 1e-5.\n increasing `maxcount` and/or `chunksize` may improve the result, but may also\n make zipf very slow.\n\n The function is not vectorized.\n\n \"\"\"\n if func is None:\n def fun(x):\n # loc and args from outer scope\n return (x+loc)*self._pmf(x, *args)\n else:\n def fun(x):\n # loc and args from outer scope\n return func(x+loc)*self._pmf(x, *args)\n # used pmf because _pmf does not check support in randint and there\n # might be problems(?) with correct self.a, self.b at this stage maybe\n # not anymore, seems to work now with _pmf\n\n self._argcheck(*args) # (re)generate scalar self.a and self.b\n if lb is None:\n lb = self.a\n else:\n lb = lb - loc # convert bound for standardized distribution\n if ub is None:\n ub = self.b\n else:\n ub = ub - loc # convert bound for standardized distribution\n if conditional:\n invfac = self.sf(lb-1, *args) - self.sf(ub, *args)\n else:\n invfac = 1.0\n\n # iterate over the support, starting from the median\n x0 = self.ppf(0.5, *args)\n res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)\n return res / invfac\n\n\ndef _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,\n chunksize=32):\n \"\"\"Helper for computing the expectation value of `fun`.\"\"\"\n\n # short-circuit if the support size is small enough\n if (ub - lb) <= chunksize:\n supp = np.arange(lb, ub+1, inc)\n vals = fun(supp)\n return np.sum(vals)\n\n # otherwise, iterate starting from x0\n if x0 < lb:\n x0 = lb\n if x0 > ub:\n x0 = ub\n\n count, tot = 0, 0.\n # iterate over [x0, ub] inclusive\n for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):\n count += x.size\n delta = np.sum(fun(x))\n tot += delta\n if abs(delta) < tolerance * x.size:\n break\n if count > maxcount:\n warnings.warn('expect(): sum did not converge', RuntimeWarning)\n return tot\n\n # iterate over [lb, x0)\n for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):\n count += x.size\n delta = np.sum(fun(x))\n tot += delta\n if abs(delta) < tolerance * x.size:\n break\n if count > maxcount:\n warnings.warn('expect(): sum did not converge', RuntimeWarning)\n break\n\n return tot\n\n\ndef _iter_chunked(x0, x1, chunksize=4, inc=1):\n \"\"\"Iterate from x0 to x1 in chunks of chunksize and steps inc.\n\n x0 must be finite, x1 need not be. In the latter case, the iterator is infinite.\n Handles both x0 < x1 and x0 > x1. 
In the latter case, iterates downwards\n (make sure to set inc < 0.)\n\n >>> [x for x in _iter_chunked(2, 5, inc=2)]\n [array([2, 4])]\n >>> [x for x in _iter_chunked(2, 11, inc=2)]\n [array([2, 4, 6, 8]), array([10])]\n >>> [x for x in _iter_chunked(2, -5, inc=-2)]\n [array([ 2, 0, -2, -4])]\n >>> [x for x in _iter_chunked(2, -9, inc=-2)]\n [array([ 2, 0, -2, -4]), array([-6, -8])]\n\n \"\"\"\n if inc == 0:\n raise ValueError('Cannot increment by zero.')\n if chunksize <= 0:\n raise ValueError('Chunk size must be positive; got %s.' % chunksize)\n\n s = 1 if inc > 0 else -1\n stepsize = abs(chunksize * inc)\n\n x = x0\n while (x - x1) * inc < 0:\n delta = min(stepsize, abs(x - x1))\n step = delta * s\n supp = np.arange(x, x + step, inc)\n x += step\n yield supp\n\n\ndef get_distribution_names(namespace_pairs, rv_base_class):\n \"\"\"\n Collect names of statistical distributions and their generators.\n\n Parameters\n ----------\n namespace_pairs : sequence\n A snapshot of (name, value) pairs in the namespace of a module.\n rv_base_class : class\n The base class of random variable generator classes in a module.\n\n Returns\n -------\n distn_names : list of strings\n Names of the statistical distributions.\n distn_gen_names : list of strings\n Names of the generators of the statistical distributions.\n Note that these are not simply the names of the statistical\n distributions, with a _gen suffix added.\n\n \"\"\"\n distn_names = []\n distn_gen_names = []\n for name, value in namespace_pairs:\n if name.startswith('_'):\n continue\n if name.endswith('_gen') and issubclass(value, rv_base_class):\n distn_gen_names.append(name)\n if isinstance(value, rv_base_class):\n distn_names.append(name)\n return distn_names, distn_gen_names\n" ]
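For orientation, a minimal sketch of how the generic rv_continuous machinery documented in the record above is used in practice, assuming a standard SciPy installation; the class and variable names here (tri_gen, tri, data) are illustrative only and are not part of the stored code:

import numpy as np
from scipy.stats import rv_continuous

class tri_gen(rv_continuous):
    "Density f(x) = 2*x on the interval [0, 1]."
    def _pdf(self, x):
        return 2.0 * x
    def _cdf(self, x):          # optional override; speeds up the generic ppf
        return x * x

tri = tri_gen(a=0.0, b=1.0, name='tri')

# Generic methods are derived from _pdf/_cdf: ppf via brentq, moments via quad.
print(tri.cdf(0.5))                    # 0.25
print(tri.ppf(0.25))                   # ~0.5
print(tri.mean(), tri.var())           # ~0.667, ~0.0556
print(tri.expect(lambda x: x ** 2))    # ~0.5

# MLE fit with the location held fixed via the floc keyword.
data = tri.rvs(size=100, random_state=0)
print(tri.fit(data, floc=0))           # (0, scale estimate close to 1)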
[ [ "numpy.sum", "scipy.misc.doccer.docformat", "numpy.ones", "numpy.vectorize", "numpy.any", "numpy.asarray", "numpy.log", "scipy.misc.derivative", "numpy.isscalar", "numpy.isfinite", "numpy.extract", "scipy._lib.six.exec_", "numpy.reshape", "scipy._lib._util.getargspec_no_self", "numpy.seterr", "numpy.isnan", "scipy.special.entr", "scipy.special.xlogy", "scipy._lib._util.check_random_state", "numpy.argmax", "numpy.arange", "numpy.power", "numpy.all", "numpy.min", "numpy.max", "scipy.special.comb", "numpy.array", "scipy.integrate.quad", "numpy.cumsum", "numpy.isinf", "numpy.floor", "numpy.exp", "numpy.atleast_1d", "numpy.ravel", "scipy.optimize.brentq", "numpy.place", "numpy.shape", "numpy.product", "scipy.special.chndtr", "numpy.isneginf", "numpy.sqrt", "numpy.find_common_type", "scipy.special.kl_div", "scipy.special.ive" ] ]
tomztyang/DSGN
[ "135dabf96183f4502532dea0a45230bf9a23e2d8" ]
[ "dsgn/models/submodule.py" ]
[ "from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport math\nimport numpy as np\nfrom torch.nn import BatchNorm2d\n\ndef convbn(in_planes, out_planes, kernel_size, stride, pad, dilation, gn=False, groups=32):\n return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation = dilation, bias=False),\n nn.BatchNorm2d(out_planes) if not gn else nn.GroupNorm(groups, out_planes))\n\n\ndef convbn_3d(in_planes, out_planes, kernel_size, stride, pad, gn=False, groups=32):\n return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, padding=pad, stride=stride,bias=False),\n nn.BatchNorm3d(out_planes) if not gn else nn.GroupNorm(groups, out_planes))\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride, downsample, pad, dilation, gn=False):\n super(BasicBlock, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(inplanes, planes, 3, stride, pad, dilation, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(planes, planes, 3, 1, pad, dilation, gn=gn)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n\n if self.downsample is not None:\n x = self.downsample(x)\n\n out += x\n\n return out\n\nclass disparityregression(nn.Module):\n def __init__(self, maxdisp, cfg):\n super(disparityregression, self).__init__()\n self.disp = Variable(torch.Tensor(np.array(range(maxdisp))).cuda(), requires_grad=False)\n\n def forward(self, x, depth):\n out = torch.sum(x * depth[None, :, None, None],1)\n return out\n\nclass hourglass(nn.Module):\n def __init__(self, inplanes, gn=False):\n super(hourglass, self).__init__()\n\n self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1, gn=gn)\n\n self.conv3 = nn.Sequential(convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(\n nn.ConvTranspose3d(inplanes * 2, inplanes * 2, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm3d(inplanes * 2) if not gn else nn.GroupNorm(32, inplanes * 2)) # +conv2\n\n self.conv6 = nn.Sequential(\n nn.ConvTranspose3d(inplanes * 2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm3d(inplanes) if not gn else nn.GroupNorm(32, inplanes)) # +x\n\n def forward(self, x, presqu, postsqu):\n\n out = self.conv1(x) # in:1/4 out:1/8\n pre = self.conv2(out) # in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) # in:1/8 out:1/16\n out = self.conv4(out) # in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out) + pre, inplace=True)\n\n out = self.conv6(post) # in:1/8 out:1/4\n\n return out, pre, post\n\nclass hourglass2d(nn.Module):\n def __init__(self, inplanes, gn=False):\n super(hourglass2d, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(inplanes, inplanes * 2, 
kernel_size=3, stride=2, pad=1, dilation=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1, dilation=1, gn=gn)\n\n self.conv3 = nn.Sequential(convbn(inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1, dilation=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1, dilation=1, gn=gn),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(\n nn.ConvTranspose2d(inplanes * 2, inplanes * 2, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm2d(inplanes * 2) if not gn else nn.GroupNorm(32, inplanes * 2)) # +conv2\n\n self.conv6 = nn.Sequential(\n nn.ConvTranspose2d(inplanes * 2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,\n bias=False),\n nn.BatchNorm2d(inplanes) if not gn else nn.GroupNorm(32, inplanes)) # +x\n\n def forward(self, x, presqu, postsqu):\n\n out = self.conv1(x) # in:1/4 out:1/8\n pre = self.conv2(out) # in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) # in:1/8 out:1/16\n out = self.conv4(out) # in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out) + pre, inplace=True)\n\n out = self.conv6(post) # in:1/8 out:1/4\n\n return out, pre, post\n\nclass feature_extraction(nn.Module):\n def __init__(self, cfg):\n super(feature_extraction, self).__init__()\n\n self.cfg = cfg\n self.RPN3D_ENABLE = self.cfg.RPN3D_ENABLE\n self.cat_img_feature = getattr(self.cfg, 'cat_img_feature', False)\n self.rpn_onemore_conv = getattr(self.cfg, 'RPN_ONEMORE_CONV', False)\n self.rpn_onemore_dim = getattr(self.cfg, 'RPN_ONEMORE_DIM', 256)\n self.img_feature_relu = getattr(self.cfg, 'img_feature_relu', True)\n self.branch = getattr(self.cfg, 'branch', True)\n\n self.backbone = getattr(self.cfg, 'backbone', 'reslike-det-small')\n if self.backbone == 'reslike-det':\n first_dim = 64\n dims = [64, 128, 192, 256]\n nr_convs = [3, 6, 12, 4]\n branch_dim = 32\n lastconv_dim = [256, 32]\n elif self.backbone == 'reslike-det-small':\n first_dim = 64\n dims = [32, 64, 128, 192]\n nr_convs = [3, 6, 12, 4]\n branch_dim = 32\n lastconv_dim = [256, 32]\n else:\n raise ValueError('Invalid backbone {}.'.format(self.backbone))\n\n self.inplanes = first_dim\n\n self.firstconv = nn.Sequential(convbn(3, first_dim, 3, 2, 1, 1, gn=cfg.GN if first_dim >= 32 else False),\n nn.ReLU(inplace=True),\n convbn(first_dim, first_dim, 3, 1, 1, 1, gn=cfg.GN if first_dim >= 32 else False),\n nn.ReLU(inplace=True),\n convbn(first_dim, first_dim, 3, 1, 1, 1, gn=cfg.GN if first_dim >= 32 else False),\n nn.ReLU(inplace=True))\n\n self.layer1 = self._make_layer(BasicBlock, dims[0], nr_convs[0], 1,1,1, gn=cfg.GN if dims[0] >= 32 else False)\n self.layer2 = self._make_layer(BasicBlock, dims[1], nr_convs[1], 2,1,1, gn=cfg.GN) \n self.layer3 = self._make_layer(BasicBlock, dims[2], nr_convs[2], 1,1,1, gn=cfg.GN)\n self.layer4 = self._make_layer(BasicBlock, dims[3], nr_convs[3], 1,1,2, gn=cfg.GN)\n\n if self.branch:\n self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64,64)),\n convbn(dims[3], branch_dim, 1, 1, 0, 1, gn=cfg.GN, groups=min(32, branch_dim)),\n nn.ReLU(inplace=True))\n\n self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32,32)),\n convbn(dims[3], branch_dim, 1, 1, 0, 1, gn=cfg.GN, groups=min(32, branch_dim)),\n 
nn.ReLU(inplace=True))\n\n self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16,16)),\n convbn(dims[3], branch_dim, 1, 1, 0, 1, gn=cfg.GN, groups=min(32, branch_dim)),\n nn.ReLU(inplace=True))\n\n self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8,8)),\n convbn(dims[3], branch_dim, 1, 1, 0, 1, gn=cfg.GN, groups=min(32, branch_dim)),\n nn.ReLU(inplace=True))\n\n if self.branch:\n concat_dim = branch_dim * 4 + dims[1] + dims[3] + dims[2]\n else:\n concat_dim = dims[1] + dims[3] + dims[2]\n\n self.PlaneSweepVolume = getattr(cfg, 'PlaneSweepVolume', True)\n if self.PlaneSweepVolume:\n self.lastconv = nn.Sequential(convbn(concat_dim, lastconv_dim[0], 3, 1, 1, 1, gn=cfg.GN),\n nn.ReLU(inplace=True),\n nn.Conv2d(lastconv_dim[0], lastconv_dim[1], kernel_size=1, padding=0, stride = 1, bias=False))\n\n if self.cfg.RPN3D_ENABLE and self.cat_img_feature:\n if self.rpn_onemore_conv:\n rpnconvs = [convbn(concat_dim, self.rpn_onemore_dim, 3, 1, 1, 1, gn=cfg.GN),\n nn.ReLU(inplace=True),\n convbn(self.rpn_onemore_dim, self.cfg.RPN_CONVDIM, 3, 1, 1, 1, gn=cfg.GN, groups=(32 if self.cfg.RPN_CONVDIM % 32 == 0 else 16))]\n else:\n rpnconvs = [convbn(concat_dim, self.cfg.RPN_CONVDIM, 3, 1, 1, 1, gn=cfg.GN, groups=(32 if self.cfg.RPN_CONVDIM % 32 == 0 else 16))]\n if self.img_feature_relu:\n rpnconvs.append( nn.ReLU(inplace=True) )\n self.rpnconv = nn.Sequential( *rpnconvs )\n\n def _make_layer(self, block, planes, blocks, stride, pad, dilation, gn=False):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion) if not gn else nn.GroupNorm(32, planes * block.expansion))\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, pad, dilation, gn=gn))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes,1,None,pad,dilation, gn=gn))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n output = self.firstconv(x) ; #print('conv1', output.shape) # (1, 32, 192, 624)\n output = self.layer1(output) ; #print('conv2', output.shape) # (1, 32, 192, 624)\n output_raw = self.layer2(output) ; #print('conv3', output_raw.shape) # (1, 64, 96, 312)\n output_mid = self.layer3(output_raw) ; #print('conv4', output.shape) # (1, 128, 96, 312)\n output_skip = self.layer4(output_mid) ; #print('conv5', output_skip.shape) # (1, 128, 96, 312)\n\n if self.branch:\n output_branch1 = self.branch1(output_skip) ; #print('b1', output_branch1.shape) # (1, 32, 1, 4) # avgpool 64\n output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear', align_corners=self.cfg.align_corners) # (1, 32, 96, 312)\n\n output_branch2 = self.branch2(output_skip) ; #print('b2', output_branch2.shape)# (1, 32, 3, 9)\n output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear', align_corners=self.cfg.align_corners)\n\n output_branch3 = self.branch3(output_skip) ; #print('b3', output_branch3.shape)# (1, 32, 6, 19)\n output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear', align_corners=self.cfg.align_corners)\n\n output_branch4 = self.branch4(output_skip) ; #print('b4', output_branch4.shape)# (1, 32, 12, 39)\n output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear', 
align_corners=self.cfg.align_corners)\n\n if self.branch:\n concat_feature = torch.cat((output_raw, output_mid, output_skip, output_branch4, output_branch3, output_branch2, output_branch1), 1) ; #print('cat', concat_feature.shape)\n else:\n concat_feature = torch.cat((output_raw, output_mid, output_skip), 1)\n \n if self.RPN3D_ENABLE and self.cat_img_feature:\n rpn_feature = self.rpnconv(concat_feature)\n else:\n rpn_feature = None\n\n if self.PlaneSweepVolume:\n output_feature = self.lastconv(concat_feature) ; #print('last', output_feature.shape)\n else:\n output_feature = None\n\n return output_feature, rpn_feature\n" ]
[ [ "torch.sum", "torch.nn.BatchNorm2d", "torch.nn.GroupNorm", "torch.nn.AvgPool2d", "torch.nn.BatchNorm3d", "torch.nn.ConvTranspose3d", "torch.nn.functional.relu", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.cat", "torch.nn.ReLU", "torch.nn.Conv3d", "torch.nn.ConvTranspose2d" ] ]
aegissystems/yolact
[ "29ee13f41a83b62a8946a86cb01e49d8fce0676c" ]
[ "utils/functions.py" ]
[ "import torch\nimport torch.nn as nn\nimport os\nimport math\nfrom collections import deque\nfrom pathlib import Path\nfrom YOLACT.layers.interpolate import InterpolateModule\n\nclass MovingAverage():\n \"\"\" Keeps an average window of the specified number of items. \"\"\"\n\n def __init__(self, max_window_size=1000):\n self.max_window_size = max_window_size\n self.reset()\n\n def add(self, elem):\n \"\"\" Adds an element to the window, removing the earliest element if necessary. \"\"\"\n if not math.isfinite(elem):\n print('Warning: Moving average ignored a value of %f' % elem)\n return\n \n self.window.append(elem)\n self.sum += elem\n\n if len(self.window) > self.max_window_size:\n self.sum -= self.window.popleft()\n \n def append(self, elem):\n \"\"\" Same as add just more pythonic. \"\"\"\n self.add(elem)\n\n def reset(self):\n \"\"\" Resets the MovingAverage to its initial state. \"\"\"\n self.window = deque()\n self.sum = 0\n\n def get_avg(self):\n \"\"\" Returns the average of the elements in the window. \"\"\"\n return self.sum / max(len(self.window), 1)\n\n def __str__(self):\n return str(self.get_avg())\n \n def __repr__(self):\n return repr(self.get_avg())\n \n def __len__(self):\n return len(self.window)\n\n\nclass ProgressBar():\n \"\"\" A simple progress bar that just outputs a string. \"\"\"\n\n def __init__(self, length, max_val):\n self.max_val = max_val\n self.length = length\n self.cur_val = 0\n \n self.cur_num_bars = -1\n self._update_str()\n\n def set_val(self, new_val):\n self.cur_val = new_val\n\n if self.cur_val > self.max_val:\n self.cur_val = self.max_val\n if self.cur_val < 0:\n self.cur_val = 0\n\n self._update_str()\n \n def is_finished(self):\n return self.cur_val == self.max_val\n\n def _update_str(self):\n num_bars = int(self.length * (self.cur_val / self.max_val))\n\n if num_bars != self.cur_num_bars:\n self.cur_num_bars = num_bars\n self.string = '█' * num_bars + '░' * (self.length - num_bars)\n \n def __repr__(self):\n return self.string\n \n def __str__(self):\n return self.string\n\n\ndef init_console():\n \"\"\"\n Initialize the console to be able to use ANSI escape characters on Windows.\n \"\"\"\n if os.name == 'nt':\n from colorama import init\n init()\n\n\nclass SavePath:\n \"\"\"\n Why is this a class?\n Why do I have a class for creating and parsing save paths?\n What am I doing with my life?\n \"\"\"\n\n def __init__(self, model_name:str, epoch:int, iteration:int):\n self.model_name = model_name\n self.epoch = epoch\n self.iteration = iteration\n\n def get_path(self, root:str=''):\n file_name = self.model_name + '_' + str(self.epoch) + '_' + str(self.iteration) + '.pth'\n return os.path.join(root, file_name)\n\n @staticmethod\n def from_str(path:str):\n file_name = os.path.basename(path)\n \n if file_name.endswith('.pth'):\n file_name = file_name[:-4]\n \n params = file_name.split('_')\n\n if file_name.endswith('interrupt'):\n params = params[:-1]\n \n model_name = '_'.join(params[:-2])\n epoch = params[-2]\n iteration = params[-1]\n \n return SavePath(model_name, int(epoch), int(iteration))\n\n @staticmethod\n def remove_interrupt(save_folder):\n for p in Path(save_folder).glob('*_interrupt.pth'):\n p.unlink()\n \n @staticmethod\n def get_interrupt(save_folder):\n for p in Path(save_folder).glob('*_interrupt.pth'): \n return str(p)\n return None\n \n @staticmethod\n def get_latest(save_folder, config):\n \"\"\" Note: config should be config.name. 
\"\"\"\n max_iter = -1\n max_name = None\n\n for p in Path(save_folder).glob(config + '_*'):\n path_name = str(p)\n\n try:\n save = SavePath.from_str(path_name)\n except:\n continue \n \n if save.model_name == config and save.iteration > max_iter:\n max_iter = save.iteration\n max_name = path_name\n\n return max_name\n\ndef make_net(in_channels, conf, include_last_relu=True):\n \"\"\"\n A helper function to take a config setting and turn it into a network.\n Used by protonet and extrahead. Returns (network, out_channels)\n \"\"\"\n def make_layer(layer_cfg):\n nonlocal in_channels\n \n # Possible patterns:\n # ( 256, 3, {}) -> conv\n # ( 256,-2, {}) -> deconv\n # (None,-2, {}) -> bilinear interpolate\n # ('cat',[],{}) -> concat the subnetworks in the list\n #\n # You know it would have probably been simpler just to adopt a 'c' 'd' 'u' naming scheme.\n # Whatever, it's too late now.\n if isinstance(layer_cfg[0], str):\n layer_name = layer_cfg[0]\n\n if layer_name == 'cat':\n nets = [make_net(in_channels, x) for x in layer_cfg[1]]\n layer = Concat([net[0] for net in nets], layer_cfg[2])\n num_channels = sum([net[1] for net in nets])\n else:\n num_channels = layer_cfg[0]\n kernel_size = layer_cfg[1]\n\n if kernel_size > 0:\n layer = nn.Conv2d(in_channels, num_channels, kernel_size, **layer_cfg[2])\n else:\n if num_channels is None:\n layer = InterpolateModule(scale_factor=-kernel_size, mode='bilinear', align_corners=False, **layer_cfg[2])\n else:\n layer = nn.ConvTranspose2d(in_channels, num_channels, -kernel_size, **layer_cfg[2])\n \n in_channels = num_channels if num_channels is not None else in_channels\n\n # Don't return a ReLU layer if we're doing an upsample. This probably doesn't affect anything\n # output-wise, but there's no need to go through a ReLU here.\n # Commented out for backwards compatibility with previous models\n # if num_channels is None:\n # return [layer]\n # else:\n return [layer, nn.ReLU(inplace=True)]\n\n # Use sum to concat together all the component layer lists\n net = sum([make_layer(x) for x in conf], [])\n if not include_last_relu:\n net = net[:-1]\n\n return nn.Sequential(*(net)), in_channels" ]
[ [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ConvTranspose2d" ] ]
bio-ontology-research-group/deeppheno
[ "8a58c6efcdd6861c2e5c380bb81690618e58b629" ]
[ "results.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport click as ck\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport sys\nfrom collections import deque\nimport time\nimport logging\nfrom sklearn.metrics import roc_curve, auc, matthews_corrcoef\nfrom scipy.spatial import distance\nfrom scipy import sparse\nimport math\nfrom utils import FUNC_DICT, Ontology, NAMESPACES\nfrom matplotlib import pyplot as plt\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\n\[email protected]()\[email protected](\n '--method', '-m', default='',\n help='model method')\[email protected](\n '--ont', '-o', default='organ',\n help='model method')\ndef main(method, ont):\n # res = {}\n # for fold in range(1,6):\n # with open(f'fold{fold}_data-cafa/predictions{method}.pkl.{ont}.res') as f:\n # lines = f.read().splitlines()\n # items = lines[-1].split(', ')\n # for item in items:\n # it = item.split(': ')\n # if it[0] not in res:\n # res[it[0]] = []\n # res[it[0]].append(float(it[1]))\n # with open(f'fold{fold}_data-cafa/predictions{method}.pkl.auc.{ont}.res') as f:\n # lines = f.read().splitlines()\n # auc = float(lines[-1])\n # if 'mauc' not in res:\n # res['mauc'] = []\n # res['mauc'].append(auc)\n \n # avg = {}\n # avg_err = {}\n # for key in res:\n # res[key] = np.array(res[key])\n # avg[key] = np.mean(res[key])\n # avg_err[key] = np.mean(np.abs(res[key] - avg[key]))\n \n # res_flat = {}\n # for fold in range(1,6):\n # with open(f'fold{fold}_data-cafa/predictions{method}.pkl_flat.pkl.{ont}.res') as f:\n # lines = f.read().splitlines()\n # items = lines[-1].split(', ')\n # for item in items:\n # it = item.split(': ')\n # if it[0] not in res_flat:\n # res_flat[it[0]] = []\n # res_flat[it[0]].append(float(it[1]))\n # with open(f'fold{fold}_data-cafa/predictions{method}.pkl_flat.pkl.auc.{ont}.res') as f:\n # lines = f.read().splitlines()\n # auc = float(lines[-1])\n # if 'mauc' not in res_flat:\n # res_flat['mauc'] = []\n # res_flat['mauc'].append(auc)\n \n # avg_flat = {}\n # avg_flat_err = {}\n # for key in res_flat:\n # res_flat[key] = np.array(res_flat[key])\n # avg_flat[key] = np.mean(res_flat[key])\n # avg_flat_err[key] = np.mean(np.abs(res_flat[key] - avg_flat[key]))\n\n # auc = avg_flat['mauc']\n # fmax = avg_flat['Fmax']\n # smin = avg_flat['Smin']\n # aupr = avg_flat['AUPR']\n # auce = avg_flat_err['mauc']\n # fmaxe = avg_flat_err['Fmax']\n # smine = avg_flat_err['Smin']\n # aupre = avg_flat_err['AUPR']\n # print(f'DeepPhenoFlat & {fmax:0.3f} $\\pm$ {fmaxe:0.3f} & {smin:0.3f} $\\pm$ {smine:0.3f} & {aupr:0.3f} $\\pm$ {aupre:0.3f} & {auc:0.3f} $\\pm$ {auce:0.3f} \\\\\\\\')\n # print('\\\\hline')\n\n # auc = avg['mauc']\n # fmax = avg['Fmax']\n # smin = avg['Smin']\n # aupr = avg['AUPR']\n # auce = avg_err['mauc']\n # fmaxe = avg_err['Fmax']\n # smine = avg_err['Smin']\n # aupre = avg_err['AUPR']\n # print(f'DeepPheno & {fmax:0.3f} $\\pm$ {fmaxe:0.3f} & {smin:0.3f} $\\pm$ {smine:0.3f} & {aupr:0.3f} $\\pm$ {aupre:0.3f} & {auc:0.3f} $\\pm$ {auce:0.3f} \\\\\\\\')\n\n # res_gd = {}\n # gd = {}\n # gd_err = {}\n # for fold in range(1,6):\n # with open(f'fold{fold}_data/sim_gene_disease{method}.txt.res') as f:\n # lines = f.read().splitlines()\n # res = lines[-1].split(' ')\n # for i, item in enumerate(res):\n # if i not in res_gd:\n # res_gd[i] = []\n # res_gd[i].append(float(item))\n # for key in res_gd:\n # res_gd[key] = np.array(res_gd[key])\n # gd[key] = np.mean(res_gd[key])\n # gd_err[key] = 
np.mean(np.abs(res_gd[key] - gd[key]))\n \n # print(f'{gd[0]:0.2f} {gd[1]:0.2f} {gd[2]:0.2f} {gd[3]:0.2f}')\n\n res_phenos = {}\n phenos = {}\n ph = {}\n ph_err = {}\n for fold in range(1,6):\n with open(f'fold{fold}_data/phenotype_results.tsv') as f:\n for line in f:\n it = line.strip().split('\\t')\n if it[0] not in res_phenos:\n res_phenos[it[0]] = []\n phenos[it[0]] = it\n res_phenos[it[0]].append(float(it[2]))\n for key in res_phenos:\n res_phenos[key] = np.array(res_phenos[key])\n ph[key] = np.mean(res_phenos[key])\n ph_err[key] = np.mean(np.abs(res_phenos[key] - ph[key]))\n \n res = []\n for key, it in phenos.items():\n res.append((it[0], it[1], ph[key], ph_err[key], it[3], it[4]))\n res = sorted(res, key=lambda x: x[2], reverse=True)\n with open('data/phenotype_results.tsv', 'w') as f:\n f.write('HP\\tLabel\\tFmax\\n')\n for it in res:\n f.write(f'{it[0]} & {it[1]} & {it[2]:.3f} $\\pm$ {it[3]:.3f} \\\\\\\\ \\n')\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.abs", "numpy.mean" ] ]
souravsingh/chainercv
[ "8f76510472bc95018c183e72f37bc6c34a89969c" ]
[ "tests/utils_tests/testing_tests/assertions_tests/test_assert_is_bbox_dataset.py" ]
[ "import numpy as np\nimport unittest\n\nfrom chainer.dataset import DatasetMixin\nfrom chainer import testing\n\nfrom chainercv.utils import assert_is_bbox_dataset\nfrom chainercv.utils import generate_random_bbox\n\n\nclass BboxDataset(DatasetMixin):\n\n def __init__(self, options=(), empty_bbox=False):\n self.options = options\n self.empty_bbox = empty_bbox\n\n def __len__(self):\n return 10\n\n def get_example(self, i):\n img = np.random.randint(0, 256, size=(3, 48, 64))\n if self.empty_bbox:\n n_bbox = 0\n else:\n n_bbox = np.random.randint(10, 20)\n bbox = generate_random_bbox(n_bbox, (48, 64), 5, 20)\n label = np.random.randint(0, 20, size=n_bbox).astype(np.int32)\n\n return (img, bbox, label) + self.options\n\n\nclass InvalidSampleSizeDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(\n InvalidSampleSizeDataset, self).get_example(i)[:3]\n return img, bbox\n\n\nclass InvalidImageDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(InvalidImageDataset, self).get_example(i)[:3]\n return img[0], bbox, label\n\n\nclass InvalidBboxDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(InvalidBboxDataset, self).get_example(i)[:3]\n bbox += 1000\n return img, bbox, label\n\n\nclass InvalidLabelDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(InvalidLabelDataset, self).get_example(i)[:3]\n label += 1000\n return img, bbox, label\n\n\nclass MismatchLengthDataset(BboxDataset):\n\n def get_example(self, i):\n img, bbox, label = super(\n MismatchLengthDataset, self).get_example(i)[:3]\n return img, bbox, label[1:]\n\n\[email protected](\n {'dataset': BboxDataset(), 'valid': True},\n {'dataset': BboxDataset(empty_bbox=True), 'valid': True},\n {'dataset': BboxDataset(('option',)), 'valid': True},\n {'dataset': InvalidSampleSizeDataset(), 'valid': False},\n {'dataset': InvalidImageDataset(), 'valid': False},\n {'dataset': InvalidBboxDataset(), 'valid': False},\n {'dataset': InvalidLabelDataset(), 'valid': False},\n {'dataset': MismatchLengthDataset(), 'valid': False},\n)\nclass TestAssertIsBboxDataset(unittest.TestCase):\n\n def test_assert_is_bbox_dataset(self):\n if self.valid:\n assert_is_bbox_dataset(self.dataset, 20)\n else:\n with self.assertRaises(AssertionError):\n assert_is_bbox_dataset(self.dataset, 20)\n\n\ntesting.run_module(__name__, __file__)\n" ]
[ [ "numpy.random.randint" ] ]
berlinchen7/EXOSIMS
[ "0c46945ae0b915f97157fb17de3eb344f5945e74" ]
[ "EXOSIMS/SurveySimulation/tieredScheduler_DD_SLSQP.py" ]
[ "from EXOSIMS.SurveySimulation.tieredScheduler_SLSQP import tieredScheduler_SLSQP\nimport EXOSIMS, os\nimport astropy.units as u\nimport astropy.constants as const\nimport numpy as np\nimport itertools\nfrom scipy import interpolate\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport time\nimport copy\nfrom EXOSIMS.util.deltaMag import deltaMag\n\nclass tieredScheduler_DD_SLSQP(tieredScheduler_SLSQP):\n \"\"\"tieredScheduler_DD_SLSQP - tieredScheduler Dual Detection SLSQP\n \n This class implements a version of the tieredScheduler that performs dual-band\n detections and uses the SLSQP scheduler as a base for inheritance.\n \"\"\"\n\n def __init__(self, **specs):\n \n tieredScheduler_SLSQP.__init__(self, **specs)\n \n\n def run_sim(self):\n \"\"\"Performs the survey simulation \n \n Returns:\n mission_end (string):\n Message printed at the end of a survey simulation.\n \n \"\"\"\n \n OS = self.OpticalSystem\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n Comp = self.Completeness\n \n # TODO: start using this self.currentSep\n # set occulter separation if haveOcculter\n self.currentSep = Obs.occulterSep\n \n # Choose observing modes selected for detection (default marked with a flag),\n det_modes = list(filter(lambda mode: 'imag' in mode['inst']['name'], OS.observingModes))\n # and for characterization (default is first spectro/IFS mode)\n spectroModes = list(filter(lambda mode: 'spec' in mode['inst']['name'], OS.observingModes))\n if np.any(spectroModes):\n char_mode = spectroModes[0]\n # if no spectro mode, default char mode is first observing mode\n else:\n char_mode = OS.observingModes[0]\n \n # Begin Survey, and loop until mission is finished\n self.logger.info('OB{}: survey beginning.'.format(TK.OBnumber+1))\n self.vprint('OB{}: survey beginning.'.format(TK.OBnumber+1))\n t0 = time.time()\n sInd = None\n occ_sInd = None\n cnt = 0\n\n while not TK.mission_is_over(OS, Obs, det_modes[0]):\n \n # Acquire the NEXT TARGET star index and create DRM\n prev_occ_sInd = occ_sInd\n old_sInd = sInd #used to save sInd if returned sInd is None\n waitTime = None\n DRM, sInd, occ_sInd, t_det, sd, occ_sInds, det_mode = self.next_target(sInd, occ_sInd, det_modes, char_mode)\n \n if det_mode is not None:\n true_t_det = t_det*det_mode['timeMultiplier'] + Obs.settlingTime + det_mode['syst']['ohTime']\n else:\n true_t_det = t_det\n\n if sInd != occ_sInd and sInd is not None:\n assert t_det != 0, \"Integration time can't be 0.\"\n\n if sInd is not None and (TK.currentTimeAbs.copy() + true_t_det) >= self.occ_arrives and occ_sInd != self.last_chard:\n sInd = occ_sInd\n if sInd == occ_sInd:\n self.ready_to_update = True\n\n time2arrive = self.occ_arrives - TK.currentTimeAbs.copy()\n \n if sInd is not None:\n cnt += 1\n\n # clean up revisit list when one occurs to prevent repeats\n if np.any(self.starRevisit) and np.any(np.where(self.starRevisit[:,0] == float(sInd))):\n s_revs = np.where(self.starRevisit[:,0] == float(sInd))[0]\n dt_max = 1.*u.week\n t_revs = np.where(self.starRevisit[:,1]*u.day - TK.currentTimeNorm.copy() < dt_max)[0]\n self.starRevisit = np.delete(self.starRevisit, np.intersect1d(s_revs,t_revs),0)\n\n # get the index of the selected target for the extended list\n if TK.currentTimeNorm.copy() > TK.missionLife and self.starExtended.shape[0] == 0:\n for i in range(len(self.DRM)):\n if np.any([x == 1 for x in self.DRM[i]['plan_detected']]):\n self.starExtended = np.hstack((self.starExtended, self.DRM[i]['star_ind']))\n 
self.starExtended = np.unique(self.starExtended)\n \n # Beginning of observation, start to populate DRM\n DRM['OB_nb'] = TK.OBnumber+1\n DRM['ObsNum'] = cnt\n DRM['star_ind'] = sInd\n pInds = np.where(SU.plan2star == sInd)[0]\n DRM['plan_inds'] = pInds.astype(int).tolist()\n\n if sInd == occ_sInd:\n # wait until expected arrival time is observed\n if time2arrive > 0*u.d:\n TK.advanceToAbsTime(self.occ_arrives)\n if time2arrive > 1*u.d:\n self.GAtime = self.GAtime + time2arrive.to('day')\n\n TK.obsStart = TK.currentTimeNorm.copy().to('day')\n\n self.logger.info(' Observation #%s, target #%s/%s with %s planet(s), mission time: %s'\\\n %(cnt, sInd+1, TL.nStars, len(pInds), TK.obsStart.round(2)))\n self.vprint(' Observation #%s, target #%s/%s with %s planet(s), mission time: %s'\\\n %(cnt, sInd+1, TL.nStars, len(pInds), TK.obsStart.round(2)))\n\n DRM['arrival_time'] = TK.currentTimeNorm.copy().to('day')\n \n if sInd != occ_sInd:\n self.starVisits[sInd] += 1\n # PERFORM DETECTION and populate revisit list attribute.\n # First store fEZ, dMag, WA\n if np.any(pInds):\n DRM['det_fEZ'] = SU.fEZ[pInds].to('1/arcsec2').value.tolist()\n DRM['det_dMag'] = SU.dMag[pInds].tolist()\n DRM['det_WA'] = SU.WA[pInds].to('mas').value.tolist()\n detected, det_fZ, det_systemParams, det_SNR, FA = self.observation_detection(sInd, t_det, det_mode)\n\n if np.any(detected):\n self.sInd_detcounts[sInd] += 1\n self.sInd_dettimes[sInd] = (self.sInd_dettimes.get(sInd) or []) + [TK.currentTimeNorm.copy().to('day')]\n self.vprint(' Det. results are: %s'%(detected))\n\n # update GAtime\n self.GAtime = self.GAtime + t_det.to('day')*self.GA_simult_det_fraction\n\n # populate the DRM with detection results\n DRM['det_time'] = t_det.to('day')\n DRM['det_status'] = detected\n DRM['det_SNR'] = det_SNR\n DRM['det_fZ'] = det_fZ.to('1/arcsec2')\n DRM['det_params'] = det_systemParams\n DRM['FA_det_status'] = int(FA)\n\n det_comp = Comp.comp_per_intTime(t_det, TL, sInd, det_fZ, self.ZodiacalLight.fEZ0, self.WAint[sInd], det_mode)[0]\n DRM['det_comp'] = det_comp\n DRM['det_mode'] = dict(det_mode)\n del DRM['det_mode']['inst'], DRM['det_mode']['syst']\n \n elif sInd == occ_sInd:\n self.last_chard = occ_sInd\n self.occ_starVisits[occ_sInd] += 1\n # PERFORM CHARACTERIZATION and populate spectra list attribute.\n occ_pInds = np.where(SU.plan2star == occ_sInd)[0]\n sInd = occ_sInd\n\n DRM['slew_time'] = self.occ_slewTime.to('day').value\n DRM['slew_angle'] = self.occ_sd.to('deg').value\n slew_mass_used = self.occ_slewTime*Obs.defburnPortion*Obs.flowRate\n DRM['slew_dV'] = (self.occ_slewTime*self.ao*Obs.defburnPortion).to('m/s').value\n DRM['slew_mass_used'] = slew_mass_used.to('kg')\n Obs.scMass = Obs.scMass - slew_mass_used\n DRM['scMass'] = Obs.scMass.to('kg')\n\n self.logger.info(' Starshade and telescope aligned at target star')\n self.vprint(' Starshade and telescope aligned at target star')\n\n # PERFORM CHARACTERIZATION and populate spectra list attribute\n characterized, char_fZ, char_systemParams, char_SNR, char_intTime = \\\n self.observation_characterization(sInd, char_mode)\n if np.any(characterized):\n self.vprint(' Char. 
results are: %s'%(characterized))\n else:\n # make sure we don't accidnetally double characterize\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + .01*u.d)\n assert char_intTime != 0, \"Integration time can't be 0.\"\n if np.any(occ_pInds):\n DRM['char_fEZ'] = SU.fEZ[occ_pInds].to('1/arcsec2').value.tolist()\n DRM['char_dMag'] = SU.dMag[occ_pInds].tolist()\n DRM['char_WA'] = SU.WA[occ_pInds].to('mas').value.tolist()\n DRM['char_mode'] = dict(char_mode)\n del DRM['char_mode']['inst'], DRM['char_mode']['syst']\n\n # update the occulter wet mass\n if OS.haveOcculter and char_intTime is not None:\n DRM = self.update_occulter_mass(DRM, sInd, char_intTime, 'char')\n char_comp = Comp.comp_per_intTime(char_intTime, TL, occ_sInd, char_fZ, self.ZodiacalLight.fEZ0, self.WAint[occ_sInd], char_mode)[0]\n DRM['char_comp'] = char_comp\n FA = False\n # populate the DRM with characterization results\n DRM['char_time'] = char_intTime.to('day') if char_intTime else 0.*u.day\n #DRM['char_counts'] = self.sInd_charcounts[sInd]\n DRM['char_status'] = characterized[:-1] if FA else characterized\n DRM['char_SNR'] = char_SNR[:-1] if FA else char_SNR\n DRM['char_fZ'] = char_fZ.to('1/arcsec2')\n DRM['char_params'] = char_systemParams\n # populate the DRM with FA results\n DRM['FA_det_status'] = int(FA)\n DRM['FA_char_status'] = characterized[-1] if FA else 0\n DRM['FA_char_SNR'] = char_SNR[-1] if FA else 0.\n DRM['FA_char_fEZ'] = self.lastDetected[sInd,1][-1]/u.arcsec**2 if FA else 0./u.arcsec**2\n DRM['FA_char_dMag'] = self.lastDetected[sInd,2][-1] if FA else 0.\n DRM['FA_char_WA'] = self.lastDetected[sInd,3][-1]*u.arcsec if FA else 0.*u.arcsec\n\n # add star back into the revisit list\n if np.any(characterized):\n char = np.where(characterized)[0]\n pInds = np.where(SU.plan2star == sInd)[0]\n smin = np.min(SU.s[pInds[char]])\n pInd_smin = pInds[np.argmin(SU.s[pInds[char]])]\n\n Ms = TL.MsTrue[sInd]\n sp = smin\n Mp = SU.Mp[pInd_smin]\n mu = const.G*(Mp + Ms)\n T = 2.*np.pi*np.sqrt(sp**3/mu)\n t_rev = TK.currentTimeNorm.copy() + T/2.\n\n self.goal_GAtime = self.GA_percentage * TK.currentTimeNorm.copy().to('day')\n goal_GAdiff = self.goal_GAtime - self.GAtime\n\n # allocate extra time to GA if we are falling behind\n if goal_GAdiff > 1*u.d and TK.currentTimeAbs.copy() < self.occ_arrives:\n GA_diff = min(self.occ_arrives - TK.currentTimeAbs.copy(), goal_GAdiff)\n self.vprint('Allocating time %s to general astrophysics'%(GA_diff))\n self.GAtime = self.GAtime + GA_diff\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + GA_diff)\n # allocate time if there is no target for the starshade\n elif goal_GAdiff > 1*u.d and (self.occ_arrives - TK.currentTimeAbs.copy()) < -5*u.d and not np.any(occ_sInds):\n self.vprint('No Available Occulter Targets: Allocating time %s to general astrophysics'%(goal_GAdiff))\n self.GAtime = self.GAtime + goal_GAdiff\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + goal_GAdiff)\n\n DRM['exoplanetObsTime'] = TK.exoplanetObsTime.copy()\n\n # Append result values to self.DRM\n self.DRM.append(DRM)\n\n # Calculate observation end time\n TK.obsEnd = TK.currentTimeNorm.copy().to('day')\n\n # With prototype TimeKeeping, if no OB duration was specified, advance\n # to the next OB with timestep equivalent to time spent on one target\n if np.isinf(TK.OBduration) and (TK.missionPortion < 1):\n self.arbitrary_time_advancement(TK.currentTimeNorm.to('day').copy() - DRM['arrival_time'])\n \n # With occulter, if spacecraft fuel is depleted, exit loop\n if Obs.scMass < Obs.dryMass:\n self.vprint('Total fuel mass 
exceeded at %s' %TK.obsEnd.round(2))\n break\n\n else:#sInd == None\n sInd = old_sInd#Retain the last observed star\n if(TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]): # currentTime is at end of OB\n #Conditional Advance To Start of Next OB\n if not TK.mission_is_over(OS, Obs,det_mode):#as long as the mission is not over\n TK.advancetToStartOfNextOB()#Advance To Start of Next OB\n elif(waitTime is not None):\n #CASE 1: Advance specific wait time\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n self.vprint('waitTime is not None')\n else:\n startTimes = TK.currentTimeAbs.copy() + np.zeros(TL.nStars)*u.d # Start Times of Observations\n observableTimes = Obs.calculate_observableTimes(TL,np.arange(TL.nStars),startTimes,self.koMaps,self.koTimes,self.mode)[0]\n #CASE 2 If There are no observable targets for the rest of the mission\n if((observableTimes[(TK.missionFinishAbs.copy().value*u.d > observableTimes.value*u.d)*(observableTimes.value*u.d >= TK.currentTimeAbs.copy().value*u.d)].shape[0]) == 0):#Are there any stars coming out of keepout before end of mission\n self.vprint('No Observable Targets for Remainder of mission at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n #Manually advancing time to mission end\n TK.currentTimeNorm = TK.missionLife\n TK.currentTimeAbs = TK.missionFinishAbs\n else:#CASE 3 nominal wait time if at least 1 target is still in list and observable\n #TODO: ADD ADVANCE TO WHEN FZMIN OCURS\n inds1 = np.arange(TL.nStars)[observableTimes.value*u.d > TK.currentTimeAbs.copy().value*u.d]\n inds2 = np.intersect1d(self.intTimeFilterInds, inds1) #apply intTime filter\n inds3 = self.revisitFilter(inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)) #apply revisit Filter #NOTE this means stars you added to the revisit list \n self.vprint(\"Filtering %d stars from advanceToAbsTime\"%(TL.nStars - len(inds3)))\n oTnowToEnd = observableTimes[inds3]\n if not oTnowToEnd.value.shape[0] == 0: #there is at least one observableTime between now and the end of the mission\n tAbs = np.min(oTnowToEnd)#advance to that observable time\n else:\n tAbs = TK.missionStart + TK.missionLife#advance to end of mission\n tmpcurrentTimeNorm = TK.currentTimeNorm.copy()\n success = TK.advanceToAbsTime(tAbs)#Advance Time to this time OR start of next OB following this time\n self.vprint('No Observable Targets a currentTimeNorm= %.2f Advanced To currentTimeNorm= %.2f'%(tmpcurrentTimeNorm.to('day').value, TK.currentTimeNorm.to('day').value))\n \n\n else:\n dtsim = (time.time()-t0)*u.s\n mission_end = \"Mission complete: no more time available.\\n\"\\\n + \"Simulation duration: %s.\\n\" %dtsim.astype('int')\\\n + \"Results stored in SurveySimulation.DRM (Design Reference Mission).\"\n\n self.logger.info(mission_end)\n self.vprint(mission_end)\n\n return mission_end\n\n def next_target(self, old_sInd, old_occ_sInd, det_modes, char_mode):\n \"\"\"Finds index of next target star and calculates its integration time.\n \n This method chooses the next target star index based on which\n stars are available, their integration time, and maximum completeness.\n Returns None if no target could be found.\n \n Args:\n old_sInd (integer):\n Index of the previous target star for the telescope\n old_occ_sInd (integer):\n Index of the previous target star for the occulter\n det_modes (dict array):\n Selected observing mode for detection\n char_mode (dict):\n Selected observing mode for characterization\n \n Returns:\n DRM (dicts):\n Contains the results of survey simulation\n sInd 
(integer):\n Index of next target star. Defaults to None.\n occ_sInd (integer):\n Index of next occulter target star. Defaults to None.\n t_det (astropy Quantity):\n Selected star integration time for detection in units of day. \n Defaults to None.\n \n \"\"\"\n \n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n Comp = self.Completeness\n TL = self.TargetList\n Obs = self.Observatory\n TK = self.TimeKeeping\n SU = self.SimulatedUniverse\n \n # Create DRM\n DRM = {}\n \n # selecting appropriate koMap\n occ_koMap = self.koMaps[char_mode['syst']['name']]\n koMap = self.koMaps[det_modes[0]['syst']['name']]\n\n # In case of an occulter, initialize slew time factor\n # (add transit time and reduce starshade mass)\n assert OS.haveOcculter == True\n self.ao = Obs.thrust/Obs.scMass\n\n # Star indices that correspond with the given HIPs numbers for the occulter\n # XXX ToDo: print out HIPs that don't show up in TL\n HIP_sInds = np.where(np.in1d(TL.Name, self.occHIPs))[0]\n if TL.earths_only:\n HIP_sInds = np.union1d(HIP_sInds, self.promoted_stars).astype(int)\n sInd = None\n\n # Now, start to look for available targets\n while not TK.mission_is_over(OS, Obs, det_modes[0]):\n # allocate settling time + overhead time\n tmpCurrentTimeAbs = TK.currentTimeAbs.copy()\n tmpCurrentTimeNorm = TK.currentTimeNorm.copy()\n occ_tmpCurrentTimeAbs = TK.currentTimeAbs.copy()\n occ_tmpCurrentTimeNorm = TK.currentTimeNorm.copy()\n\n # 0 initialize arrays\n slewTimes = np.zeros(TL.nStars)*u.d\n fZs = np.zeros(TL.nStars)/u.arcsec**2\n dV = np.zeros(TL.nStars)*u.m/u.s\n intTimes = np.zeros(TL.nStars)*u.d\n occ_intTimes = np.zeros(TL.nStars)*u.d\n tovisit = np.zeros(TL.nStars, dtype=bool)\n occ_tovisit = np.zeros(TL.nStars, dtype=bool)\n sInds = np.arange(TL.nStars)\n\n # 1 Find spacecraft orbital START positions and filter out unavailable \n # targets. 
If occulter, each target has its own START position.\n sd = Obs.star_angularSep(TL, old_occ_sInd, sInds, tmpCurrentTimeAbs)\n obsTimes = Obs.calculate_observableTimes(TL, sInds, tmpCurrentTimeAbs, self.koMaps, self.koTimes, char_mode)\n slewTimes = Obs.calculate_slewTimes(TL, old_occ_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs)\n\n # 2.1 filter out totTimes > integration cutoff\n if len(sInds) > 0:\n occ_sInds = np.intersect1d(self.occ_intTimeFilterInds, sInds)\n if len(sInds) > 0:\n sInds = np.intersect1d(self.intTimeFilterInds, sInds)\n \n # Starttimes based off of slewtime\n occ_startTimes = occ_tmpCurrentTimeAbs.copy() + slewTimes\n occ_startTimesNorm = occ_tmpCurrentTimeNorm.copy() + slewTimes\n\n startTimes = tmpCurrentTimeAbs.copy() + np.zeros(TL.nStars)*u.d\n startTimesNorm = tmpCurrentTimeNorm.copy()\n\n # 2.5 Filter stars not observable at startTimes\n try:\n tmpIndsbool = list()\n for i in np.arange(len(occ_sInds)):\n koTimeInd = np.where(np.round(occ_startTimes[occ_sInds[i]].value) - self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(occ_koMap[occ_sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds_occ_ko = occ_sInds[tmpIndsbool]\n occ_sInds = sInds_occ_ko[np.where(np.in1d(sInds_occ_ko, HIP_sInds))[0]]\n del tmpIndsbool\n except:#If there are no target stars to observe \n sInds_occ_ko = np.asarray([],dtype=int)\n occ_sInds = np.asarray([],dtype=int)\n\n try:\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(np.round(startTimes[sInds[i]].value) - self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(koMap[sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except:#If there are no target stars to observe \n sInds = np.asarray([],dtype=int)\n\n # 2.9 Occulter target promotion step\n occ_sInds = self.promote_coro_targets(occ_sInds, sInds_occ_ko)\n\n # 3 Filter out all previously (more-)visited targets, unless in \n # revisit list, with time within some dt of start (+- 1 week)\n if len(sInds.tolist()) > 0:\n sInds = self.revisitFilter(sInds, TK.currentTimeNorm.copy())\n\n # revisit list, with time after start\n if np.any(occ_sInds):\n occ_tovisit[occ_sInds] = (self.occ_starVisits[occ_sInds] == self.occ_starVisits[occ_sInds].min())\n if self.occ_starRevisit.size != 0:\n dt_max = 1.*u.week\n dt_rev = TK.currentTimeNorm.copy() - self.occ_starRevisit[:,1]*u.day\n ind_rev = [int(x) for x in self.occ_starRevisit[dt_rev > 0, 0] if x in occ_sInds]\n occ_tovisit[ind_rev] = True\n occ_sInds = np.where(occ_tovisit)[0]\n\n # 4 calculate integration times for ALL preselected targets, \n # and filter out totTimes > integration cutoff\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, det_modes[0])\n maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife, OS.intCutoff)#Maximum intTime allowed\n\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, char_mode)\n occ_maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife, OS.intCutoff)#Maximum intTime allowed\n\n if len(occ_sInds) > 0:\n if self.int_inflection:\n fEZ = ZL.fEZ0\n WA = self.WAint\n occ_intTimes[occ_sInds] = self.calc_int_inflection(occ_sInds, fEZ, occ_startTimes, WA[occ_sInds], char_mode, ischar=True)\n totTimes = occ_intTimes*char_mode['timeMultiplier']\n 
occ_endTimes = occ_startTimes + totTimes\n else:\n # characterization_start = occ_startTimes\n occ_intTimes[occ_sInds] = self.calc_targ_intTime(occ_sInds, occ_startTimes[occ_sInds], char_mode) * (1 + self.charMargin)\n\n # Adjust integration time for stars with known earths around them\n for occ_star in occ_sInds:\n if occ_star in self.promoted_stars:\n occ_earths = np.intersect1d(np.where(SU.plan2star == occ_star)[0], self.known_earths).astype(int)\n if np.any(occ_earths):\n fZ = ZL.fZ(Obs, TL, occ_star, occ_startTimes[occ_star], char_mode)\n fEZ = SU.fEZ[occ_earths].to('1/arcsec2').value/u.arcsec**2\n dMag = SU.dMag[occ_earths]\n WA = SU.WA[occ_earths]\n earthlike_inttimes = OS.calc_intTime(TL, occ_star, fZ, fEZ, dMag, WA, char_mode) * (1 + self.charMargin)\n earthlike_inttime = earthlike_inttimes[(earthlike_inttimes < occ_maxIntTime)]\n if len(earthlike_inttime) > 0:\n occ_intTimes[occ_star] = np.max(earthlike_inttime)\n else:\n occ_intTimes[occ_star] = np.max(earthlike_inttimes)\n occ_endTimes = occ_startTimes + (occ_intTimes * char_mode['timeMultiplier']) + Obs.settlingTime + char_mode['syst']['ohTime']\n\n occ_sInds = occ_sInds[(occ_intTimes[occ_sInds] <= occ_maxIntTime)] # Filters targets exceeding maximum intTime\n occ_sInds = occ_sInds[(occ_intTimes[occ_sInds] > 0.0*u.d)] # Filters with an inttime of 0\n \n if occ_maxIntTime.value <= 0:\n occ_sInds = np.asarray([],dtype=int)\n\n if len(sInds.tolist()) > 0:\n intTimes[sInds] = self.calc_targ_intTime(sInds, startTimes[sInds], det_modes[0])\n sInds = sInds[np.where((intTimes[sInds] <= maxIntTime) & (intTimes[sInds] > 0.0*u.d))] # Filters targets exceeding end of OB\n endTimes = startTimes + intTimes\n \n if maxIntTime.value <= 0:\n sInds = np.asarray([],dtype=int)\n \n # 5.2 find spacecraft orbital END positions (for each candidate target), \n # and filter out unavailable targets\n if len(occ_sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try: # endTimes may exist past koTimes so we have an exception to hand this case\n tmpIndsbool = list()\n for i in np.arange(len(occ_sInds)):\n koTimeInd = np.where(np.round(occ_endTimes[occ_sInds[i]].value)-self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(occ_koMap[occ_sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n occ_sInds = occ_sInds[tmpIndsbool]\n del tmpIndsbool\n except:\n occ_sInds = np.asarray([],dtype=int)\n\n if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try: # endTimes may exist past koTimes so we have an exception to hand this case\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(np.round(endTimes[sInds[i]].value)-self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\n tmpIndsbool.append(koMap[sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except:\n sInds = np.asarray([],dtype=int)\n\n # 5.3 Filter off current occulter target star from detection list\n if old_occ_sInd is not None:\n sInds = sInds[np.where(sInds != old_occ_sInd)[0]]\n occ_sInds = occ_sInds[np.where(occ_sInds != old_occ_sInd)[0]]\n\n # 6.1 Filter off any stars visited by the occulter 3 or more times\n if np.any(occ_sInds):\n occ_sInds = occ_sInds[np.where(self.occ_starVisits[occ_sInds] < self.occ_max_visits)[0]]\n\n # 6.2 Filter off coronograph stars with > 3 visits and no detections\n no_dets = np.logical_and((self.starVisits[sInds] > self.n_det_remove), (self.sInd_detcounts[sInds] == 0))\n sInds = 
sInds[np.where(np.invert(no_dets))[0]]\n\n max_dets = np.where(self.sInd_detcounts[sInds] < self.max_successful_dets)[0]\n sInds = sInds[max_dets]\n\n # 7 Filter off cornograph stars with too-long inttimes\n available_time = None\n if self.occ_arrives > TK.currentTimeAbs:\n available_time = self.occ_arrives - TK.currentTimeAbs.copy()\n if np.any(sInds[intTimes[sInds] < available_time]):\n sInds = sInds[intTimes[sInds] < available_time]\n\n # 8 remove occ targets on ignore_stars list\n occ_sInds = np.setdiff1d(occ_sInds, np.intersect1d(occ_sInds, self.ignore_stars))\n\n t_det = 0*u.d\n det_mode = copy.deepcopy(det_modes[0])\n occ_sInd = old_occ_sInd\n\n # 8 Choose best target from remaining\n # if the starshade has arrived at its destination, or it is the first observation\n if np.any(occ_sInds):\n if old_occ_sInd is None or ((TK.currentTimeAbs.copy() + t_det) >= self.occ_arrives and self.ready_to_update):\n occ_sInd = self.choose_next_occulter_target(old_occ_sInd, occ_sInds, occ_intTimes)\n if old_occ_sInd is None:\n self.occ_arrives = TK.currentTimeAbs.copy()\n else:\n self.occ_arrives = occ_startTimes[occ_sInd]\n self.occ_slewTime = slewTimes[occ_sInd]\n self.occ_sd = sd[occ_sInd]\n # if not np.any(sInds):\n # sInd = occ_sInd\n self.ready_to_update = False\n # self.occ_starVisits[occ_sInd] += 1\n elif not np.any(sInds):\n TK.advanceToAbsTime(TK.currentTimeAbs.copy() + 1*u.d)\n continue\n\n if occ_sInd is not None:\n sInds = sInds[np.where(sInds != occ_sInd)[0]]\n\n if np.any(sInds):\n\n # choose sInd of next target\n sInd = self.choose_next_telescope_target(old_sInd, sInds, intTimes[sInds])\n\n # Perform dual band detections if necessary\n if self.WAint[sInd] > det_modes[1]['IWA'] and self.WAint[sInd] < det_modes[1]['OWA']:\n det_mode['BW'] = det_mode['BW'] + det_modes[1]['BW']\n det_mode['inst']['sread'] = det_mode['inst']['sread'] + det_modes[1]['inst']['sread']\n det_mode['inst']['idark'] = det_mode['inst']['idark'] + det_modes[1]['inst']['idark']\n det_mode['inst']['CIC'] = det_mode['inst']['CIC'] + det_modes[1]['inst']['CIC']\n det_mode['syst']['optics'] = np.mean((det_mode['syst']['optics'], det_modes[1]['syst']['optics']))\n det_mode['instName'] = 'combined'\n\n t_det = self.calc_targ_intTime(np.array([sInd]), np.array([startTimes[sInd]]), det_mode)[0]\n\n if t_det > maxIntTime and maxIntTime > 0*u.d:\n t_det = maxIntTime\n if available_time is not None and available_time > 0*u.d:\n if t_det > available_time:\n t_det = available_time.copy().value * u.d\n else:\n sInd = None\n\n # if no observable target, call the TimeKeeping.wait() method\n if not np.any(sInds) and not np.any(occ_sInds):\n self.vprint('No Observable Targets at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n return DRM, None, None, None, None, None, None\n break\n\n else:\n self.logger.info('Mission complete: no more time available')\n self.vprint('Mission complete: no more time available')\n return DRM, None, None, None, None, None, None\n\n if TK.mission_is_over(OS, Obs, det_mode):\n self.logger.info('Mission complete: no more time available')\n self.vprint('Mission complete: no more time available')\n return DRM, None, None, None, None, None, None\n\n occ_earths = np.intersect1d(np.where(SU.plan2star == occ_sInd)[0], self.known_earths).astype(int)\n\n return DRM, sInd, occ_sInd, t_det, sd, occ_sInds, det_mode" ]
[ [ "numpy.intersect1d", "numpy.any", "numpy.asarray", "numpy.argmin", "numpy.logical_and", "numpy.in1d", "numpy.where", "numpy.union1d", "numpy.unique", "numpy.mean", "numpy.round", "numpy.zeros", "numpy.arange", "numpy.hstack", "numpy.max", "numpy.min", "numpy.array", "numpy.invert", "numpy.isinf", "numpy.sqrt" ] ]
turnmanh/scikit-opt
[ "8d9fc314081cc25b767e22a15db4f7ec9d20203b" ]
[ "sko/PSO.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/20\n# @Author : github.com/guofei9987\n\nimport numpy as np\nfrom sko.tools import func_transformer\nfrom .base import SkoBase\nfrom tqdm import trange\n\n\nclass PSO(SkoBase):\n \"\"\"\n Do PSO (Particle swarm optimization) algorithm.\n\n This algorithm was adapted from the earlier works of J. Kennedy and\n R.C. Eberhart in Particle Swarm Optimization [IJCNN1995]_.\n\n The position update can be defined as:\n\n .. math::\n\n x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)\n\n Where the position at the current step :math:`t` is updated using\n the computed velocity at :math:`t+1`. Furthermore, the velocity update\n is defined as:\n\n .. math::\n\n v_{ij}(t + 1) = w * v_{ij}(t) + c_{p}r_{1j}(t)[y_{ij}(t) − x_{ij}(t)]\n + c_{g}r_{2j}(t)[\\hat{y}_{j}(t) − x_{ij}(t)]\n\n Here, :math:`cp` and :math:`cg` are the cognitive and social parameters\n respectively. They control the particle's behavior given two choices: (1) to\n follow its *personal best* or (2) follow the swarm's *global best* position.\n Overall, this dictates if the swarm is explorative or exploitative in nature.\n In addition, a parameter :math:`w` controls the inertia of the swarm's\n movement.\n\n .. [IJCNN1995] J. Kennedy and R.C. Eberhart, \"Particle Swarm Optimization,\"\n Proceedings of the IEEE International Joint Conference on Neural\n Networks, 1995, pp. 1942-1948.\n\n Parameters\n --------------------\n func : function\n The func you want to do optimal\n dim : int\n Number of dimension, which is number of parameters of func.\n pop : int\n Size of population, which is the number of Particles. We use 'pop' to keep accordance with GA\n max_iter : int\n Max of iter iterations\n lb : array_like\n The lower bound of every variables of func\n ub : array_like\n The upper bound of every variables of func\n constraint_eq : tuple\n equal constraint. 
Note: not available yet.\n constraint_ueq : tuple\n unequal constraint\n Attributes\n ----------------------\n pbest_x : array_like, shape is (pop,dim)\n best location of every particle in history\n pbest_y : array_like, shape is (pop,1)\n best image of every particle in history\n gbest_x : array_like, shape is (1,dim)\n general best location for all particles in history\n gbest_y : float\n general best image for all particles in history\n gbest_y_hist : list\n gbest_y of every iteration\n\n\n Examples\n -----------------------------\n see https://scikit-opt.github.io/scikit-opt/#/en/README?id=_3-psoparticle-swarm-optimization\n \"\"\"\n\n def __init__(self, func, n_dim=None, pop=40, max_iter=150, lb=-1e5, ub=1e5, w=0.8, c1=0.5, c2=0.5,\n constraint_eq=tuple(), constraint_ueq=tuple(), verbose=False\n , dim=None):\n\n n_dim = n_dim or dim # support the earlier version\n\n self.func = func_transformer(func)\n self.w = w # inertia\n self.cp, self.cg = c1, c2 # parameters to control personal best, global best respectively\n self.pop = pop # number of particles\n self.n_dim = n_dim # dimension of particles, which is the number of variables of func\n self.max_iter = max_iter # max iter\n self.verbose = verbose # print the result of each iter or not\n\n self.lb, self.ub = np.array(lb) * np.ones(self.n_dim), np.array(ub) * np.ones(self.n_dim)\n assert self.n_dim == len(self.lb) == len(self.ub), 'dim == len(lb) == len(ub) is not True'\n assert np.all(self.ub > self.lb), 'upper-bound must be greater than lower-bound'\n\n self.has_constraint = bool(constraint_ueq)\n self.constraint_ueq = constraint_ueq\n self.is_feasible = np.array([True] * pop)\n\n self.X = np.random.uniform(low=self.lb, high=self.ub, size=(self.pop, self.n_dim))\n v_high = self.ub - self.lb\n self.V = np.random.uniform(low=-v_high, high=v_high, size=(self.pop, self.n_dim)) # speed of particles\n self.Y = self.cal_y() # y = f(x) for all particles\n self.pbest_x = self.X.copy() # personal best location of every particle in history\n self.pbest_y = np.array([[np.inf]] * pop) # best image of every particle in history\n self.gbest_x = self.pbest_x.mean(axis=0).reshape(1, -1) # global best location for all particles\n self.gbest_y = np.inf # global best y for all particles\n self.gbest_y_hist = [] # gbest_y of every iteration\n self.update_gbest()\n\n # record verbose values\n self.record_mode = False\n self.record_value = {'X': [], 'V': [], 'Y': []}\n self.best_x, self.best_y = self.gbest_x, self.gbest_y # history reasons, will be deprecated\n\n def check_constraint(self, x):\n # gather all unequal constraint functions\n for constraint_func in self.constraint_ueq:\n if constraint_func(x) > 0:\n return False\n return True\n\n def update_V(self):\n r1 = np.random.rand(self.pop, self.n_dim)\n r2 = np.random.rand(self.pop, self.n_dim)\n self.V = self.w * self.V + \\\n self.cp * r1 * (self.pbest_x - self.X) + \\\n self.cg * r2 * (self.gbest_x - self.X)\n\n def update_X(self):\n self.X = self.X + self.V\n self.X = np.clip(self.X, self.lb, self.ub)\n\n def cal_y(self):\n # calculate y for every x in X\n self.Y = self.func(self.X).reshape(-1, 1)\n return self.Y\n\n def update_pbest(self):\n '''\n personal best\n :return:\n '''\n self.need_update = self.pbest_y > self.Y\n for idx, x in enumerate(self.X):\n if self.need_update[idx]:\n self.need_update[idx] = self.check_constraint(x)\n\n self.pbest_x = np.where(self.need_update, self.X, self.pbest_x)\n self.pbest_y = np.where(self.need_update, self.Y, self.pbest_y)\n\n def update_gbest(self):\n 
'''\n global best\n :return:\n '''\n idx_min = self.pbest_y.argmin()\n if self.gbest_y > self.pbest_y[idx_min]:\n self.gbest_x = self.X[idx_min, :].copy()\n self.gbest_y = self.pbest_y[idx_min]\n\n def recorder(self):\n if not self.record_mode:\n return\n self.record_value['X'].append(self.X)\n self.record_value['V'].append(self.V)\n self.record_value['Y'].append(self.Y)\n\n def run(self, max_iter=None, precision=1e-7, N=20):\n '''\n precision: None or float\n If precision is None, it will run the number of max_iter steps\n If precision is a float, the loop will stop if continuous N difference between pbest less than precision\n N: int\n '''\n self.max_iter = max_iter or self.max_iter\n c = 0\n for iter_num in trange(self.max_iter, desc=\"step \"):\n self.update_V()\n self.recorder()\n self.update_X()\n self.cal_y()\n self.update_pbest()\n self.update_gbest()\n if precision is not None:\n tor_iter = np.amax(self.pbest_y) - np.amin(self.pbest_y)\n if tor_iter < precision:\n c = c + 1\n if c > N:\n break\n else:\n c = 0\n if self.verbose:\n print('Iter: {}, Best fit: {} at {}'.format(iter_num, self.gbest_y, self.gbest_x))\n\n self.gbest_y_hist.append(self.gbest_y)\n self.best_x, self.best_y = self.gbest_x, self.gbest_y\n return self.best_x, self.best_y\n\n fit = run\n" ]
[ [ "numpy.random.uniform", "numpy.ones", "numpy.amax", "numpy.amin", "numpy.all", "numpy.clip", "numpy.random.rand", "numpy.array", "numpy.where" ] ]
yuanz271/PyDSTool
[ "886c143cdd192aea204285f3a1cb4968c763c646" ]
[ "PyDSTool/Toolbox/optimizers/line_search/backtracking_search.py" ]
[ "\n# Matthieu Brucher\n# Last Change : 2007-08-26 19:43\n\nimport numpy\n\nclass BacktrackingSearch(object):\n \"\"\"\n The backtracking algorithm for enforcing Armijo rule\n \"\"\"\n def __init__(self, rho = 0.1, alpha_step = 1., alpha_factor = 0.5, **kwargs):\n \"\"\"\n Can have :\n - a coefficient for the Armijo rule (rho = 0.1)\n - an alpha factor to modulate the step (alpha_step = 1.)\n - an alpha factor < 1 that will decrease the step size until the rule is valid (alpha_factor = 0.5)\n \"\"\"\n self.rho = rho\n self.stepSize = alpha_step\n self.stepFactor = alpha_factor\n\n def __call__(self, origin, function, state, **kwargs):\n \"\"\"\n Tries to find an acceptable candidate\n \"\"\"\n direction = state['direction']\n if 'initial_alpha_step' in state:\n alpha = state['initial_alpha_step']\n else:\n alpha = self.stepSize\n\n f1temp = function(origin)\n gradient = state['gradient']\n while(True):\n ftemp = function(origin + alpha * direction)\n #Armijo rule\n if ftemp <= f1temp + self.rho * alpha * numpy.dot(gradient, direction):\n state['alpha_step'] = alpha\n return origin + alpha * direction\n alpha = alpha * self.stepFactor\n" ]
[ [ "numpy.dot" ] ]
JohanComparat/nbody-npt-functions
[ "a034db4e5a9b2f87dc42eeb6059c4dd280589e4a" ]
[ "bin/bin_SMHMr/MD10_add_Ms_2.py" ]
[ "# overall python packages\nimport glob\nimport astropy.io.fits as fits\nimport os\nimport time\nimport numpy as n\nimport sys \n# specific functions\nfrom scipy.stats import norm\n# dedicated packages\n#import StellarMass\n\nmeanSM= lambda Mh, z : n.log10(Mh * 2. * ( 0.0351 - 0.0247 * z/(1.+z)) / ((Mh/ (10**(11.79 + 1.5 * z/(1.+z))) )**(- 0.9 + 0.5 * z/(1.+z)) + ( Mh /(10**(11.79 + 1.5 * z/(1.+z))) )**(0.67 + 0.2 * z/(1.+z)) ) )\n\nfun = lambda mmm : norm.rvs( loc = mmm, scale = 0.15 )\n\n\ndef create_catalogs_out(fileList, z):\n\t\"\"\"\n\tAdds stellar mass using the Moster et al. 2013 model to the rockstar outputs. \n\t\"\"\"\n\tfor fileName in fileList:\n\t\tt0=time.time()\n\t\toutFile = fileName[:-5]+\"_Ms.fits\"\n\t\thd = fits.open(fileName)\n\t\tmean_SM = meanSM(10**hd[1].data['mvir']/0.6777, z)\n\t\t#print \"mean mgal\", mean_SM\n\t\tMgal_mvir_Mo13 = n.array([fun(el) for el in mean_SM]) # n.array(pool.starmap( fun, mean_SM ))\n\t\t#print \"res mgal\", Mgal_mvir_Mo13\n\t\t#print \"diff mgal - mvir\", n.mean(mean_SM-Mgal_mvir_Mo13) \n\t\t#print \"mean, std magl - mh\",n.mean(mean_SM-Mgal_mvir_Mo13), n.std(mean_SM-Mgal_mvir_Mo13)\n\t\tsel = (hd[1].data['mvir']>0)\n\t\t\n\t\tMgal_mvir_Mo13[sel==False] = n.zeros_like(Mgal_mvir_Mo13[sel==False])\n\t\t\n\t\tcol00 = fits.Column(name='stellar_mass_Mo13_mvir',format='D', unit='logMsun', array = Mgal_mvir_Mo13 )\n\t\tcol01 = fits.Column(name='stellar_mass_reliable', format='L', array = sel )\n\n\t\t#define the table hdu \n\t\tcolArray = []\n\t\tcolArray.append(hd[1].columns[0])\n\t\t# Mvir stellar mass\n\t\tcolArray.append(col00)\n\t\tcolArray.append(col01)\n\n\t\thdu_cols = fits.ColDefs(colArray)\n\t\ttb_hdu = fits.BinTableHDU.from_columns( hdu_cols )\n\n\t\t#define the header\n\t\tprihdr = fits.Header()\n\t\tprihdr['author'] = 'JC'\n\t\tprihdr['SAMfile'] = os.path.basename(fileName)\n\t\tprihdu = fits.PrimaryHDU(header=prihdr)\n\t\t#writes the file\n\t\tthdulist = fits.HDUList([prihdu, tb_hdu])\n\t\tif os.path.isfile(outFile):\n\t\t\tos.system(\"rm \"+outFile)\n\n\t\tthdulist.writeto(outFile)\n\t\tprint( time.time()-t0)\n\n# open the output file_type\nsumm = fits.open(os.path.join(os.environ[\"MD10\"], 'output_MD_1.0Gpc.fits'))[1].data\t\n\nfor ii in range(len(summ))[18:27]:\n\tprint( summ[ii])\n\tfileList = n.array(glob.glob(os.path.join(os.environ[\"MD10\"], 'work_agn', 'out_'+summ['snap_name'][ii]+'_SAM_Nb_?.fits')))\n\t#outFile = fileName[:-5]+\"_Ms.fits\"\n\tz = summ['redshift'][ii]\n\tprint( fileList)\n\tcreate_catalogs_out(fileList, z)\n\n\n" ]
[ [ "scipy.stats.norm.rvs", "numpy.zeros_like", "numpy.log10" ] ]
kellielu/ReAgent
[ "c538992672220453cdc95044def25c4e0691a8b0" ]
[ "reagent/optimizer/soft_update.py" ]
[ "#!/usr/bin/env python3\n\nimport torch\n\n\nclass SoftUpdate(torch.optim.Optimizer):\n def __init__(self, target_params, source_params, tau=0.1):\n \"\"\"\n Perform soft-update on target_params. Soft-update gradually blends\n source_params into target_params with this update equation:\n\n target_param = tau * source_param + (1 - tau) * target_param\n \"\"\"\n target_params = list(target_params)\n source_params = list(source_params)\n\n if len(target_params) != len(source_params):\n raise ValueError(\n \"target and source must have the same number of parameters\"\n )\n\n for t_param, s_param in zip(target_params, source_params):\n if t_param.shape != s_param.shape:\n raise ValueError(\n \"The shape of target parameter doesn't match that of the source\"\n )\n\n params = target_params + source_params\n defaults = dict(\n tau=tau, lr=1.0\n ) # set a dummy learning rate because optimizers are expected to have one\n super().__init__(params, defaults)\n\n for group in self.param_groups:\n tau = group[\"tau\"]\n if tau > 1.0 or tau < 0.0:\n raise ValueError(f\"tau should be in [0.0, 1.0]; got {tau}\")\n\n @classmethod\n def make_optimizer_scheduler(cls, target_params, source_params, tau):\n su = cls(target_params, source_params, tau)\n return {\"optimizer\": su}\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n params = group[\"params\"]\n n = len(params)\n tau = group[\"tau\"]\n for target_param, source_param in zip(params[: n // 2], params[n // 2 :]):\n if target_param is source_param:\n # skip soft-updating when the target network share s the parameter with\n # the network being train.\n continue\n new_param = tau * source_param.data + (1.0 - tau) * target_param.data\n target_param.data.copy_(new_param)\n return loss\n" ]
[ [ "torch.no_grad", "torch.enable_grad" ] ]
snandasena/udacity-dl
[ "7ea13ec7ebd992f1199f43bd5300782436ed71e5" ]
[ "src/direction_of_the_gradient.py" ]
[ "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pickle\n\n\n# Read in an image\nimage = mpimg.imread('../images/signs_vehicles_xygrad.png')\n\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n\n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # 2) Take the gradient in x and y separately\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1,0,ksize=sobel_kernel )\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0,1,ksize=sobel_kernel )\n # 3) Take the absolute value of the x and y gradients\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient\n direction_gradient = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n # 5) Create a binary mask where direction thresholds are met\n binary_output = np.zeros_like(direction_gradient)\n binary_output[(direction_gradient >= thresh[0]) & (direction_gradient <= thresh[1])] = 1\n # 6) Return this mask as your binary_output image\n return binary_output\n\n\n# Run the function\ndir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))\n# Plot the result\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\nf.tight_layout()\nax1.imshow(image)\nax1.set_title('Original Image', fontsize=50)\nax2.imshow(dir_binary, cmap='gray')\nax2.set_title('Thresholded Grad. Dir.', fontsize=50)\nplt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\nplt.show()" ]
[ [ "numpy.zeros_like", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "numpy.absolute", "matplotlib.image.imread" ] ]
jakedolan443/search-algorithm-visualizer
[ "331c22886ef8017add16bc63a8e75df9643f4fe9" ]
[ "gui.py" ]
[ "import tkinter as tk\nimport tktools\nfrom algs.astar import astar\nfrom algs.dijkstra import dijkstra\nimport numpy as np\nimport threading\nfrom tkinter import filedialog\nimport random\nfrom grid import Grid\n\n\n\nclass Frame(tk.Frame):\n def __init__(self, *args):\n tk.Frame.__init__(self, *args)\n \n def get_root(self):\n return self.master.get_root()\n\nclass Mainframe(tk.Frame):\n def __init__(self, *args):\n tk.Frame.__init__(self, *args)\n self.get_root().mainframe = self\n \n self.images = {}\n self.images['start.png'] = tk.PhotoImage(file=\"images/start.png\")\n self.images['finish.png'] = tk.PhotoImage(file=\"images/finish.png\")\n self.images['wall.png'] = tk.PhotoImage(file=\"images/wall.png\")\n self.images['remove.png'] = tk.PhotoImage(file=\"images/remove.png\")\n \n \n gridframe = Frame(self)\n gridframe.pack(side=tk.RIGHT, anchor='n', expand=True, fill='y')\n menuframe = Frame(self)\n menuframe.pack(side=tk.LEFT, anchor='w', expand=True, fill='y')\n \n self.grid = Grid(gridframe)\n self.grid.pack(anchor='e')\n \n \n \n \n text = tk.Label(menuframe, text=\"\\n Search Algorithm \\nVisualizer\", font='System, 13')\n text.pack(side=tk.TOP)\n hyperlink = tktools.Hyperlink(menuframe, url=\"https://github.com/jakedolan443/search-algorithm-visualizer\", text=\"https://github.com/jakedolan443/\\nsearch-algorithm-visualizer\")\n hyperlink.pack()\n separation = tk.Label(menuframe, text=\"\\n\")\n separation.pack(fill='x')\n \n optionList = ('Dijkstra', 'Astar')\n self.v = tk.StringVar()\n self.v.set(optionList[0])\n self.v.trace(\"w\", self.set_alg)\n algmenu = tk.OptionMenu(menuframe, self.v, *optionList)\n algmenu.config(width=13, font='System, 8')\n algmenu.pack()\n \n status_label = tk.Label(menuframe, textvariable=self.get_root().status_var, font='System, 8')\n status_label.pack()\n \n separation = tk.Label(menuframe, text=\"\\n\")\n separation.pack(fill='x')\n \n action_menu = tk.LabelFrame(menuframe, text=\"Actions\")\n simulate_button = tk.Button(action_menu, text=\"Simulate\", font='System, 10', width=13, command=self.simulate)\n simulate_button.pack()\n reset_button = tk.Button(action_menu, text=\"Reset\", font='System, 10', width=13, command=self.grid.reset)\n reset_button.pack()\n action_menu.pack()\n separation = tk.Label(menuframe, text=\"\\n\")\n separation.pack(fill='x')\n \n togglemenu = tktools.ToggleMenu(menuframe, text=\"Tools\", command=self.set_tool)\n self.tool_menu = togglemenu\n togglemenu.pack()\n togglemenu.add_toggle(\"Start\", self.images['start.png'], \"start\")\n togglemenu.add_toggle(\"Finish\", self.images['finish.png'], \"finish\")\n togglemenu.add_toggle(\"Wall\", self.images['wall.png'], \"wall\")\n togglemenu.add_toggle(\"Remove\", self.images['remove.png'], \"remove\")\n menuframe.bind(\"<Button-1>\", lambda e: togglemenu.raise_all())\n separation = tk.Label(menuframe, text=\"\\n\")\n separation.pack(fill='x')\n \n wall_menu = tk.LabelFrame(menuframe, text=\"Grid\")\n clear_button = tk.Button(wall_menu, text=\"Clear\", font='System, 10', width=13, command=self.grid.draw)\n clear_button.pack()\n random_button = tk.Button(wall_menu, text=\"Random\", font='System, 10', width=13, command=self.grid.random_draw)\n random_button.pack()\n load_blueprint = tk.Button(wall_menu, text=\"Load Blueprint\", font='System, 10', width=13, command=self.import_grid)\n load_blueprint.pack()\n save_blueprint = tk.Button(wall_menu, text=\"Save Blueprint\", font='System, 10', width=13, command=self.export_grid)\n save_blueprint.pack()\n wall_menu.pack()\n \n def 
get_root(self):\n return self.master.get_root()\n \n def import_grid(self):\n filename = filedialog.askopenfilename(defaultextension=\"*.visgrid\", initialdir=\"templates/\", filetypes=[('Search Algorithm Grid File','*.visgrid')])\n f = open(\"{}\".format(filename), \"rb\")\n data = f.read().decode()\n f.close()\n \n \n metadata = data.split(\"///\\n\")[0]\n self.get_root().options['grid_size'][0], self.get_root().options['grid_size'][1] = int(metadata.split(\"x\")[0]), int(metadata.split(\"x\")[1]) \n \n grid = data.split(\"///\\n\")[1]\n grid = grid.split(\"\\n\")\n for i in range(len(grid)):\n grid[i] = grid[i].split(\",\")\n for j in range(len(grid[i])):\n grid[i][j] = int(grid[i][j])\n \n self.grid.draw(data=grid)\n \n def export_grid(self):\n metadata = \"{}x{}\".format(self.get_root().options['grid_size'][0], self.get_root().options['grid_size'][1])\n grid = self.grid.export_walls()\n grid = \"{}///\\n{}\".format(metadata, grid)\n\n f = filedialog.asksaveasfile(mode='wb', defaultextension=\"*.visgrid\", initialdir=\"templates/\", filetypes=[('Search Algorithm Grid File','*.visgrid')])\n if f is None: \n return\n f.write(grid.encode())\n f.close() \n \n def deselect_all(self):\n self.tool_menu.raise_all()\n \n def set_tool(self, tool):\n self.get_root().mode = tool\n \n def set_alg(self, *args):\n self.get_root().algorithm = self.v.get()\n \n def simulate(self):\n if not self.grid.in_search:\n self.tool_menu.raise_all()\n self.grid.reset()\n grid = self.grid.get()\n grid = np.asarray(grid)\n thread = threading.Thread(target = self.get_root().algorithms[self.get_root().algorithm], args = (self.grid, grid, self.get_root().coord_data['start'], self.get_root().coord_data['finish'], ))\n thread.start()\n \n\n\nclass Menu(tk.Menu):\n def __init__(self, *args):\n tk.Menu.__init__(self, *args)\n self.get_root().menu = self\n\n submenu = tk.Menu(self, tearoff=0)\n \n self.grid_size_var = tk.StringVar()\n submenu.add_radiobutton(label=\"8x8\", command=self._size_update, variable=self.grid_size_var)\n submenu.add_radiobutton(label=\"16x16\", command=self._size_update, variable=self.grid_size_var)\n submenu.add_radiobutton(label=\"32x32\", command=self._size_update, variable=self.grid_size_var)\n submenu.add_radiobutton(label=\"64x64\", command=self._size_update, variable=self.grid_size_var)\n submenu.add_radiobutton(label=\"128x128\", command=self._size_update, variable=self.grid_size_var)\n self.add_cascade(label=\"Grid\", menu=submenu)\n \n submenu = tk.Menu(self, tearoff=0)\n \n self.search_speed_var = tk.StringVar(); self.search_speed_var.set(\"1ms\")\n submenu.add_radiobutton(label=\"Max\", command=self._speed_update, variable=self.search_speed_var)\n submenu.add_radiobutton(label=\"1ms\", command=self._speed_update, variable=self.search_speed_var)\n submenu.add_radiobutton(label=\"10ms\", command=self._speed_update, variable=self.search_speed_var)\n submenu.add_radiobutton(label=\"100ms\", command=self._speed_update, variable=self.search_speed_var)\n\n self.add_cascade(label=\"Speed\", menu=submenu)\n \n def get_root(self):\n return self.master.get_root()\n\n def _speed_update(self):\n if self.search_speed_var.get() == \"Max\":\n self.search_speed_var.set(\"0\")\n self.get_root().options['speed'] = float(self.search_speed_var.get().split(\"ms\")[0])\n\n def _size_update(self):\n self.get_root().options['grid_size'] = (list(map(int, self.grid_size_var.get().split(\"x\"))))\n self.get_root().grid.draw()\n\n\n\n\nclass App(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n 
self.geometry(\"980x860\")\n self.title(\"Search Algorithm Visualizer\")\n self.tk_setPalette(background='#DDDDDD')\n self.resizable(False, False)\n \n self.status_var = tk.StringVar(); self.status_var.set(\"\\n\\n\")\n self.algorithm = \"Dijkstra\"\n self.algorithms = {\"Dijkstra\":dijkstra, \"Astar\":astar}\n self.options = {'grid_size':[64, 64], 'speed':0.1}\n \n menu = Menu(self)\n self.config(menu=menu)\n \n self.mode = None\n mainframe = Mainframe(self)\n mainframe.pack(fill='both', expand=True)\n \n \n \n self.mainloop()\n \n def get_root(self):\n return self\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.asarray" ] ]
modichirag/fm_eor
[ "1615fea4e2d617bb6ef00770a49698901227daa8" ]
[ "codes/cosmo4d/pmeshengine.py" ]
[ "from __future__ import absolute_import\n\nimport numpy\nfrom abopt.vmad2 import ZERO, Engine, statement, programme, CodeSegment, Literal\nfrom abopt.abopt2 import VectorSpace\nfrom pmesh.pm import ParticleMesh, RealField, ComplexField\n\ndef nyquist_mask(factor, v):\n # any nyquist modes are set to 0 if the transfer function is complex\n mask = (numpy.imag(factor) == 0) | \\\n ~numpy.bitwise_and.reduce([(ii == 0) | (ii == ni // 2) for ii, ni in zip(v.i, v.Nmesh)])\n return factor * mask\n\nclass ParticleMeshVectorSpace(VectorSpace):\n def __init__(self, pm, q):\n self.qshape = q.shape\n self.pm = pm\n\n def addmul(self, a, b, c, p=1):\n if isinstance(b, RealField):\n r = b.copy()\n r[...] = a + b * c ** p\n return r\n elif isinstance(b, ComplexField):\n r = b.copy()\n if isinstance(c, ComplexField):\n c = c.plain\n if isinstance(a, ComplexField):\n a = a.plain\n r.plain[...] = a + b.plain * c ** p\n return r\n elif numpy.isscalar(b):\n return a + b * c ** p\n elif isinstance(b, numpy.ndarray):\n assert len(b) == self.qshape[0]\n return a + b * c ** p\n else:\n raise TypeError(\"type unknown\")\n\n def dot(self, a, b):\n if type(a) != type(b):\n raise TypeError(\"type mismatch\")\n\n if isinstance(a, RealField):\n return a.cdot(b)\n elif isinstance(a, ComplexField):\n return a.cdot(b)\n elif isinstance(a, numpy.ndarray):\n assert len(a) == len(b)\n assert len(a) == self.qshape[0]\n return self.pm.comm.allreduce(a.dot(b))\n else:\n raise TypeError(\"type unknown\")\n\nclass ParticleMeshEngine(Engine):\n def __init__(self, pm, q=None):\n self.pm = pm\n if q is None:\n q = pm.generate_uniform_particle_grid(shift=0.0, dtype='f4')\n self.q = q\n self.vs = ParticleMeshVectorSpace(self.pm, self.q)\n\n @programme(ain=['s'], aout=['x'])\n def get_x(engine, s, x):\n code = CodeSegment(engine)\n code.add(x1='s', x2=Literal(engine.q), y='x')\n return code\n\n @statement(aout=['real'], ain=['complex'])\n def c2r(engine, real, complex):\n real[...] = complex.c2r()\n\n @c2r.defvjp\n def _(engine, _real, _complex):\n _complex[...] = _real.c2r_vjp()\n\n @c2r.defjvp\n def _(engine, real_, complex_):\n real_[...] = complex_.c2r()\n\n @statement(aout=['complex'], ain=['real'])\n def r2c(engine, complex, real):\n complex[...] = real.r2c()\n\n @r2c.defvjp\n def _(engine, _complex, _real):\n _real[...] = _complex.r2c_vjp()\n\n @r2c.defjvp\n def _(engine, complex_, real_):\n complex_[...] = real_.r2c()\n\n @statement(aout=['complex'], ain=['complex'])\n def decompress(engine, complex):\n return\n\n @decompress.defvjp\n def _(engine, _complex):\n _complex.decompress_vjp(out=Ellipsis)\n\n @decompress.defjvp\n def _(engine, complex_):\n pass # XXX: is this correct?\n\n @staticmethod\n def _lowpass_filter(k, v, Neff):\n k0s = 2 * numpy.pi / v.BoxSize\n mask = numpy.bitwise_and.reduce([abs(ki) <= Neff//2 * k0 for ki, k0 in zip(k, k0s)])\n return v * mask\n\n @statement(aout=['real'], ain=['real'])\n def lowpass(engine, real, Neff):\n real.r2c(out=Ellipsis).apply(\n lambda k, v: engine._lowpass_filter(k, v, Neff),\n out=Ellipsis).c2r(out=Ellipsis)\n\n @lowpass.defvjp\n def _(engine, _real, Neff):\n _real.c2r_vjp().apply(\n lambda k, v: engine._lowpass_filter(k, v, Neff),\n out=Ellipsis).r2c_vjp(out=Ellipsis)\n\n @lowpass.defjvp\n def _(engine, real_, Neff):\n real_.r2c().apply(\n lambda k, v: engine._lowpass_filter(k, v, Neff),\n out=Ellipsis).c2r(out=Ellipsis)\n\n @statement(aout=['layout'], ain=['x'])\n def decompose(engine, layout, x):\n pm = engine.pm\n layout[...] 
= pm.decompose(x)\n\n @decompose.defvjp\n def _(engine, _layout, _x):\n _x[...] = ZERO\n\n @decompose.defjvp\n def _(engine, layout_, x_):\n layout_[...] = ZERO\n\n @statement(aout=['mesh'], ain=['x', 'layout', 'mass'])\n def paint(engine, x, mesh, layout, mass=Literal(1.0)):\n pm = engine.pm\n N = pm.comm.allreduce(len(x))\n mesh[...] = pm.paint(x, mass=mass, layout=layout, hold=False)\n # to have 1 + \\delta on the mesh\n mesh[...][...] *= 1.0 * pm.Nmesh.prod() / N\n\n @paint.defvjp\n def _(engine, _x, _mesh, x, mass, _mass, layout, _layout):\n pm = engine.pm\n _layout[...] = ZERO\n N = pm.comm.allreduce(len(x))\n _x[...], _mass[...] = pm.paint_vjp(_mesh, x, layout=layout, mass=mass)\n _x[...][...] *= 1.0 * pm.Nmesh.prod() / N\n _mass[...][...] *= 1.0 * pm.Nmesh.prod() / N\n\n @paint.defjvp\n def _(engine, x_, mesh_, x, layout, layout_, mass, mass_):\n pm = engine.pm\n if x_ is ZERO: x_ = None\n if mass_ is ZERO: mass_ = None # force cast it to a scale 0\n mesh_[...] = pm.paint_jvp(x, v_mass=mass_, mass=mass, v_pos=x_, layout=layout)\n\n @statement(aout=['mesh'], ain=['x', 'layout', 'mass'])\n def paintdirect(engine, x, mesh, layout, mass=Literal(1.0)):\n pm = engine.pm\n N = pm.comm.allreduce(len(x))\n mesh[...] = pm.paint(x, mass=mass, layout=layout, hold=False)\n\n @paintdirect.defvjp\n def _(engine, _x, _mesh, x, mass, _mass, layout, _layout):\n pm = engine.pm\n _layout[...] = ZERO\n N = pm.comm.allreduce(len(x))\n _x[...], _mass[...] = pm.paint_vjp(_mesh, x, layout=layout, mass=mass)\n\n @paintdirect.defjvp\n def _(engine, x_, mesh_, x, layout, layout_, mass, mass_):\n pm = engine.pm\n if x_ is ZERO: x_ = None\n if mass_ is ZERO: mass_ = None # force cast it to a scale 0\n mesh_[...] = pm.paint_jvp(x, v_mass=mass_, mass=mass, v_pos=x_, layout=layout)\n\n\n @statement(aout=['value'], ain=['x', 'mesh', 'layout'])\n def readout(engine, value, x, mesh, layout, resampler=None):\n pm = engine.pm\n N = pm.comm.allreduce(len(x))\n value[...] = mesh.readout(x, layout=layout, resampler=resampler)\n\n @readout.defvjp\n def _(engine, _value, _x, _mesh, x, layout, mesh, resampler):\n pm = engine.pm\n _mesh[...], _x[...] = mesh.readout_vjp(x, _value, layout=layout, resampler=resampler)\n\n @readout.defjvp\n def _(engine, value_, x_, mesh_, x, layout, mesh, layout_, resampler):\n pm = engine.pm\n if mesh_ is ZERO: mesh_ = None\n if x_ is ZERO: x_ = None\n value_[...] = mesh.readout_jvp(x, v_self=mesh_, v_pos=x_, layout=layout, resampler=resampler)\n\n @statement(aout=['complex'], ain=['complex'])\n def transfer(engine, complex, tf):\n complex.apply(lambda k, v: nyquist_mask(tf(k), v) * v, out=Ellipsis)\n \n @transfer.defvjp\n def _(engine, tf, _complex):\n _complex.apply(lambda k, v: nyquist_mask(numpy.conj(tf(k)), v) * v, out=Ellipsis)\n\n @transfer.defjvp\n def _(engine, tf, complex_):\n complex_.apply(lambda k, v: nyquist_mask(tf(k), v) * v, out=Ellipsis)\n\n @statement(aout=['residual'], ain=['model'])\n def residual(engine, model, data, sigma, residual):\n \"\"\"\n residual = (model - data) / sigma\n\n J = 1 / sigma\n \"\"\"\n residual[...] = (model - data) / sigma\n\n @residual.defvjp\n def _(engine, _model, _residual, data, sigma):\n _model[...] = _residual / sigma\n\n @residual.defjvp\n def _(engine, model_, residual_, data, sigma):\n residual_[...] = model_ / sigma\n\n# @statement(ain=['vec'], aout=['scalar'])\n# def vec1_to_scalar(engine, vec1, scalar):\n# tmp = \n#\n# @vec1_to_scalar.defvjp\n# def _(engine, _attribute, _value, dim):\n# _value[...] 
= _attribute[..., dim]\n#\n# @vec1_to_scalar.defjvp\n# def _(engine, attribute_, value_, dim):\n# attribute_[..., dim] = value_\n#\n @statement(ain=['attribute', 'value'], aout=['attribute'])\n def assign_component(engine, attribute, value, dim):\n attribute[..., dim] = value\n\n @assign_component.defvjp\n def _(engine, _attribute, _value, dim):\n _value[...] = _attribute[..., dim]\n\n @assign_component.defjvp\n def _(engine, attribute_, value_, dim):\n attribute_[..., dim] = value_\n\n @statement(ain=['attribute', 'value'], aout=['attribute'])\n def assign_chunk(engine, attribute, value, start, end):\n attribute[..., start:end] = value\n\n @assign_chunk.defvjp\n def _(engine, _attribute, _value, start, end):\n _value[...] = _attribute[..., start:end]\n\n @assign_chunk.defjvp\n def _(engine, attribute_, value_, start, end):\n attribute_[..., start:end] = value_\n\n @statement(ain=['x'], aout=['y'])\n def assign(engine, x, y):\n y[...] = x.copy()\n\n @assign.defvjp\n def _(engine, _y, _x):\n _x[...] = _y\n\n @assign.defjvp\n def _(engine, y_, x_, x):\n try:\n y_[...] = x.copy()\n y_[...][...] = x_\n except:\n y_[...] = x_\n\n @statement(ain=['x1', 'x2'], aout=['y'])\n def add(engine, x1, x2, y):\n y[...] = x1 + x2\n\n @add.defvjp\n def _(engine, _y, _x1, _x2):\n _x1[...] = _y\n _x2[...] = _y\n\n @add.defjvp\n def _(engine, y_, x1_, x2_):\n y_[...] = x1_ + x2_\n\n @statement(aout=['y'], ain=['x1', 'x2'])\n def multiply(engine, x1, x2, y):\n y[...] = x1 * x2\n\n @multiply.defvjp\n def _(engine, _x1, _x2, _y, x1, x2):\n _x1[...] = _y * x2\n _x2[...] = _y * x1\n\n @multiply.defjvp\n def _(engine, x1_, x2_, y_, x1, x2):\n y_[...] = x1_ * x2 + x1 * x2_\n\n\n @statement(aout=['y'], ain=['x1', 'x2'])\n def divide(engine, x1, x2, y):\n y[...] = x1 / x2\n\n @divide.defvjp\n def _(engine, _x1, _x2, _y, x1, x2):\n _x1[...] = _y / x2\n _x2[...] = _y * x1 / x2**2 *-1\n\n @divide.defjvp\n def _(engine, x1_, x2_, y_, x1, x2):\n y_[...] = x1_ / x2 - x1 / x2**2 * x2_\n\n\n @statement(aout=['y'], ain=['x'])\n def matrix_cmul(engine, x, y, W):\n y[...] = numpy.dot(x, W)\n\n @matrix_cmul.defvjp\n def _(engine, _x, _y, W):\n _x[...] = numpy.dot(_y, W.T)\n\n @matrix_cmul.defjvp\n def _(engine, x_, y_, W):\n y_[...] = numpy.dot(x_, W)\n\n\n @statement(ain=['x'], aout=['y'])\n def to_scalar(engine, x, y):\n if isinstance(x, RealField):\n y[...] = x.cnorm()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-2 norm of complex is not a good idea, because the gradient propagation is ambiguous\")\n else:\n y[...] = engine.pm.comm.allreduce((x[...] ** 2).sum(dtype='f8'))\n\n @to_scalar.defvjp\n def _(engine, _y, _x, x):\n _x[...] = x * (2 * _y)\n\n @to_scalar.defjvp\n def _(engine, y_, x_, x):\n if isinstance(x, RealField):\n y_[...] = x.cdot(x_) * 2\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-2 norm of complex is not a good idea, because the gradient propagation is ambiguous\")\n else:\n y_[...] = engine.pm.comm.allreduce((x * x_).sum(dtype='f8')) * 2\n\n\n @statement(ain=['x'], aout=['y'])\n def L1norm(engine, x, y):\n if isinstance(x, RealField):\n y[...] = abs(x).csum()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-1 norm of complex is not a good idea\")\n else:\n y[...] = engine.pm.comm.allreduce(abs(x[...]).sum(dtype='f8'))\n\n @L1norm.defvjp\n def _(engine, _y, _x, x):\n _x[...] = x.copy()\n _x[...][...] 
= _y * numpy.sign(x)\n #print(type(_y), type(numpy.sign(x)), type(_y * numpy.sign(x)))\n\n @L1norm.defjvp\n def _(engine, y_, x_, x):\n if isinstance(x, RealField):\n y_[...] = ((x_) * numpy.sign(x)).csum()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-1 norm of complex is not a good idea, because the gradient propagation is ambiguous\")\n else:\n y_[...] = engine.pm.comm.allreduce((numpy.sign(x) * x_).sum(dtype='f8')) \n #y_[...] = engine.pm.comm.allreduce((x_).sum(dtype='f8')) \n\n\n\n @statement(ain=['x'], aout=['y'])\n def total(engine, x, y):\n if isinstance(x, RealField):\n y[...] = x.csum()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the total of complex is not a good idea\")\n else:\n y[...] = engine.pm.comm.allreduce((x[...]).sum(dtype='f8'))\n\n @total.defvjp\n def _(engine, _y, _x, x):\n _x[...] = x.copy()\n _x[...][...] = _y \n #print(type(_y), type(numpy.sign(x)), type(_y * numpy.sign(x)))\n\n @total.defjvp\n def _(engine, y_, x_, x):\n if isinstance(x, RealField):\n y_[...] = ((x_) ).csum()\n elif isinstance(x, ComplexField):\n raise TypeError(\"Computing the L-1 norm of complex is not a good idea, because the gradient propagation is ambiguous\")\n else:\n y_[...] = engine.pm.comm.allreduce((x_).sum(dtype='f8')) \n #y_[...] = engine.pm.comm.allreduce((x_).sum(dtype='f8')) \n\n\ndef check_grad(code, yname, xname, init, eps, rtol, atol=1e-12, verbose=False, toscalar=True):\n from numpy.testing import assert_allclose\n engine = code.engine\n comm = engine.pm.comm\n if isinstance(init[xname], numpy.ndarray):\n x = init[xname]\n if x.ndim == 2:\n cshape = engine.pm.comm.allreduce(x.shape[0]), x.shape[1]\n else:\n cshape = engine.pm.comm.allreduce(x.shape[0]),\n\n def cperturb(pos, ind, eps):\n pos = pos.copy()\n start = sum(comm.allgather(pos.shape[0])[:comm.rank])\n end = sum(comm.allgather(pos.shape[0])[:comm.rank + 1])\n if ind[0] >= start and ind[0] < end:\n ind1 = tuple([ind[i] - start if i == 0 else ind[i] for i in range(len(ind))])\n old = pos[ind1]\n coord = pos[ind[0]-start].copy()\n pos[ind1] = old + eps\n new = pos[ind1]\n else:\n old, new, coord = 0, 0, 0\n diff = comm.allreduce(new - old)\n return pos\n\n def cget(pos, ind):\n if pos is ZERO: return 0\n start = sum(comm.allgather(pos.shape[0])[:comm.rank])\n end = sum(comm.allgather(pos.shape[0])[:comm.rank + 1])\n if ind[0] >= start and ind[0] < end:\n ind1 = tuple([ind[i] - start if i == 0 else ind[i] for i in range(len(ind))])\n old = pos[ind1]\n else:\n old = 0\n return comm.allreduce(old)\n\n elif isinstance(init[xname], RealField):\n cshape = init[xname].cshape\n def cget(real, index):\n if real is ZERO: return 0\n return real.cgetitem(index)\n\n def cperturb(real, index, eps):\n old = real.cgetitem(index)\n r1 = real.copy()\n r1.csetitem(index, old + eps)\n return r1\n\n code = code.copy()\n if toscalar:\n code.to_scalar(x=yname, y='y')\n else:\n code.assign(x=yname, y='y')\n\n y, tape = code.compute('y', init=init, return_tape=True)\n vjp = tape.get_vjp()\n jvp = tape.get_jvp()\n\n _x = vjp.compute('_' + xname, init={'_y' : 1.0})\n\n center = init[xname]\n init2 = init.copy()\n ng_bg = []\n fg_bg = []\n for index in numpy.ndindex(*cshape):\n x1 = cperturb(center, index, eps)\n x0 = cperturb(center, index, -eps)\n analytic = cget(_x, index)\n init2[xname] = x1\n y1 = code.compute('y', init2)\n init2[xname] = x0\n y0 = code.compute('y', init2)\n\n base = (x1 - x0)\n y_ = jvp.compute('y_', init={xname + '_': base})\n\n #logger.DEBUG(\"CHECKGRAD: %s\" % (y1, y0, y1 - 
y0, get_pos(code.engine, _x, index) * 2 * eps))\n if verbose:\n print(\"CHECKGRAD: \", index, (x1 - x0)[...].max(), y, y1 - y0, y_, cget(_x, index) * 2 * eps)\n\n fg_bg.append([index, y_, cget(_x, index) * 2 * eps])\n\n ng_bg.append([index, y1 - y0, cget(_x, index) * 2 * eps])\n\n fg_bg = numpy.array(fg_bg, dtype='O')\n ng_bg = numpy.array(ng_bg, dtype='O')\n\n def errorstat(stat, rtol, atol):\n g1 = numpy.array([a[1] for a in stat])\n g2 = numpy.array([a[2] for a in stat])\n\n ag1 = abs(g1) + (abs(g1) == 0) * numpy.std(g1)\n ag2 = abs(g2) + (abs(g2) == 0) * numpy.std(g2)\n sig = (g1 - g2) / ((ag1 + ag2) * rtol + atol)\n bins = [-100, -50, -20, -1, 1, 20, 50, 100]\n d = numpy.digitize(sig, bins)\n return d\n\n d1 = errorstat(fg_bg, rtol, atol)\n\n d2 = errorstat(ng_bg, rtol * 10, atol)\n\n \n\n\n if (d1 != 4).any():\n print('ngbg = ', ng_bg)\n print('fgbg = ', fg_bg)\n #print('ngbg = ' ng_bg)\n raise AssertionError(\"FG_BG Bad gradients: %s \" % numpy.bincount(d1))\n\n\n if (d2 != 4).any():\n raise AssertionError(\"NG_BG Bad gradients: %s \" % numpy.bincount(d2))\n\n" ]
[ [ "numpy.sign", "numpy.bincount", "numpy.imag", "numpy.array", "numpy.std", "numpy.dot", "numpy.isscalar", "numpy.digitize", "numpy.ndindex" ] ]
thisisiron/nmt-attention-tf2
[ "ddcd3b4ed2a20d5a7a1eeac3292abb5e39a95bde" ]
[ "run.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport json\nimport datetime as dt\nimport time\n\nfrom argparse import ArgumentParser, Namespace\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf # TF 2.0\n\nfrom utils import load_dataset, load_vocab, convert_vocab, select_optimizer, loss_function\nfrom model import Encoder, Decoder, AttentionLayer\n\n\ndef test(args: Namespace):\n cfg = json.load(open(args.config_path, 'r', encoding='UTF-8'))\n\n batch_size = 1 # for predicting one sentence.\n\n encoder = Encoder(cfg['vocab_input_size'], cfg['embedding_dim'], cfg['units'], batch_size, 0)\n decoder = Decoder(cfg['vocab_target_size'], cfg['embedding_dim'], cfg['units'], cfg['method'], batch_size, 0)\n optimizer = select_optimizer(cfg['optimizer'], cfg['learning_rate'])\n\n ckpt = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder)\n manager = tf.train.CheckpointManager(ckpt, cfg['checkpoint_dir'], max_to_keep=3)\n ckpt.restore(manager.latest_checkpoint)\n\n while True:\n sentence = input('Input Sentence or If you want to quit, type Enter Key : ')\n\n if sentence == '':\n break\n\n sentence = re.sub(r\"(\\.\\.\\.|[?.!,¿])\", r\" \\1 \", sentence)\n sentence = re.sub(r'[\" \"]+', \" \", sentence)\n\n sentence = '<s> ' + sentence.lower().strip() + ' </s>'\n\n input_vocab = load_vocab('./data/', 'en')\n target_vocab = load_vocab('./data/', 'de')\n\n input_lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', oov_token='<unk>')\n input_lang_tokenizer.word_index = input_vocab\n\n target_lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', oov_token='<unk>')\n target_lang_tokenizer.word_index = target_vocab\n\n convert_vocab(input_lang_tokenizer, input_vocab)\n convert_vocab(target_lang_tokenizer, target_vocab)\n\n inputs = [input_lang_tokenizer.word_index[i] if i in input_lang_tokenizer.word_index else input_lang_tokenizer.word_index['<unk>'] for i in sentence.split(' ')]\n inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],\n maxlen=cfg['max_len_input'],\n padding='post')\n\n inputs = tf.convert_to_tensor(inputs)\n\n result = ''\n\n enc_hidden = encoder.initialize_hidden_state()\n enc_cell = encoder.initialize_cell_state()\n enc_state = [[enc_hidden, enc_cell], [enc_hidden, enc_cell], [enc_hidden, enc_cell], [enc_hidden, enc_cell]]\n\n enc_output, enc_hidden = encoder(inputs, enc_state)\n\n dec_hidden = enc_hidden\n #dec_input = tf.expand_dims([target_lang_tokenizer.word_index['<eos>']], 0)\n dec_input = tf.expand_dims([target_lang_tokenizer.word_index['<s>']], 1)\n\n print('dec_input:', dec_input)\n\n h_t = tf.zeros((batch_size, 1, cfg['embedding_dim']))\n\n for t in range(int(cfg['max_len_target'])):\n predictions, dec_hidden, h_t = decoder(dec_input,\n dec_hidden,\n enc_output,\n h_t)\n\n # predeictions shape == (1, 50002)\n\n predicted_id = tf.argmax(predictions[0]).numpy()\n print('predicted_id', predicted_id)\n\n result += target_lang_tokenizer.index_word[predicted_id] + ' '\n\n if target_lang_tokenizer.index_word[predicted_id] == '</s>':\n print('Early stopping')\n break\n\n dec_input = tf.expand_dims([predicted_id], 1)\n print('dec_input:', dec_input)\n\n print('<s> ' + result)\n print(sentence)\n sys.stdout.flush()\n\n\ndef train(args: Namespace):\n input_tensor, target_tensor, input_lang_tokenizer, target_lang_tokenizer = load_dataset('./data/', args.max_len, limit_size=None)\n\n max_len_input = len(input_tensor[0])\n max_len_target = len(target_tensor[0])\n\n print('max len of 
each seq:', max_len_input, ',', max_len_target)\n\n input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=args.dev_split)\n\n # init hyperparameter\n EPOCHS = args.epoch\n batch_size = args.batch_size\n steps_per_epoch = len(input_tensor_train) // batch_size\n embedding_dim = args.embedding_dim\n units = args.units\n vocab_input_size = len(input_lang_tokenizer.word_index) + 1\n vocab_target_size = len(target_lang_tokenizer.word_index) + 1\n BUFFER_SIZE = len(input_tensor_train)\n learning_rate = args.learning_rate\n\n setattr(args, 'max_len_input', max_len_input)\n setattr(args, 'max_len_target', max_len_target)\n\n setattr(args, 'steps_per_epoch', steps_per_epoch)\n setattr(args, 'vocab_input_size', vocab_input_size)\n setattr(args, 'vocab_target_size', vocab_target_size)\n setattr(args, 'BUFFER_SIZE', BUFFER_SIZE)\n\n dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)\n dataset = dataset.batch(batch_size)\n\n print('dataset shape (batch_size, max_len):', dataset)\n \n encoder = Encoder(vocab_input_size, embedding_dim, units, batch_size, args.dropout)\n decoder = Decoder(vocab_target_size, embedding_dim, units, args.method, batch_size, args.dropout)\n\n optimizer = select_optimizer(args.optimizer, learning_rate)\n\n loss_object = tf.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')\n\n @tf.function\n def train_step(_input, _target, enc_state):\n loss = 0\n\n with tf.GradientTape() as tape:\n enc_output, enc_state = encoder(_input, enc_state)\n\n dec_hidden = enc_state\n\n dec_input = tf.expand_dims([target_lang_tokenizer.word_index['<s>']] * batch_size, 1)\n\n # First input feeding definition\n h_t = tf.zeros((batch_size, 1, embedding_dim))\n\n for idx in range(1, _target.shape[1]):\n # idx means target character index.\n predictions, dec_hidden, h_t = decoder(dec_input,\n dec_hidden,\n enc_output,\n h_t)\n\n # tf.print(tf.argmax(predictions, axis=1))\n\n loss += loss_function(loss_object, _target[:, idx], predictions)\n\n dec_input = tf.expand_dims(_target[:, idx], 1)\n\n batch_loss = (loss / int(_target.shape[1]))\n\n variables = encoder.trainable_variables + decoder.trainable_variables\n\n gradients = tape.gradient(loss, variables)\n\n optimizer.apply_gradients(zip(gradients, variables))\n\n return batch_loss\n\n # Setting checkpoint\n now_time = dt.datetime.now().strftime(\"%m%d%H%M\")\n checkpoint_dir = './training_checkpoints/' + now_time\n setattr(args, 'checkpoint_dir', checkpoint_dir) \n checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n checkpoint = tf.train.Checkpoint(optimizer=optimizer,\n encoder=encoder,\n decoder=decoder)\n \n os.makedirs(checkpoint_dir, exist_ok=True)\n\n # saving information of the model\n with open('{}/config.json'.format(checkpoint_dir), 'w', encoding='UTF-8') as fout:\n json.dump(vars(args), fout, indent=2, sort_keys=True)\n \n min_total_loss = 1000\n\n for epoch in range(EPOCHS):\n start = time.time()\n\n enc_hidden = encoder.initialize_hidden_state()\n enc_cell = encoder.initialize_cell_state()\n enc_state = [[enc_hidden, enc_cell], [enc_hidden, enc_cell], [enc_hidden, enc_cell], [enc_hidden, enc_cell]]\n\n total_loss = 0\n\n for(batch, (_input, _target)) in enumerate(dataset.take(steps_per_epoch)):\n batch_loss = train_step(_input, _target, enc_state)\n total_loss += batch_loss\n\n if batch % 10 == 0:\n print('Epoch {}/{} Batch {}/{} Loss {:.4f}'.format(epoch + 1,\n EPOCHS,\n 
batch + 10,\n steps_per_epoch,\n batch_loss.numpy()))\n\n print('Epoch {}/{} Total Loss per epoch {:.4f} - {} sec'.format(epoch + 1,\n EPOCHS,\n total_loss / steps_per_epoch,\n time.time() - start))\n\n # saving checkpoint\n if min_total_loss > total_loss / steps_per_epoch:\n print('Saving checkpoint...')\n min_total_loss = total_loss / steps_per_epoch\n checkpoint.save(file_prefix=checkpoint_prefix)\n\n print('\\n')\n\n\ndef main():\n pass\n\n\nif __name__=='__main__':\n main()\n" ]
[ [ "tensorflow.zeros", "tensorflow.losses.SparseCategoricalCrossentropy", "tensorflow.expand_dims", "tensorflow.GradientTape", "tensorflow.keras.preprocessing.sequence.pad_sequences", "tensorflow.convert_to_tensor", "tensorflow.train.CheckpointManager", "tensorflow.keras.preprocessing.text.Tokenizer", "tensorflow.argmax", "tensorflow.train.Checkpoint", "tensorflow.data.Dataset.from_tensor_slices", "sklearn.model_selection.train_test_split" ] ]
DaceT/dask-sql
[ "c545f2bf9a786b0e9ff7f68c90da4dcc39cdcd73" ]
[ "dask_sql/physical/rel/logical/sample.py" ]
[ "import logging\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom dask_sql.datacontainer import DataContainer\nfrom dask_sql.physical.rel.base import BaseRelPlugin\n\nif TYPE_CHECKING:\n import dask_sql\n from dask_sql.java import org\n\nlogger = logging.getLogger(__name__)\n\n\nclass SamplePlugin(BaseRelPlugin):\n \"\"\"\n Sample is used on TABLESAMPLE clauses.\n It returns only a fraction of the table, given by the\n number in the arguments.\n There exist two algorithms, SYSTEM or BERNOULLI.\n\n SYSTEM is a very fast algorithm, which works on partition\n level: a partition is kept with a probability given by the\n percentage. This algorithm will - especially for very small\n numbers of partitions - give wrong results. Only choose\n it when you really have too much data to apply BERNOULLI\n (which might never be the case in real world applications).\n\n BERNOULLI samples each row separately and will still\n give only an approximate fraction, but much closer to\n the expected.\n \"\"\"\n\n class_name = \"org.apache.calcite.rel.core.Sample\"\n\n def convert(\n self, rel: \"org.apache.calcite.rel.RelNode\", context: \"dask_sql.Context\"\n ) -> DataContainer:\n (dc,) = self.assert_inputs(rel, 1, context)\n df = dc.df\n cc = dc.column_container\n\n parameters = rel.getSamplingParameters()\n is_bernoulli = parameters.isBernoulli()\n fraction = float(parameters.getSamplingPercentage())\n seed = parameters.getRepeatableSeed() if parameters.isRepeatable() else None\n\n if is_bernoulli:\n df = df.sample(frac=fraction, replace=False, random_state=seed)\n else:\n random_state = np.random.RandomState(seed)\n random_choice = random_state.choice(\n [True, False],\n size=df.npartitions,\n replace=True,\n p=[fraction, 1 - fraction],\n )\n\n if random_choice.any():\n df = df.partitions[random_choice]\n else:\n df = df.head(0, compute=False)\n\n return DataContainer(df, cc)\n" ]
[ [ "numpy.random.RandomState" ] ]
ibaiGorordo/depthai-experiments
[ "cde67e277120ddac815cbad6360695759cca900f" ]
[ "gen2-efficientnet-classification/main.py" ]
[ "#!/usr/bin/env python3\nimport argparse\nfrom pathlib import Path\nimport sys\n\nimport blobconverter\nimport cv2\nimport depthai as dai\nimport numpy as np\nfrom classes import class_names\n\n# Get Argument First\nparser = argparse.ArgumentParser()\nparser.add_argument('-nd', '--no-debug', action=\"store_true\", help=\"Prevent debug output\")\nparser.add_argument('-cam', '--camera', action=\"store_true\", help=\"Use DepthAI 4K RGB camera for inference (conflicts with -vid)\")\nparser.add_argument('-vid', '--video', type=str, help=\"Path to video file to be used for inference (conflicts with -cam)\")\nargs = parser.parse_args()\n\n\n\n# NOTE: video must be of size 224 x 224. We will resize this on the\n# host, but you could also use ImageManip node to do it on device\n\n# Link video in with the detection network\n \nif not args.camera and not args.video:\n raise RuntimeError(\"No source selected. Please use either \\\"-cam\\\" to use RGB camera as a source or \\\"-vid <path>\\\" to run on video\")\n\ndebug = not args.no_debug\ncamera = not args.video\nlabels = class_names()\n\n\n# Start defining a pipeline\npipeline = dai.Pipeline()\n\n# NeuralNetwork\nprint(\"Creating Neural Network...\")\ndetection_nn = pipeline.create(dai.node.NeuralNetwork)\ndetection_nn.setBlobPath(blobconverter.from_zoo(name=\"efficientnet-b0\"))\n\nif camera:\n print(\"Creating Color Camera...\")\n cam_rgb = pipeline.create(dai.node.ColorCamera)\n cam_rgb.setPreviewSize(224,224)\n cam_rgb.setInterleaved(False)\n cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)\n cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)\n\n cam_xout = pipeline.create(dai.node.XLinkOut)\n cam_xout.setStreamName(\"rgb\")\n cam_rgb.preview.link(cam_xout.input)\n cam_rgb.preview.link(detection_nn.input)\nelse:\n face_in = pipeline.create(dai.node.XLinkIn)\n face_in.setStreamName(\"in_nn\")\n face_in.out.link(detection_nn.input)\n\n# Create outputs\nxout_nn = pipeline.create(dai.node.XLinkOut)\nxout_nn.setStreamName(\"nn\")\ndetection_nn.out.link(xout_nn.input)\n\nframe = None\nbboxes = []\n\n\ndef to_tensor_result(packet):\n return {\n tensor.name: np.array(packet.getLayerFp16(tensor.name)).reshape(tensor.dims)\n for tensor in packet.getRaw().tensors\n }\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\n\ndef to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:\n resized = cv2.resize(arr, shape)\n return resized.transpose(2, 0, 1)\n\n\n# Pipeline defined, now the device is assigned and pipeline is started\nwith dai.Device(pipeline) as device:\n\n # Output queues will be used to get the rgb frames and nn data from the outputs defined above\n if camera:\n q_rgb = device.getOutputQueue(name=\"rgb\", maxSize=1, blocking=False)\n else:\n cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))\n \n\n detection_in = device.getInputQueue(\"in_nn\")\n q_nn = device.getOutputQueue(name=\"nn\", maxSize=1, blocking=False)\n\n\n def should_run():\n return cap.isOpened() if args.video else True\n\n\n def get_frame():\n if camera:\n in_rgb = q_rgb.get()\n new_frame = np.array(in_rgb.getData()).reshape((3, in_rgb.getHeight(), in_rgb.getWidth())).transpose(1, 2, 0).astype(np.uint8)\n new_frame = cv2.cvtColor(new_frame, cv2.COLOR_BGR2RGB)\n return True, np.ascontiguousarray(new_frame)\n else:\n return cap.read()\n\n\n \n result = None\n\n while 
should_run():\n read_correctly, frame = get_frame()\n\n if not read_correctly:\n break\n\n if not camera:\n nn_data = dai.NNData()\n nn_data.setLayer(\"input\", to_planar(frame, (224, 224)))\n detection_in.send(nn_data)\n\n in_nn = q_nn.tryGet()\n\n if in_nn is not None:\n data = softmax(in_nn.getFirstLayerFp16())\n result_conf = np.max(data)\n if result_conf > 0.2:\n result = {\n \"name\": labels[np.argmax(data)],\n \"conf\": round(100 * result_conf, 2)\n }\n else:\n result = None\n\n if debug:\n frame_main = frame.copy()\n if result is not None:\n cv2.putText(frame_main, \"{}\".format(result[\"name\"]), (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0))\n cv2.putText(frame_main, \"Confidence: {}%\".format(result[\"conf\"]), (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0))\n \n cv2.imshow(\"rgb\", cv2.resize(frame_main, (400, 400)))\n \n if cv2.waitKey(1) == ord('q'):\n break\n elif result is not None:\n print(\"{} ({}%)\".format(result[\"name\"], result[\"conf\"]))\n" ]
[ [ "numpy.ascontiguousarray", "numpy.max", "numpy.argmax" ] ]
lamorton/FIDASIM
[ "d4f68c17d4fcb08107768346d47bee7a4bf0586c" ]
[ "lib/python/fidasim/utils.py" ]
[ "#!/bin/sh\n\"exec\" \"$FIDASIM_DIR/deps/python\" \"$0\" \"$@\"\n# -*- coding: utf-8 -*-\n\n#+#FIDASIM Utilities\n#+This file contains useful FIDASIM utilities\n#+***\nfrom __future__ import print_function\nimport os\nfrom os.path import dirname\nimport subprocess\nimport platform\nimport numpy as np\nimport copy\nimport h5py\nimport efit\nfrom scipy.io import netcdf\nfrom scipy.interpolate import interp1d, interp2d, NearestNDInterpolator\nfrom scipy.spatial import Delaunay\nimport matplotlib.pyplot as plt\n\ndef get_fidasim_dir():\n \"\"\"\n #+#get_fidasim_dir\n #+ Gets FIDASIM install directory\n #+***\n #+##Output Arguments\n #+ **directory**: FIDASIM install directory.\n #+##Example Usage\n #+```python\n #+>>> fida_dir = get_fidasim_dir()\n #+```\n \"\"\"\n\n directory = dirname(dirname(dirname(dirname(os.path.abspath(__file__)))))\n\n return directory\n\ndef get_version(fidasim_dir):\n \"\"\"\n #+#get_version\n #+ Gets FIDASIM version number from git.\n #+ Falls back to reading VERSION file when git is not available\n #+***\n #+##Input Arguments\n #+ **fidasim_dir**: FIDASIM install directory\n #+\n #+##Output Arguments\n #+ **version**: FIDAIM version number.\n #+\n #+##Example Usage\n #+```python\n #+>>> version = get_version(get_fidasim_dir())\n #+```\n \"\"\"\n version = ''\n alt = False\n\n if platform.system() == 'Windows':\n alt = True\n else:\n # Location of .git folder\n git_dir = r'{}{}.git'.format(fidasim_dir, os.path.sep)\n\n # git is installed if git_file is a file\n proc = subprocess.Popen('command -v git', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n git_file = proc.communicate()[0].decode('utf-8')\n git_file = git_file.replace('\\n', '')\n\n # Check that .git folder is present and git is installed\n if os.path.isfile(git_file) and os.path.isdir(git_dir):\n try:\n version = subprocess.check_output(['git', '--git-dir={}'.format(git_dir), 'describe', '--tags', '--always', '--dirty'])\n version = version.replace('\\n', '')\n except:\n alt = True\n else:\n alt = True\n\n # If above didn't work, read version file\n if alt:\n # Git 'version' filepath\n ver_file = '{}{}VERSION'.format(fidasim_dir, os.path.sep)\n\n if os.path.isfile(ver_file):\n with open(ver_file) as f:\n version = f.read()\n\n return version\n\ndef aabb_intersect(rc, dr, r0, d0):\n \"\"\"\n #+#aabb_intersect\n #+Calculates intersection length of a ray and an axis aligned bounding box (AABB)\n #+***\n #+##Input Arguments\n #+ **rc**: Center of AABB\n #+\n #+ **dr**: [length, width, height] of AABB\n #+\n #+ **r0**: starting point of ray\n #+\n #+ **d0**: direction of ray\n #+\n #+##Output Arguments\n #+ **intersect**: Intersection length of ray and AABB\n #+\n #+ **ri**: Optional, ray enterence point\n #+\n #+ **rf**: Optional, ray exit point\n #+\n #+##Example Usage\n #+```python\n #+>>> intersect, r_enter, r_exit = aabb_intersect([0,0,0], [1,1,1], [-1,0,0], [1,0,0])\n #+>>> print(intersect)\n #+ 1.0\n #+>>> print(r_enter)\n #+ -0.5 0.0 0.0\n #+>>> print(r_exit)\n #+ 0.5 0.0 0.0\n #+```\n \"\"\"\n v0 = d0 / np.sqrt(np.sum(d0 ** 2.))\n\n # There are 6 sides to a cube/grid\n side_inter = np.zeros(6)\n\n # Intersection points of ray with planes defined by grid\n ipnts = np.zeros((3, 6))\n\n # Find whether ray intersects each side\n for i in range(6):\n j = int(np.floor(i / 2))\n ind = np.arange(3, dtype=int)\n ind = ind[ind != j]\n if np.abs(v0[j]) > 0.: # just v0[j] != 0 right?\n # Intersection point with plane\n ipnts[:, i] = r0 + v0 * (((rc[j] + (np.mod(i, 2) - 0.5) * dr[j]) - r0[j]) / 
v0[j])\n\n # Check if point on plane is within grid side\n if (np.abs(ipnts[ind[0], i] - rc[ind[0]]) <= 0.5 * dr[ind[0]]) and \\\n (np.abs(ipnts[ind[1], i] - rc[ind[1]]) <= 0.5 * dr[ind[1]]):\n side_inter[i] = 1\n\n intersect = 0.0\n r_enter = copy.deepcopy(r0)\n r_exit = copy.deepcopy(r0)\n ind = np.arange(side_inter.size)\n ind = ind[side_inter != 0]\n nw = side_inter[ind].size\n if nw >= 2:\n #Find two unique intersection points\n nunique = 0\n for i in range(nw - 1):\n if np.sum(ipnts[:, ind[0]] == ipnts[:, ind[i + 1]]) != 3:\n ind = [ind[0], ind[i + 1]]\n nunique = 2\n break\n\n if nunique == 2:\n vi = ipnts[:, ind[1]] - ipnts[:, ind[0]]\n vi = vi / np.sqrt(np.sum(vi ** 2.))\n dot_prod = np.sum(v0 * vi)\n if dot_prod > 0.0:\n r_enter = ipnts[:, ind[0]]\n r_exit = ipnts[:, ind[1]]\n else:\n r_enter = ipnts[:, ind[1]]\n r_exit = ipnts[:, ind[0]]\n\n # Calculate intersection length\n intersect = np.sqrt(np.sum((r_exit - r_enter) ** 2.))\n\n return intersect, r_enter, r_exit\n\ndef tb_zyx(alpha, beta, gamma):\n \"\"\"\n #+#tb_zyx\n #+Calculates Tait-Bryan z-y'-x\" active rotation matrix given rotation angles `alpha`,`beta`,`gamma` in radians\n #+***\n #+##Arguments\n #+ **alpha**: rotation angle about z [radians]\n #+\n #+ **beta**: rotation angle about y' [radians]\n #+\n #+ **gamma**: rotation angle about x\" [radians]\n #+\n #+##Return Value\n #+ Rotation Matrix [prefida](|url|/sourcefile/prefida.pro.html)\n #+\n #+##Example Usage\n #+```python\n #+ >>> rot_mat = tb_zyx(np.pi/2, 0.0, np.pi/3)\n #+```\n \"\"\"\n sa = np.sin(alpha)\n ca = np.cos(alpha)\n sb = np.sin(beta)\n cb = np.cos(beta)\n sg = np.sin(gamma)\n cg = np.cos(gamma)\n\n r = np.zeros((3, 3))\n\n r[0, 0] = ca * cb\n r[0, 1] = ca * sb * sg - cg * sa\n r[0, 2] = sa * sg + ca * cg * sb\n r[1, 0] = cb * sa\n r[1, 1] = ca * cg + sa * sb * sg\n r[1, 2] = cg * sa * sb - ca * sg\n r[2, 0] = -sb\n r[2, 1] = cb * sg\n r[2, 2] = cb * cg\n\n return r\n\ndef uvw_to_xyz(alpha, beta, gamma, uvw, origin=np.zeros(3)):\n \"\"\"\n #+#uvw_to_xyz\n #+ Express non-rotated coordinate `uvw` in rotated `xyz` coordinates\n #+***\n #+##Arguments\n #+ **alpha**: Rotation angle about z [radians]\n #+\n #+ **beta**: Rotation angle about y' [radians]\n #+\n #+ **gamma**: Rotation angle about x\" [radians]\n #+\n #+ **uvw**: Point in rotated coordinate system, (3, n)\n #+\n #+##Keyword Arguments\n #+ **origin**: Origin of rotated coordinate system in non-rotated (uvw) coordinates, (3)\n #+\n #+##Output Arguments\n #+ **xyz**: 'uvw' in 'xyz' coordinates\n #+\n #+##Example Usage\n #+```python\n #+>>> xyz = uvw_to_xyz(np.pi/2., 0.0, np.pi/3., uvw, origin=[.1, .2, 0.])\n #+```\n \"\"\"\n\n # Make np arrays\n uvw = np.array(uvw, dtype=float)\n origin = np.array(origin, dtype=float)\n\n # Do checks as this code does not allow multiple points to be entered (yet)\n if uvw.ndim == 2:\n s = uvw.shape\n if s[0] != 3:\n raise ValueError('uvw must be (3, n), but it has shape {}'.format(uvw.shape))\n n = s[1]\n elif uvw.ndim == 1:\n if uvw.size != 3:\n raise ValueError('uvw must have length 3, but it has length {}'.format(uvw.size))\n n = 1\n else:\n raise ValueError('uvw must be (3) or (3, n)')\n\n if origin.ndim != 1:\n raise ValueError('origin must be 1D, but it has shape {}'.format(origin.shape))\n\n if origin.size != 3:\n raise ValueError('origin must have length 3, but it has length {}'.format(origin.size))\n\n # Shift origin\n uvw_shifted = uvw - np.squeeze(np.tile(origin, (n, 1)).T)\n\n # Get rotation matrix\n r = tb_zyx(alpha, beta, gamma)\n\n # Apply 
rotation matrix\n xyz = np.dot(r.T, uvw_shifted)\n\n return xyz\n\ndef xyz_to_uvw(alpha, beta, gamma, xyz, origin = np.zeros(3)):\n \"\"\"\n #+##`xyz_to_uvw(alpha, beta, gamma, xyz, origin=[0,0,0])`\n #+Express rotated coordinate `xyz` in non-rotated `uvw` coordinates\n #+###Arguments\n #+ **alpha**: Rotation angle about z [radians]\n #+\n #+ **beta**: Rotation angle about y' [radians]\n #+\n #+ **gamma**: Rotation angle about x\" [radians]\n #+\n #+ **xyz**: Point in rotated coordinate system\n #+\n #+###Keyword Arguments\n #+ **origin**: Origin of rotated coordinate system in non-rotated (uvw) coordinates.\n #+\n #+###Example Usage\n #+```python\n #+>>> uvw = xyz_to_uvw(np.pi/2,0.0,np.pi/3,xyz)\n #+```\n \"\"\"\n xyz = np.array(xyz)\n\n # Do checks as this code does not allow multiple points to be entered (yet)\n if xyz.ndim == 2:\n s = xyz.shape\n if s[0] != 3:\n raise ValueError('xyz must be (3, n), but it has shape {}'.format(xyz.shape))\n n = s[1]\n elif xyz.ndim == 1:\n if xyz.size != 3:\n raise ValueError('xyz must have length 3, but it has length {}'.format(xyz.size))\n n = 1\n else:\n raise ValueError('xyz must be (3) or (3, n)')\n\n if origin.ndim != 1:\n raise ValueError('origin must be 1D, but it has shape {}'.format(origin.shape))\n\n if origin.size != 3:\n raise ValueError('origin must have length 3, but it has length {}'.format(origin.size))\n\n R = tb_zyx(alpha,beta,gamma)\n\n uvw = np.dot(R, xyz)\n\n return uvw + np.squeeze(np.tile(origin, (n, 1)).T)\n\ndef line_basis(r0, v0):\n \"\"\"\n #+#line_basis\n #+Calculates basis from a line with +x in the direction of line\n #+***\n #+##Arguments\n #+ **r0**: Starting point of line [cm]\n #+\n #+ **v0**: Direction of line\n #+\n #+##Example Usage\n #+```python\n #+>>> basis = line_basis([0,0,0],[0,-1,0])\n #+>>> x = np.dot(basis,np.array([1,1,0])) ;Transforms a point in line-space ([1,1,0]) to real space\n #+>>> x\n #+ [1, -1, 0]\n #+```\n \"\"\"\n r0 = np.array(r0)\n v0 = np.array(v0)\n rf = r0 + v0\n dis = np.sqrt(np.sum(v0**2))\n beta = np.arcsin((r0[2] - rf[2])/dis)\n alpha = np.arctan2((rf[1] - r0[1]),(rf[0]-r0[0]))\n\n R = tb_zyx(alpha,beta,0.0)\n return R\n\ndef rz_grid(rmin, rmax, nr, zmin, zmax, nz, phimin=0.0, phimax=0.0, nphi=1):\n \"\"\"\n #+#rz_grid\n #+Creates interpolation grid\n #+***\n #+##Arguments\n #+ **rmin**: Minimum radius [cm]\n #+\n #+ **rmax**: Maximum radius [cm]\n #+\n #+ **nr**: Number of radii\n #+\n #+ **zmin**: Minimum Z value [cm]\n #+\n #+ **zmax**: Maximum Z value [cm]\n #+\n #+ **nz**: Number of Z values\n #+\n #+ **phimin**: Minimum Phi value [rad]\n #+\n #+ **phimax**: Maximum Phi value [rad]\n #+\n #+ **nphi**: Number of Phi values \n #+\n #+##Return Value\n #+Interpolation grid dictionary\n #+\n #+##Example Usage\n #+```python\n #+>>> grid = rz_grid(0,200.0,200,-100,100,200,phimin=4*np.pi/3,phimax=5*np.pi/3,nphi=5)\n #+```\n \"\"\"\n dr = (rmax - rmin) / nr\n dz = (zmax - zmin) / nz\n dphi = (phimax - phimin) / nphi\n r = rmin + dr * np.arange(nr, dtype=np.float64)\n z = zmin + dz * np.arange(nz, dtype=np.float64)\n phi = phimin + dphi * np.arange(nphi, dtype=np.float64)\n\n r2d = np.tile(r, (nz, 1)).T\n z2d = np.tile(z, (nr, 1))\n\n grid = {'r2d': r2d,\n 'z2d': z2d,\n 'r': r,\n 'z': z,\n 'phi': phi,\n 'nr': nr,\n 'nz': nz,\n 'nphi': nphi}\n\n return grid\n\ndef colored(text, color): #, on_color=None, attrs=None):\n \"\"\"\n #+#colored\n #+ Return text string formatting for color in terminal\n #+***\n #+##Input Arguments\n #+ **text**: String to be colored\n #+\n #+ **color**: Desired 
color of string. Red, green, yellow, blue, magenta, cyan, or white.\n #+\n #+##Output Arguments\n #+ **text**: Text formated to have \"color\" in terminal.\n #+##Example Usage\n #+```python\n #+>>> text = colored(\"Text to be red\", 'red')\n #+>>> print(text)\n #+```\n \"\"\"\n # Copyright (c) 2008-2011 Volvox Development Team\n #\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n #\n # The above copyright notice and this permission notice shall be included in\n # all copies or substantial portions of the Software.\n #\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n # THE SOFTWARE.\n #\n # Author: Konstantin Lepa <[email protected]>\n COLORS = dict(list(zip(['grey',\n 'red',\n 'green',\n 'yellow',\n 'blue',\n 'magenta',\n 'cyan',\n 'white',],\n list(range(30, 38)))))\n\n RESET = '\\033[0m'\n\n if os.getenv('ANSI_COLORS_DISABLED') is None:\n fmt_str = '\\033[%dm%s'\n\n text = fmt_str % (COLORS[color], text)\n\n text += RESET\n\n return text\n\ndef info(string):\n \"\"\"\n #+#info\n #+Print a informational message\n #+***\n #+##Arguments\n #+ **str**: message\n #+\n #+##Example Usage\n #+```python\n #+>>> info(\"This is an informative message\")\n #+```\n \"\"\"\n print(colored('INFO: ' + string, 'cyan'))\n\ndef warn(string):\n \"\"\"\n #+#warn\n #+Print a warning message\n #+***\n #+##Arguments\n #+ **string**: message\n #+\n #+##Example Usage\n #+```python\n #+>>> warn(\"This may be a problem\")\n #+```\n \"\"\"\n print(colored('WARNING: ' + string, 'magenta'))\n\ndef error(string, halt=False):\n \"\"\"\n #+#error\n #+Print a error message\n #+***\n #+##Arguments\n #+ **string**: message\n #+\n #+##Keyword Arguments\n #+ **halt**: Halt program execution\n #+\n #+##Example Usage\n #+```python\n #+>>> error(\"Error message\")\n #+```\n \"\"\"\n print(colored('ERROR: {}'.format(string), 'red'))\n\n if halt:\n raise Exception()\n\ndef success(string):\n \"\"\"\n #+#success\n #+Print a success message\n #+***\n #+##Arguments\n #+ **string**: message\n #+\n #+##Example Usage\n #+```python\n #+>>> success(\"Yay!!!\")\n #+```\n \"\"\"\n print(colored('SUCCESS: ' + string, 'green'))\n\ndef beam_grid(nbi, rstart,\n nx=None, ny=None, nz=None, dv=8.0,\n length=100.0, width=80.0, height=80.0):\n \"\"\"\n #+#beam_grid\n #+ Calculates settings for a grid that aligns with the neutral beam.\n #+***\n #+##Arguments\n #+ **nbi**: [Neutral beam geometry structure](|url|/page/03_technical/01_prefida_inputs.html#neutral-beam-geometry-structure)\n #+\n #+ **rstart**: Radial start position of beam grid [cm]\n #+\n #+##Keyword Arguments\n #+ **dV**: Cell volume [\\(cm^3\\)]: Defaults to 8.0\n #+\n #+ **nx**: Number of cells in length: Default determined by `dV`\n #+\n #+ **ny**: Number of 
cells in width: Default determined by `dV`\n #+\n #+ **nz**: Number of cells in height: Default determined by `dV`\n #+\n #+ **length**: Length of grid along beam sightline. [cm]: Defaults to 100 cm\n #+\n #+ **width**: Width of grid [cm]: Defaults to 100 cm\n #+\n #+ **height**: Height of grid [cm]: Defaults 80 cm\n #+\n #+##Return Value\n #+ Structure containing beam grid settings suitable for the Namelist File\n #+\n #+##Example Usage\n #+```python\n #+>>> grid = beam_grid(nbi,200.0,nx=100,ny=50,nz=50,length=100,width=50,height=50)\n #+```\n \"\"\"\n\n if width < nbi['widy']:\n warn(\"Grid width is smaller then the source width\")\n print(\"width: {}\".format(width))\n print(\"source width: {}\".format(nbi['widy']))\n\n if height < nbi['widz']:\n warn(\"Grid height is smaller then the source height\")\n print(\"height: {}\".format(height))\n print(\"source height: {}\".format(nbi['widz']))\n\n dv3 = dv ** (1. / 3.)\n\n if nx is None:\n nx = round(length / dv3)\n\n if ny is None:\n ny = round(width / dv3)\n\n if nz is None:\n nz = round(height / dv3)\n\n xmin = 0.\n xmax = length\n ymin = -width / 2.\n ymax = width / 2.\n zmin = -height / 2.\n zmax = height / 2.\n\n src = nbi['src']\n axis = nbi['axis'] / np.sqrt(np.sum(nbi['axis'] ** 2))\n pos = src + 100. * axis\n\n if np.sqrt(src[0] ** 2 + src[1] ** 2) < rstart:\n error(\"Source radius cannot be less then rstart\", halt=True)\n\n dis = np.sqrt(np.sum((src - pos) ** 2.0))\n beta = np.arcsin((src[2] - pos[2]) / dis)\n alpha = np.arctan2((pos[1] - src[1]), (pos[0] - src[0]))\n gamma = 0.\n a = axis[0] ** 2 + axis[1] ** 2\n b = 2. * (src[0] * axis[0] + src[1] * axis[1])\n c = src[0] ** 2 + src[1] ** 2 - rstart ** 2\n t = (-b - np.sqrt(b ** 2 - 4. * a * c)) / (2. * a)\n origin = src + t * axis\n\n beam_grid = {'nx': nx,\n 'ny': ny,\n 'nz': nz,\n 'xmin': xmin,\n 'xmax': xmax,\n 'ymin': ymin,\n 'ymax': ymax,\n 'zmin': zmin,\n 'zmax': zmax,\n 'alpha': alpha,\n 'beta': beta,\n 'gamma': gamma,\n 'origin': origin}\n\n return beam_grid\n\ndef write_data(h5_obj, dic, desc=dict(), units=dict(), name=''):\n \"\"\"\n #+#write_data\n #+ Write h5 datasets with attributes 'description' and 'units'\n #+***\n #+##Arguments\n #+ **h5_obj**: An h5 file or group object from h5py\n #+\n #+ **dic**: Dict of data to save as h5 datasets\n #+\n #+##Keyword Arguments\n #+ **name**: Name/description of dic for clarity in raising errors\n #+\n #+ **desc**: Dict with same keys as dic describing each item in dic\n #+\n #+ **units**: Dict with same keys as dic providing units of data in dic, doesn't have to be all keys of dic.\n #+\n #+##Example Usage\n #+```python\n #+>>> write_data(h5_obj, dic, desc, units)\n #+```\n \"\"\"\n for key in dic:\n if isinstance(dic[key], dict):\n h5_grp = h5_obj.create_group(key)\n write_data(h5_grp, dic[key])\n continue\n\n # Transpose data to match expected by Fortran and historically provided by IDL\n if isinstance(dic[key], np.ndarray):\n if dic[key].ndim >= 2:\n dic[key] = dic[key].T\n\n # Make strings of fixed length as required by Fortran.\n # See http://docs.h5py.org/en/latest/strings.html#fixed-length-ascii\n if isinstance(dic[key], str):\n dic[key] = np.string_(dic[key])\n\n # Create dataset\n ds = h5_obj.create_dataset(key, data = dic[key])\n\n # Add descrption attribute\n if key in desc:\n ds.attrs['description'] = desc[key]\n\n # Add units attribute (if present)\n if key in units:\n ds.attrs['units'] = units[key]\n\ndef read_geqdsk(filename, grid, poloidal=False):\n \"\"\"\n #+#read_geqdsk\n #+Reads an EFIT GEQDSK file\n 
#+***\n #+##Arguments\n #+ **filename**: GEQDSK file\n #+\n #+ **grid**: Interpolation grid\n #+\n #+##Keyword Arguments\n #+ **poloidal**: Return rho_p (sqrt(normalized poloidal flux)) instead of rho (sqrt(normalized toroidal flux))\n #+\n #+##Return Value\n #+Electronmagnetic fields structure, rho, btipsign\n #+\n #+##Example Usage\n #+```python\n #+>>> fields, rho, btipsign = read_geqdsk(\"./g133223.00200\",grid)\n #+```\n \"\"\"\n dims = grid['r2d'].shape\n r_pts = grid['r2d'].flatten()/100\n z_pts = grid['z2d'].flatten()/100\n g = efit.readg(filename)\n btipsign = np.sign(g[\"current\"]*g[\"bcentr\"])\n\n fpol = g[\"fpol\"]\n psiaxis = g[\"ssimag\"]\n psiwall = g[\"ssibry\"]\n r = g[\"r\"]\n z = g[\"z\"]\n\n psi_arr = np.linspace(psiaxis, psiwall, len(fpol))\n fpol_itp = interp1d(psi_arr, fpol, 'cubic', fill_value=fpol[-1],bounds_error=False)\n psirz_itp = interp2d(r, z, g[\"psirz\"], 'cubic')\n\n if poloidal:\n rhogrid = np.array([psirz_itp(rr,zz) for (rr,zz) in zip(r_pts,z_pts)]).reshape(dims)\n rhogrid = np.sqrt((rhogrid - g[\"ssimag\"])/(g[\"ssibry\"] - g[\"ssimag\"]))\n else:\n rhogrid=efit.rho_rz(g,r_pts,z_pts,norm=True).reshape(dims)\n\n br = np.array([psirz_itp(rr,zz,dy=1)/rr for (rr,zz) in zip(r_pts,z_pts)]).reshape(dims)\n bz = np.array([-psirz_itp(rr,zz,dx=1)/rr for (rr,zz) in zip(r_pts,z_pts)]).reshape(dims)\n bt = np.array([fpol_itp(psirz_itp(rr,zz))/rr for (rr,zz) in zip(r_pts,z_pts)]).reshape(dims)\n\n er = br*0\n ez = bz*0\n et = bt*0\n\n mask = np.ones(dims,dtype=np.int32)\n\n equil = {\"time\":0.0,\"data_source\":os.path.abspath(filename), \"mask\":mask,\n \"br\":br,\"bt\":bt,\"bz\":bz,\"er\":er,\"et\":et,\"ez\":ez}\n\n return equil, rhogrid, btipsign\n\ndef read_ncdf(filename, vars=None):\n '''\n #+#read_ncdf\n #+Reads a flat NetCDF file\n #+***\n #+##Arguments\n #+ **filename**: NetCDF file\n #+\n #+##Keyword Arguments\n #+ **vars**: List of variables to read\n #+\n #+##Return Value\n #+Structure containing NetCDF variables\n #+\n #+##Example Usage\n #+```python\n #+>>> a = read_ncdf(\"./123324H01_fi_1.cdf\")\n #+```\n '''\n\n d = dict()\n d['err'] = 1\n if os.path.isfile(filename):\n d['err'] = 0\n f = netcdf.netcdf_file(filename, 'r', mmap=False)\n variables = f.variables\n if vars != None:\n for k in vars:\n # need to check case sensitibity\n if k in variables.keys():\n v = variables[k]\n if tuple() == v.shape:\n d[k] = v.getValue()\n else:\n d[k] = v[:]\n else:\n for k,v in variables.items():\n if tuple() == v.shape:\n d[k] = v.getValue()\n else:\n d[k] = v[:]\n f.close()\n else:\n error('FILE DOES NOT EXIST: '+filename)\n\n return d\n\ndef extract_transp_plasma(filename, intime, grid, rhogrid,\n dn0out=None, scrapeoff=None,rho_scrapeoff=0.1):\n '''\n #+#extract_transp_plasma\n #+Extracts `plasma` structure from a TRANSP run\n #+***\n #+##Arguments\n #+ **filename**: TRANSP output file e.g. 
[TRANSP_RUNID].CDF\n #+\n #+ **intime**: Time of interest [s]\n #+\n #+ **grid**: Interpolation grid\n #+\n #+ **rhogrid**: sqrt(normalized torodial flux) mapped onto the interpolation grid\n #+\n #+##Keyword Arguments\n #+ **dn0out**: Wall Neutral density value `dn0out` variable in transp namelist\n #+\n #+ **scrapeoff**: scrapeoff decay length\n #+\n #+ **rho_scrapeoff**: scrapeoff length, default = 0.1\n #+\n #+##Example Usage\n #+```python\n #+>>> plasma = extract_transp_plasma(\"./142332H01.CDF\", 1.2, grid, rho)\n #+```\n '''\n\n var_list = [\"X\",\"TRFLX\",\"TFLUX\",\"TIME\",\"NE\",\"NH\",\"ND\",\"NT\",\"NIMP\",\"TE\",\"TI\",\"ZEFFI\",\"OMEGA\",\"DN0WD\",\"XZIMP\"]\n\n zz = read_ncdf(filename, vars=var_list)\n\n t = zz['TIME']\n idx = np.argmin(abs(t-intime))\n time = t[idx].astype('float64')\n\n print(' * Selecting profiles at :', time, ' s') #pick the closest timeslice to TOI\n\n impurity_charge = np.max(zz[\"XZIMP\"]).astype(\"int16\")\n transp_ne = zz['NE'][idx,:] #cm^-3\n transp_nimp = zz['NIMP'][idx,:] #cm^-3\n transp_nn = zz['DN0WD'][idx,:] #cm^-3\n\n if 'NH' in zz:\n transp_nh = zz['NH'][idx,:] #cm^-3\n else:\n transp_nh = 0*transp_ne\n\n if 'ND' in zz:\n transp_nd = zz['ND'][idx,:] #cm^-3\n else:\n transp_nd = 0*transp_ne\n\n if 'NT' in zz:\n transp_nt = zz['NT'][idx,:] #cm^-3\n else:\n transp_nt = 0*transp_ne\n\n transp_te = zz['TE'][idx,:]*1.e-3 # kev\n transp_ti = zz['TI'][idx,:]*1.e-3 # kev\n transp_zeff = zz['ZEFFI'][idx,:]\n rho_cb = np.sqrt(zz['TRFLX'][idx,:]/zz['TFLUX'][idx])\n # center each rho b/c toroidal flux is at cell boundary\n rho = 0.e0*rho_cb\n rho[0] = 0.5*rho_cb[0]\n for i in range(len(rho_cb)-1):\n rho[i+1] = rho_cb[i+1] - 0.5*(rho_cb[i+1] - rho_cb[i])\n\n if 'OMEGA' not in zz.keys():\n error('OMEGA not found in TRANSP file. 
Assuming no plasma rotation')\n transp_omega=0.0*transp_te\n else:\n transp_omega = zz['OMEGA'][idx,:] # rad/s\n\n if dn0out == None:\n dn0out = transp_nn[-1]\n if scrapeoff == None:\n scrapeoff = 0.0\n\n if scrapeoff > 0.0:\n drho = abs(rho[-1] - rho[-2])\n rho_sc = rho[-1] + drho*(range(np.ceil(rho_scrapeoff/drho)) + 1)\n sc = np.exp(-(rho_sc - rho[-1])/scrapeoff)\n transp_ne = np.append(transp_ne,transp_ne[-1]*sc)\n transp_nimp = np.append(transp_nimp,transp_nimp[-1]*sc)\n transp_nh = np.append(transp_nh,transp_nh[-1]*sc)\n transp_nd = np.append(transp_nd,transp_nd[-1]*sc)\n transp_nt = np.append(transp_nt,transp_nt[-1]*sc)\n transp_te = np.append(transp_te,transp_te[-1]*sc)\n transp_ti = np.append(transp_ti,transp_ti[-1]*sc)\n transp_nn = np.append(transp_nn,0*sc + dn0out)\n transp_zeff = np.append(transp_zeff, (transp_zeff[-1]-1)*sc + 1)\n transp_omega = np.append(transp_omega,transp_omega[-1]*sc)\n rho = np.append(rho, rho_sc)\n\n profiles = {\"rho\":rho,\n \"dene\":np.where(transp_ne > 0, transp_ne, 0.0),\n \"denimp\":np.where(transp_nimp > 0, transp_nimp, 0.0),\n\t\t\"denn\":np.where(transp_nn > 0, transp_nn, 0.0),\n \"te\":np.where(transp_te > 0, transp_te, 0.0),\n \"ti\":np.where(transp_ti > 0, transp_ti, 0.0),\n \"zeff\":np.where(transp_zeff > 1.0, transp_zeff, 1.0),\n \"omega\":transp_omega}\n if 'NH' in zz:\n profiles['denh'] = np.where(transp_nh > 0, transp_nh, 0.0)\n if 'ND' in zz:\n profiles['dend'] = np.where(transp_nd > 0, transp_nd, 0.0)\n if 'NT' in zz:\n profiles['dent'] = np.where(transp_nt > 0, transp_nt, 0.0)\n\n\n # Interpolate onto r-z grid\n dims = rhogrid.shape\n f_dene = interp1d(rho,transp_ne,fill_value='extrapolate')\n dene = f_dene(rhogrid)\n dene = np.where(dene > 0.0, dene, 0.0).astype('float64')\n\n f_denimp = interp1d(rho,transp_nimp,fill_value='extrapolate')\n denimp = f_denimp(rhogrid)\n denimp = np.where(denimp > 0.0, denimp, 0.0).astype('float64')\n\n f_denh = interp1d(rho,transp_nh,fill_value='extrapolate')\n denh = f_denh(rhogrid)\n denh = np.where(denh > 0.0, denh, 0.0).astype('float64')\n\n f_dend = interp1d(rho,transp_nd,fill_value='extrapolate')\n dend = f_dend(rhogrid)\n dend = np.where(dend > 0.0, dend, 0.0).astype('float64')\n\n f_dent = interp1d(rho,transp_nt,fill_value='extrapolate')\n dent = f_dent(rhogrid)\n dent = np.where(dent > 0.0, dent, 0.0).astype('float64')\n\n f_denn = interp1d(rho,np.log(transp_nn),fill_value=np.nan,bounds_error=False)\n log_denn = f_denn(rhogrid)\n denn = np.where(~np.isnan(log_denn), np.exp(log_denn), 0.0).astype('float64')\n\n f_te = interp1d(rho,transp_te,fill_value='extrapolate')\n te = f_te(rhogrid)\n te = np.where(te > 0, te, 0.0).astype('float64')\n\n f_ti = interp1d(rho,transp_ti,fill_value='extrapolate')\n ti = f_ti(rhogrid)\n ti = np.where(ti > 0, ti, 0.0).astype('float64')\n\n f_zeff = interp1d(rho,transp_zeff, fill_value=1.0, bounds_error=False)\n zeff = f_zeff(rhogrid)\n zeff = np.where(zeff > 1, zeff, 1.0).astype('float64')\n\n f_omega = interp1d(rho,transp_omega,fill_value='extrapolate')\n vt = grid['r2d']*f_omega(rhogrid).astype('float64')\n vr = np.zeros(dims,dtype='float64')\n vz = np.zeros(dims,dtype='float64')\n\n max_rho = max(abs(rho))\n\n mask = np.zeros(dims,dtype='int')\n w = np.where(rhogrid <= max_rho) #where we have profiles\n mask[w] = 1\n\n deni = np.concatenate((denh.reshape(1,dims[0],dims[1]),\n dend.reshape(1,dims[0],dims[1]),\n dent.reshape(1,dims[0],dims[1])),axis=0)\n\n ai = np.array([1.007276466879e0, 2.013553212745e0,3.01550071632e0])\n w_ai = [a in zz for a in 
['NH','ND','NT']]\n\n # SAVE IN PROFILES STRUCTURE\n plasma={\"data_source\":os.path.abspath(filename),\"time\":time,\"impurity_charge\":int(impurity_charge),\n \"nthermal\":int(np.sum(w_ai)), \"species_mass\":ai[w_ai], \"deni\":deni[w_ai,:,:],\"profiles\":profiles,\n \"mask\":mask,\"dene\":dene,\"denimp\":denimp,\"denn\":denn,\"te\":te,\"ti\":ti,\n \"vr\":vr,\"vt\":vt,\"vz\":vz,\"zeff\":zeff}\n\n return plasma\n\ndef read_nubeam(filename, grid, e_range=(), p_range=(), btipsign=-1, species=1):\n \"\"\"\n #+#read_nubeam\n #+Reads NUBEAM fast-ion distribution function\n #+***\n #+##Arguments\n #+ **filename**: NUBEAM guiding center fast-ion distribution function file e.g. 159245H01_fi_1.cdf\n #+\n #+ **grid**: Interpolation grid\n #+\n #+##Keyword Arguments\n #+ **btipsign**: Sign of the dot product of the magnetic field and plasma current\n #+\n #+ **e_range**: Energy range to consider\n #+\n #+ **p_range**: Pitch range to consider\n #+\n #+ **species**: Fast-ion species number. Defaults to 1\n #+\n #+##Return Value\n #+Distribution structure\n #+\n #+##Example Usage\n #+```python\n #+>>> dist = read_nubeam(\"./159245H02_fi_1.cdf\",grid,btipsign=-1)\n #+```\n \"\"\"\n\n species_var = \"SPECIES_{}\".format(species)\n sstr = read_ncdf(filename,vars=[species_var])[species_var].tostring().decode('UTF-8')\n print(\"Species: \"+sstr)\n var = read_ncdf(filename, vars=[\"TIME\",\"R2D\",\"Z2D\",\"E_\"+sstr,\"A_\"+sstr,\"F_\"+sstr,\"RSURF\",\"ZSURF\",\"BMVOL\"])\n\n ngrid = len(var[\"R2D\"])\n\n try:\n time = var[\"TIME\"][0]\n except:\n time = var[\"TIME\"]\n\n r2d = var[\"R2D\"]\n z2d = var[\"Z2D\"]\n rsurf = var[\"RSURF\"].T\n zsurf = var[\"ZSURF\"].T\n bmvol = var[\"BMVOL\"]\n pitch = var[\"A_\"+sstr]\n energy = var[\"E_\"+sstr]*1e-3\n fbm = var[\"F_\"+sstr].T*1e3\n fbm = np.where(fbm > 0.0, 0.5*fbm, 0.0) #0.5 to convert to pitch instead of solid angle d_omega/4pi\n\n if btipsign < 0:\n fbm = fbm[:,::-1,:] #reverse pitch elements\n\n if not e_range:\n e_range = (np.min(energy), np.max(energy))\n\n if not p_range:\n p_range = (np.min(pitch), np.max(pitch))\n\n # Trim distribution according to e/p_range\n we = np.logical_and(energy >= e_range[0], energy <= e_range[1])\n wp = np.logical_and(pitch >= p_range[0], pitch <= p_range[1])\n energy = energy[we]\n nenergy = len(energy)\n pitch = pitch[wp]\n npitch = len(pitch)\n fbm = fbm[we,:,:]\n fbm = fbm[:,wp,:]\n dE = np.abs(energy[1] - energy[0])\n dp = np.abs(pitch[1] - pitch[0])\n emin, emax = np.maximum(np.min(energy) - 0.5*dE, 0.0), np.max(energy) + 0.5*dE\n pmin, pmax = np.maximum(np.min(pitch) - 0.5*dp, -1.0), np.minimum(np.max(pitch)+0.5*dp, 1.0)\n\n print('Energy min/max: ', emin, emax)\n print('Pitch min/max: ',pmin, pmax)\n\n nr = grid[\"nr\"]\n nz = grid[\"nz\"]\n r = grid[\"r\"]\n z = grid[\"z\"]\n rgrid = grid[\"r2d\"]\n zgrid = grid[\"z2d\"]\n dr = np.abs(r[1] - r[0])\n dz = np.abs(z[1] - z[0])\n\n fdens = np.sum(fbm,axis=(0,1))*dE*dp\n ntot = np.sum(fdens*bmvol)\n print('Ntotal in phase space: ',ntot)\n\n tri = Delaunay(np.vstack((r2d,z2d)).T) # Triangulation for barycentric interpolation\n pts = np.array([xx for xx in zip(r2d,z2d)])\n itp = NearestNDInterpolator(pts,np.arange(ngrid)) #to find indices outside simplices\n\n points = np.array([xx for xx in zip(rgrid.flatten(),zgrid.flatten())])\n t = tri.find_simplex(points)\n\n denf = np.zeros((nr,nz))\n fbm_grid = np.zeros((nenergy,npitch,nr,nz))\n for (ind,tt) in enumerate(t):\n i,j = np.unravel_index(ind,(nr,nz))\n if tt == -1:\n ii = int(itp(r[i],z[j]))\n denf[i,j] = fdens[ii]\n 
fbm_grid[:,:,i,j] = fbm[:,:,ii]\n else:\n b = tri.transform[tt,:2].dot(np.transpose(points[ind] - tri.transform[tt,2]))\n s = tri.simplices[tt,:]\n #perform barycentric linear interpolation\n denf[i,j] = b[0]*fdens[s[0]] + b[1]*fdens[s[1]] + (1 - np.sum(b))*fdens[s[2]]\n fbm_grid[:,:,i,j] = b[0]*fbm[:,:,s[0]] + b[1]*fbm[:,:,s[1]] + (1-np.sum(b))*fbm[:,:,s[2]]\n\n denf[denf < 0] = 0\n\n # Correct for points outside of seperatrix\n rmaxis = np.mean(rsurf[:,0])\n zmaxis = np.mean(zsurf[:,0])\n r_sep = rsurf[:,-1]\n z_sep = zsurf[:,-1]\n\n #plt.triplot(r2d,z2d,tri.simplices.copy())\n #plt.plot(r2d,z2d,'o')\n #plt.plot(r_sep,z_sep)\n #plt.show()\n x_bdry = r_sep - rmaxis\n y_bdry = z_sep - zmaxis\n r_bdry = np.sqrt(x_bdry**2 + y_bdry**2)\n theta_bdry = np.arctan2(y_bdry,x_bdry)\n theta_bdry = np.where(theta_bdry < 0.0, theta_bdry + 2*np.pi, theta_bdry) #[0,2pi]\n w = np.argsort(theta_bdry)\n theta_bdry = theta_bdry[w]\n r_bdry = r_bdry[w]\n theta_bdry, w = np.unique(theta_bdry,return_index=True)\n r_bdry = r_bdry[w]\n itp = interp1d(theta_bdry,r_bdry,'cubic',fill_value='extrapolate')\n\n x_pts = grid[\"r2d\"] - rmaxis\n y_pts = grid[\"z2d\"] - zmaxis\n r_pts = np.sqrt(x_pts**2 + y_pts**2)\n theta_pts = np.arctan2(y_pts,x_pts)\n theta_pts = np.where(theta_pts < 0.0, theta_pts + 2*np.pi, theta_pts) #[0,2pi]\n r_bdry_itp = itp(theta_pts)\n\n w = r_pts >= r_bdry_itp + 2\n denf[w] = 0.0\n fbm_grid[:,:,w] = 0.0\n\n # enforce correct normalization\n ntot_denf = 2*np.pi*dr*dz*np.sum(r*np.sum(denf,axis=1))\n denf = denf*(ntot/ntot_denf)\n ntot_fbm = (2*np.pi*dE*dp*dr*dz)*np.sum(r*np.sum(fbm_grid,axis=(0,1,3)))\n fbm_grid = fbm_grid*(ntot/ntot_denf)\n\n\n fbm_dict={\"type\":1,\"time\":time,\"nenergy\":nenergy,\"energy\":energy,\"npitch\":npitch,\n \"pitch\":pitch,\"f\":fbm_grid,\"denf\":denf,\"data_source\":os.path.abspath(filename)}\n\n return fbm_dict\n\ndef nubeam_geometry(nubeam, angle=0.0, verbose=False):\n \"\"\"\n #+#nubeam_geometry\n #+Calculates the FIDASIM beam geometry from the beam geometry variables in the TRANSP/NUBEAM namelist\n #+***\n #+##Arguments\n #+ **NUBEAM**: Dictionary containing the following\n #+\n #+ **NUBEAM[\"NAME\"]**: Ion source name\n #+\n #+ **NUBEAM[\"NBSHAP\"]**: Ion source shape 1=rectangular, 2=circular\n #+\n #+ **NUBEAM[\"FOCLZ\"]**: Vertical focal length [cm]\n #+\n #+ **NUBEAM[\"FOCLR\"]**: Horizontal focal length [cm]\n #+\n #+ **NUBEAM[\"DIVZ\"]**: Vertical divergence [rad]\n #+\n #+ **NUBEAM[\"DIVR\"]**: Horizontal divergence [rad]\n #+\n #+ **NUBEAM[\"BMWIDZ\"]**: Ion source half height [cm]\n #+\n #+ **NUBEAM[\"BMWIDR\"]**: Ion source half width [cm]\n #+\n #+ **NUBEAM[\"RTCENA\"]**: Radius of tangency point [cm]\n #+\n #+ **NUBEAM[\"XLBTNA\"]**: Distance from center of beam source grid to tangency point [cm]\n #+\n #+ **NUBEAM[\"XBZETA\"]**: Torodial angle [deg] Positive angles defined to be in the counter-clockwise direction\n #+\n #+ **NUBEAM[\"XYBSCA\"]**: Elevation above/below vacuum vessel midplane of center of beam source grid [cm]\n #+\n #+ **NUBEAM[\"NLJCCW\"]**: Orientation of Ip. 
1 for True/Counter-clockwise current, 0 or -1 for False/Clock-wise current\n #+\n #+ **NUBEAM[\"NLCO\"]**: 1 for Co-beam, 0 or -1 for Counter-beam\n #+\n #+ **NUBEAM[\"NBAPSHA\"]**: Vector of aperture shapes 1=rectangular, 2=circular\n #+\n #+ **NUBEAM[\"XLBAPA\"]**: Vector of distances from center of beam source grid to the aperture plane [cm]\n #+\n #+ **NUBEAM[\"XYBAPA\"]**: Vector of elevation above/below vacuum vessel midplane of beam centerline at aperture [cm]\n #+\n #+ **NUBEAM[\"RAPEDGA\"]**: Vector of aperture half-widths [cm]\n #+\n #+ **NUBEAM[\"XZPEDGA\"]**: Vector of aperture half-heights [cm]\n #+\n #+ **NUBEAM[\"XRAPOFFA\"]**: Vector of horizontal (y) offsets relative to the +x aligned beam centerline [cm]\n #+\n #+ **NUBEAM[\"XZAPOFFA\"]**: Vector of vertical (z) offsets relative to the +x aligned beam centerline [cm]\n #+\n #+##Keyword Arguments\n #+ **angle**: Angle to add to XBZETA to rotate the beams into correct coordinates [deg]\n #+\n #+ **verbose**: Print out positions\n #+\n #+##Return Value\n #+ Neutral beam structure\n #+\n #+##Example Usage\n #+```python\n #+>>> nbi = nubeam_geometry(nubeam)\n #+```\n \"\"\"\n\n if nubeam[\"NLCO\"] == 0:\n nubeam[\"NLCO\"] = -1\n\n if \"NLJCCW\" in nubeam:\n if nubeam[\"NLJCCW\"] == 0:\n nubeam[\"NLJCCW\"] = -1\n else:\n warn(\"Current orientation not specified. Assuming Counter-clockwise.\")\n nubeam[\"NLJCCW\"] = 1\n\n phi_s = (nubeam[\"XBZETA\"] + angle)*np.pi/180.0\n zs = nubeam[\"XYBSCA\"]\n za = nubeam[\"XYBAPA\"][0]\n alpha = np.arcsin((zs-za)/nubeam[\"XLBAPA\"][0])\n pdst = nubeam[\"XLBTNA\"]*np.cos(alpha)\n rs = np.sqrt(nubeam[\"RTCENA\"]**2 + pdst**2)\n dat = nubeam[\"XLBTNA\"] - nubeam[\"XLBAPA\"][0]\n pdat = dat*np.cos(alpha)\n ra = np.sqrt(nubeam[\"RTCENA\"]**2 + pdat**2.0)\n beta_s = np.arccos(nubeam[\"RTCENA\"]/rs)\n beta_a = np.arccos(nubeam[\"RTCENA\"]/ra)\n phi_a = phi_s + nubeam[\"NLJCCW\"]*nubeam[\"NLCO\"]*(beta_s-beta_a)\n\n src = np.array([rs*np.cos(phi_s), rs*np.sin(phi_s),zs])\n aper_src = np.array([ra*np.cos(phi_a), ra*np.sin(phi_a),za])\n axis = (aper_src - src)\n axis = axis/np.sqrt(np.sum(axis**2))\n pos = src + axis*nubeam[\"XLBTNA\"]\n\n if verbose:\n print('Source position: ',src)\n print('1st Aperture position: ',aper_src)\n print('Tangency position: ', pos)\n\n nbi = {\"data_source\":\"TRANSP/NUBEAM namelist\",\"name\":nubeam[\"NAME\"],\n \"shape\":nubeam[\"NBSHAP\"],\"src\":src,\"axis\":axis,\n \"focy\":nubeam[\"FOCLR\"],\"focz\":nubeam[\"FOCLZ\"],\n \"divy\":np.repeat(nubeam[\"DIVR\"],3),\n \"divz\":np.repeat(nubeam[\"DIVZ\"],3),\n \"widy\":nubeam[\"BMWIDR\"], \"widz\":nubeam[\"BMWIDZ\"],\n \"naperture\":len(nubeam[\"NBAPSHA\"]),\"ashape\":nubeam[\"NBAPSHA\"],\n \"awidy\":nubeam[\"RAPEDGA\"],\"awidz\":nubeam[\"XZPEDGA\"],\n \"aoffy\":nubeam[\"XRAPOFFA\"],\"aoffz\":nubeam[\"XZAPOFFA\"],\n \"adist\":nubeam[\"XLBAPA\"] }\n\n return nbi\n" ]
[ [ "numpy.ones", "numpy.sum", "scipy.interpolate.interp1d", "numpy.argsort", "scipy.interpolate.interp2d", "numpy.log", "scipy.io.netcdf.netcdf_file", "numpy.unravel_index", "numpy.vstack", "numpy.transpose", "numpy.append", "numpy.logical_and", "numpy.abs", "numpy.cos", "numpy.arccos", "numpy.isnan", "numpy.where", "numpy.unique", "numpy.mean", "numpy.tile", "numpy.ceil", "numpy.zeros", "numpy.string_", "numpy.repeat", "numpy.mod", "numpy.arange", "numpy.max", "numpy.min", "numpy.array", "numpy.arctan2", "numpy.arcsin", "numpy.sign", "numpy.floor", "numpy.exp", "numpy.sqrt", "numpy.sin", "numpy.dot" ] ]
laceyg/ternarynet
[ "b17744c2aba3aba7e7e72decb3b8a02792d33b54" ]
[ "tools/tensorpack/tensorpack/tfutils/gradproc.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: gradproc.py\n# Author: Yuxin Wu <[email protected]>\n\nimport tensorflow as tf\nfrom abc import ABCMeta, abstractmethod\nimport re\nimport inspect\nfrom ..utils import logger\nfrom .symbolic_functions import rms\nfrom .summary import add_moving_summary\n\n__all__ = ['GradientProcessor', 'SummaryGradient', 'CheckGradient',\n 'ScaleGradient', 'MapGradient', 'apply_grad_processors',\n 'GlobalNormClip']\n\ndef apply_grad_processors(grads, gradprocs):\n \"\"\"\n :param grads: list of (grad, var).\n :param gradprocs: list of `GradientProcessor` instances.\n :returns: list of (grad, var) went through the processors\n \"\"\"\n g = []\n for grad, var in grads:\n if grad is None:\n logger.warn(\"No Gradient w.r.t {}\".format(var.op.name))\n else:\n g.append((grad, var))\n for proc in gradprocs:\n g = proc.process(g)\n return g\n\nclass GradientProcessor(object):\n __metaclass__ = ABCMeta\n\n def process(self, grads):\n \"\"\"\n Process the symbolic gradients.\n\n :param grads: list of (grad, var)\n :returns: symbolic gradients with the same type as input\n \"\"\"\n with tf.name_scope(type(self).__name__):\n return self._process(grads)\n\n @abstractmethod\n def _process(self, grads):\n pass\n\n\nclass GlobalNormClip(GradientProcessor):\n def __init__(self, global_norm):\n \"\"\" Clip by global norm\n Note that the global norm is the sum of norm for **all** gradients\n \"\"\"\n self._norm = global_norm\n\n def _process(self, grads):\n g = [k[0] for k in grads]\n v = [k[1] for k in grads]\n g, _ = tf.clip_by_global_norm(g, self._norm, name='clip_by_global_norm')\n return list(zip(g, v))\n\nclass MapGradient(GradientProcessor):\n \"\"\"\n Apply a function on all gradient if the name matches regex.\n Keep the other gradients unchanged.\n \"\"\"\n def __init__(self, func, regex='.*'):\n \"\"\"\n :param func: takes a grad or (grad, var) pair and returns a grad. If return None, the\n gradient is discarded.\n :param regex: used to match variables. default to match all variables.\n \"\"\"\n args = inspect.getargspec(func).args\n arg_num = len(args) - inspect.ismethod(func)\n assert arg_num in [1, 2], \\\n \"The function must take 1 or 2 arguments! ({})\".format(args)\n if arg_num == 1:\n self.func = lambda grad, var: func(grad)\n else:\n self.func = func\n\n if not regex.endswith('$'):\n regex = regex + '$'\n self.regex = regex\n\n def _process(self, grads):\n ret = []\n for grad, var in grads:\n if re.match(self.regex, var.op.name):\n grad = self.func(grad, var)\n if grad is not None:\n ret.append((grad, var))\n else:\n ret.append((grad, var))\n return ret\n\n_summaried_gradient = set()\n\nclass SummaryGradient(MapGradient):\n \"\"\"\n Summary history and RMS for each graident variable\n \"\"\"\n def __init__(self):\n super(SummaryGradient, self).__init__(self._mapper)\n\n def _mapper(self, grad, var):\n name = var.op.name\n if name not in _summaried_gradient:\n _summaried_gradient.add(name)\n tf.histogram_summary(name + '/grad', grad)\n add_moving_summary(rms(grad, name=name + '/rms'))\n return grad\n\nclass CheckGradient(MapGradient):\n \"\"\"\n Check for numeric issue.\n \"\"\"\n def __init__(self):\n super(CheckGradient, self).__init__(self._mapper)\n\n def _mapper(self, grad, var):\n # this is very slow.... 
see #3649\n #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)\n grad = tf.check_numerics(grad, 'CheckGradient-' + var.op.name)\n return grad\n\nclass ScaleGradient(MapGradient):\n \"\"\"\n Scale certain gradient by a multiplier\n \"\"\"\n def __init__(self, multipliers, log=True):\n \"\"\"\n :param multipliers: list of (regex, float)\n :param log: whether to do logging or not\n \"\"\"\n if not isinstance(multipliers, list):\n multipliers = [multipliers]\n self.multipliers = multipliers\n self._log = log\n super(ScaleGradient, self).__init__(self._mapper)\n\n def _mapper(self, grad, var):\n varname = var.op.name\n for regex, val in self.multipliers:\n # always match against the whole name\n if not regex.endswith('$'):\n regex = regex + '$'\n\n if re.match(regex, varname):\n if self._log:\n logger.info(\"Apply lr multiplier {} for {}\".format(val, varname))\n if val != 0: # skip zero to speed up\n return grad * val\n else:\n return None\n return grad\n" ]
[ [ "tensorflow.histogram_summary", "tensorflow.clip_by_global_norm", "tensorflow.check_numerics" ] ]
ScSteffen/neuralEntropy
[ "796e0b38ac9c01f59772d49be3368b8ac9ad24d7" ]
[ "experimental/sandBoxGeneral.py" ]
[ "### imports\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import initializers\n\nimport src.utils as utils\n\n# import tensorflow.keras.backend as K\nimport matplotlib.pyplot as plt\n\nfrom src.utils import finiteDiff, integrate, loadData, evaluateModel\n\nplt.style.use(\"kitish\")\n\n\n# ------ Code starts here --------\n\ndef main():\n y = [6.51778e-55,\n 9.20148e-53,\n 1.1754e-50,\n 1.35858e-48,\n 1.42087e-46,\n 1.3446e-44,\n 1.15134e-42,\n 8.92042e-41,\n 6.2537e-39,\n 3.96697e-37,\n 2.27694e-35,\n 1.18254e-33,\n 5.5571e-32,\n 2.36294e-30,\n 9.09133e-29,\n 3.165e-27,\n 9.96986e-26,\n 2.84168e-24,\n 7.3288e-23,\n 1.71025e-21,\n 3.61126e-20,\n 6.89965e-19,\n 1.1928e-17,\n 1.86585e-16,\n 2.64093e-15,\n 3.38226e-14,\n 3.91948e-13,\n 4.1098e-12,\n 3.89927e-11,\n 3.34747e-10,\n 2.60028e-09,\n 1.82766e-08,\n 1.16236e-07,\n 6.6889e-07,\n 3.4829e-06,\n 1.64096e-05,\n 6.99559e-05,\n 0.00026985,\n 0.000941867,\n 0.0029746,\n 0.00850037,\n 0.0219795,\n 0.0514242,\n 0.108865,\n 0.208536,\n 0.361445,\n 0.566858,\n 0.80441,\n 1.03288,\n 1.20004,\n 1.26157,\n 1.20004,\n 1.03288,\n 0.80441,\n 0.566858,\n 0.361445,\n 0.208536,\n 0.108865,\n 0.0514242,\n 0.0219795,\n 0.00850037,\n 0.0029746,\n 0.000941867,\n 0.00026985,\n 6.99559e-05,\n 1.64096e-05,\n 3.4829e-06,\n 6.6889e-07,\n 1.16236e-07,\n 1.82766e-08,\n 2.60028e-09,\n 3.34747e-10,\n 3.89927e-11,\n 4.1098e-12,\n 3.91948e-13,\n 3.38226e-14,\n 2.64093e-15,\n 1.86585e-16,\n 1.1928e-17,\n 6.89965e-19,\n 3.61126e-20,\n 1.71025e-21,\n 7.3288e-23,\n 2.84168e-24,\n 9.96986e-26,\n 3.165e-27,\n 9.09133e-29,\n 2.36294e-30,\n 5.5571e-32,\n 1.18254e-33,\n 2.27694e-35,\n 3.96697e-37,\n 6.2537e-39,\n 8.92042e-41,\n 1.15134e-42,\n 1.3446e-44,\n 1.42087e-46,\n 1.35858e-48,\n 1.1754e-50,\n 9.20148e-53]\n x = np.linspace(-5, 5, 100)\n plt.plot(x, y)\n plt.show()\n int = sum(y) / 10;\n print(int)\n # --- Set Parameters ---\n batchSize = 64\n epochCount = 5000\n filename1 = 'models/sandbox/best_model_linear.h5'\n filename2 = 'models/sandbox/best_model_tscheb.h5'\n\n nwHeight = 8\n nwWidth = 5\n inputDim = 1\n nPts = 5000\n maxIter = 1000\n\n # test Data\n [xTest, yTest] = createTrainingData(nPts * 100, -5, 5, mode=\"linear\")\n # xTest = xTest[1::2]\n # yTest = yTest[1::2]\n\n ### linear data\n [xL, yL] = createTrainingData(maxIter * 3, -5, 5, mode=\"linear\") # samples data between -1 and 1\n [xT, yT] = [xL, yL] # utils.shuffleTrainData(x, y)\n\n model1 = createModelRelu(nwWidth, nwHeight, inputDim)\n # model1.load_weights(filenameInit)\n\n multistepTraining(xL, yL, model1, maxIter, epochCount, batchSize)\n\n return 0\n\n\ndef multistepTraining(xT, yT, model, maxIter, epochs, batchSize):\n filename1 = 'models/sandbox/best_model_linear.h5'\n trainLen = xT.shape[0]\n mc_best = tf.keras.callbacks.ModelCheckpoint(filename1, monitor='loss', mode='min',\n save_best_only=True,\n verbose=2)\n xTList = list(xT)\n yTList = list(yT)\n\n yList = []\n xList = []\n\n ypred = model(xT)\n ypredArray = np.asarray(ypred)\n yDiff = np.linalg.norm(ypredArray - yT, axis=0, ord=2)\n newY = np.amax(yDiff)\n newIdx = np.where(yDiff == newY)[0]\n\n yList.append([yTList.pop(0)])\n yList.append([yTList.pop(-1)])\n xList.append([xTList.pop(0)])\n xList.append([xTList.pop(-1)])\n\n for iter in range(0, maxIter):\n xarr = np.asarray(xList)\n yarr = np.asarray(yList)\n history = model.fit(x=xarr, y=yarr,\n validation_split=0.0,\n epochs=epochs,\n batch_size=batchSize,\n verbose=0)\n\n print(\"Trained on 
iteration: \" + str(iter))\n\n # Get new data an evaluate current data\n ypred = model(np.asarray(xTList))\n ypredArray = np.asarray(ypred)\n tmp = np.asarray(yTList).reshape(ypredArray.shape)\n yDiff = ypredArray - tmp\n yDiff = np.absolute(yDiff)\n newY = np.amax(yDiff)\n newIdxes = np.where(yDiff == newY)\n newIdx = newIdxes[0]\n\n utils.plot1D(np.asarray(xTList), [np.asarray(yTList), ypredArray, yDiff], [\"y\", \"model\", \"difference\"],\n\n '../models/sandbox/prediction' + str(iter),\n log=False)\n\n # sort points\n\n utils.plot1D(xarr, [yarr], [\"Interpolation points\"],\n '../models/sandbox/datapts' + str(iter),\n log=False, linetypes=['*'])\n\n # print histories\n utils.plot1D(history.epoch, [history.history['loss']],\n [\"model loss\"],\n '../models/sandbox/traininghistory' + str(iter),\n log=True, linetypes=['-', '--'])\n\n yList.append([yTList.pop(newIdx[0])])\n xList.append([xTList.pop(newIdx[0])])\n return 0\n\n\ndef createTrainingData(nPts, a=-1, b=1, mode=\"linear\"):\n if (mode == \"tscheb\"):\n x = np.zeros((nPts,))\n degN = nPts - 1\n for k in range(0, nPts):\n tmp = np.cos((1 + 2 * (degN - k)) / (2 * (degN + 1)) * np.pi)\n x[k] = a + (tmp + 1) / 2 * (b - a)\n\n else: # (mode == \"linear\"):\n x = np.linspace(a, b, nPts)\n\n y = rungeFunc(x)\n\n return [x, y]\n\n\ndef rungeFunc(x):\n return 1 / (1 + x * x)\n\n\ndef quadFunc(x):\n return x * x\n\n\ndef createModel(nwWidth, nwHeight, inputDim): # Build the network:\n\n # basic dense network\n # Define the input\n\n # Weight initializer for sofplus after K Kumar\n input_stddev = np.sqrt((1 / inputDim) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))\n hidden_stddev = np.sqrt((1 / nwWidth) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))\n\n hiddenInitializer = initializers.RandomNormal(mean=0., stddev=hidden_stddev)\n inputLayerInitializer = initializers.RandomNormal(mean=0., stddev=input_stddev)\n # hiddenInitializer = initializers.Zeros()\n # inputLayerInitializer = initializers.Zeros()\n\n biasInitializer = initializers.Zeros()\n\n #### input layer ####\n input_ = keras.Input(shape=(inputDim,))\n hidden = layers.Dense(nwWidth, activation=\"softplus\", kernel_initializer=inputLayerInitializer,\n bias_initializer=biasInitializer)(input_)\n\n # hidden Layer\n for idx in range(0, nwHeight):\n hidden = layers.Dense(nwWidth, activation=\"softplus\", kernel_initializer=hiddenInitializer,\n bias_initializer=biasInitializer)(hidden)\n\n output_ = layers.Dense(1, activation=None, kernel_initializer=inputLayerInitializer,\n bias_initializer=biasInitializer)(hidden)\n\n # Create the model\n model = keras.Model(inputs=[input_], outputs=[output_], name=\"model1\")\n model.summary()\n\n model.compile(loss=\"mean_squared_error\", optimizer='adam', metrics=['mean_absolute_error'])\n\n return model\n\n\ndef createModelRelu(nwWidth, nwHeight, inputDim): # Build the network:\n\n # basic dense network\n # Define the input\n\n # Weight initializer for sofplus after K Kumar\n input_stddev = np.sqrt((1 / inputDim) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))\n hidden_stddev = np.sqrt((1 / nwWidth) * (1 / ((1 / 2) ** 2)) * (1 / (1 + np.log(2) ** 2)))\n\n hiddenInitializer = initializers.RandomNormal(mean=0., stddev=hidden_stddev)\n inputLayerInitializer = initializers.RandomNormal(mean=0., stddev=input_stddev)\n # hiddenInitializer = initializers.Zeros()\n # inputLayerInitializer = initializers.Zeros()\n\n biasInitializer = initializers.Zeros()\n\n #### input layer ####\n input_ = keras.Input(shape=(inputDim,))\n hidden = 
layers.Dense(nwWidth, activation=\"softplus\", kernel_initializer=inputLayerInitializer,\n bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(input_)\n\n # hidden Layer\n for idx in range(0, nwHeight):\n hidden = layers.Dense(nwWidth, activation=\"softplus\", kernel_initializer=hiddenInitializer,\n bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(hidden)\n\n output_ = layers.Dense(1, activation=None, kernel_initializer=inputLayerInitializer,\n bias_initializer=biasInitializer, kernel_regularizer='l1_l2')(hidden)\n\n # Create the model\n model = keras.Model(inputs=[input_], outputs=[output_], name=\"model1\")\n model.summary()\n\n model.compile(loss=\"mean_squared_error\", optimizer='adam', metrics=['mean_absolute_error'])\n\n return model\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.style.use", "numpy.linalg.norm", "numpy.zeros", "numpy.asarray", "tensorflow.keras.Model", "numpy.cos", "tensorflow.keras.initializers.Zeros", "matplotlib.pyplot.show", "numpy.log", "tensorflow.keras.initializers.RandomNormal", "numpy.absolute", "tensorflow.keras.layers.Dense", "numpy.amax", "matplotlib.pyplot.plot", "numpy.where", "tensorflow.keras.callbacks.ModelCheckpoint", "numpy.linspace", "tensorflow.keras.Input" ] ]
amzkit/nilmtk
[ "9c9add85f3e85ee549412b0a50d46781f7bfbe22" ]
[ "nilmtk/dataset_converters/combed/convert_combed.py" ]
[ "from __future__ import print_function, division\nfrom os.path import join, isdir, dirname, abspath\nfrom os import getcwd\nimport os\nfrom sys import getfilesystemencoding\nfrom inspect import currentframe, getfile, getsourcefile\nfrom collections import OrderedDict\nfrom six import iteritems\n\nimport pandas as pd\nfrom nilm_metadata import convert_yaml_to_hdf5\n\nfrom nilmtk.datastore import Key\nfrom nilmtk.measurement import LEVEL_NAMES\nfrom nilmtk.utils import check_directory_exists, get_datastore\n\n#{\"load_type\": {\"floor/wing\":meter_number_in_nilmtk}\nacad_block_meter_mapping = {'Building Total Mains': {'0': 1},\n 'Lifts': {'0': 2},\n 'Floor Total': {'1': 3, '2': 4, '3': 5, '4': 6, '5': 7},\n 'AHU': {'0': 8, '1': 9, '2': 10, '5': 11},\n 'Light': {'3': 12},\n 'Power Sockets': {'3': 13},\n 'UPS Sockets': {'3': 14}}\n\nlecture_block_meter_mapping = {'Building Total Mains': {'0': 1},\n 'Floor Total': {'0': 2, '1': 3, '2': 4},\n 'AHU': {'1': 5, '2': 6, '3': 7}}\n\noverall_dataset_mapping = OrderedDict({'Academic Block': acad_block_meter_mapping,\n 'Lecture Block': lecture_block_meter_mapping})\n\nbuilding_number_mapping = {'Academic Block': 1, 'Lecture Block': 2}\n\n\ncolumn_mapping = OrderedDict({\n 'Power': ('power', 'active'),\n 'Energy': ('energy', 'active'),\n 'Current': ('current', '')})\n\n\ndef convert_combed(combed_path, output_filename, format='HDF'):\n \"\"\"\n Parameters\n ----------\n combed_path : str\n The root path of the combed dataset.\n output_filename : str\n The destination HDF5 filename (including path and suffix).\n \"\"\"\n\n check_directory_exists(combed_path)\n\n # Open store\n store = get_datastore(output_filename, format, mode='w')\n\n any_file_converted = False\n \n for building_name, building_mapping in iteritems(overall_dataset_mapping):\n for load_name, load_mapping in iteritems(building_mapping):\n for load_mapping_path, meter_number in iteritems(load_mapping):\n building_number = building_number_mapping[building_name]\n key = Key(building=building_number, meter=meter_number)\n dfs = []\n for attribute in column_mapping.keys():\n filename_attribute = join(combed_path, building_name, load_name, load_mapping_path, \"%s.csv\" %attribute)\n if not os.path.isfile(filename_attribute):\n # File not found directly in the combed_path provided\n # Try adding 'iiitd' to it\n filename_attribute = join(combed_path, 'iiitd', building_name, load_name, load_mapping_path, \"%s.csv\" %attribute)\n \n if os.path.isfile(filename_attribute):\n exists = True\n print(filename_attribute)\n df = pd.read_csv(filename_attribute, names=[\"timestamp\", attribute])\n df.index = pd.to_datetime(df[\"timestamp\"], unit='ms')\n df = df.drop(\"timestamp\", 1)\n dfs.append(df)\n else:\n exists = False\n \n if exists:\n total = pd.concat(dfs, axis=1)\n total = total.tz_localize('UTC').tz_convert('Asia/Kolkata')\n total.columns = pd.MultiIndex.from_tuples([column_mapping[x] for x in total.columns])\n total.columns.set_names(LEVEL_NAMES, inplace=True)\n assert total.index.is_unique\n store.put(str(key), total)\n any_file_converted = True\n \n if not any_file_converted:\n raise RuntimeError('No files converted, did you specify the correct path?')\n \n convert_yaml_to_hdf5(join(_get_module_directory(), 'metadata'),\n output_filename)\n\n print(\"Done converting COMBED to HDF5!\")\n\n \ndef _get_module_directory():\n # Taken from http://stackoverflow.com/a/6098238/732596\n path_to_this_file = dirname(getfile(currentframe()))\n if not isdir(path_to_this_file):\n encoding = 
getfilesystemencoding()\n path_to_this_file = dirname(unicode(__file__, encoding))\n if not isdir(path_to_this_file):\n path_to_this_file = abspath(getsourcefile(lambda _: None))\n if not isdir(path_to_this_file):\n path_to_this_file = getcwd()\n assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'\n return path_to_this_file\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.MultiIndex.from_tuples", "pandas.concat" ] ]
skailuxspa/OpenCV-Tools
[ "ef0d16533722779e3a3dd40de494e28c1aa1a39e" ]
[ "calibration.py" ]
[ "import numpy as np\r\nimport cv2\r\n\r\ncalibration_state = 0\r\ndebug_state = 1\r\nrunning_state = 2\r\nstate = calibration_state\r\n\r\ncalibration_frame_max = 100\r\ncalibration_frame_current = 0\r\nHmin, Hmax, Hmean, Hstdv = 0, 0, 0, 0\r\nSmin, Smax, Smean, Sstdv = 0, 0, 0, 0\r\nlower_bound, upper_bound = 0, 0\r\n\r\nbeta_1 = 2.5\r\nbeta_2 = 2.5\r\n\r\nchroma_mask = 0\r\n\r\ndef initialize_calibration():\r\n print(\"restarting calibration\")\r\n calibration_frame_max = 100\r\n calibration_frame_current = 0\r\n Hmin, Hmax, Hmean, Hstdv = 0, 0, 0, 0\r\n Smin, Smax, Smean, Sstdv = 0, 0, 0, 0\r\n state = calibration_state\r\n \r\ndef calculate_bounds():\r\n Hmin = np.clip(Hmean - ((beta_1/100) * Hstdv), 0, 255)\r\n Hmax = np.clip(Hmean + ((beta_1/100) * Hstdv), 0, 255)\r\n Smin = np.clip(Smean - ((beta_2/100) * Sstdv), 0, 255)\r\n Smax = np.clip(Smean + ((beta_2/100) * Sstdv), 0, 255)\r\n lower_bound = np.array([Hmin, Smin, 50], dtype=np.uint8)\r\n upper_bound = np.array([Hmax, Smax, 255], dtype=np.uint8)\r\n chroma_mask = cv2.inRange(frame_hsv, lower_bound, upper_bound)\r\n\r\ndef change_b1(x):\r\n print(\"beta 1:\", x)\r\n print(Hmin, Hmax, Hmean, Hstdv)\r\n beta_1 = x\r\n\r\ndef change_b2(x):\r\n print(\"beta 2:\", x)\r\n print(Smin, Smax, Smean, Sstdv)\r\n beta_2 = x\r\n\r\ncv2.namedWindow(\"Sliders\")\r\ncv2.createTrackbar(\"Beta 1\", \"Sliders\", 6, 10, change_b1)\r\ncv2.createTrackbar(\"Beta 2\", \"Sliders\", 6, 10, change_b2)\r\n\r\ncap = cv2.VideoCapture(1)\r\n\r\nwhile(True):\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n if state is calibration_state:\r\n print(\"Current calibration frame:\", calibration_frame_current)\r\n #split hsv channels\r\n h, s, v = cv2.split(frame_hsv)\r\n\r\n #calculate mean and stdv for current frames h and s channels\r\n buffer_Hmean, buffer_Hstdv = cv2.meanStdDev(h)\r\n buffer_Smean, buffer_Sstdv = cv2.meanStdDev(s)\r\n \r\n #accumulate the buffers\r\n Hmean += buffer_Hmean\r\n Hstdv += buffer_Hstdv\r\n Smean += buffer_Smean\r\n Sstdv += buffer_Sstdv\r\n\r\n calibration_frame_current += 1\r\n if calibration_frame_current is calibration_frame_max - 1:\r\n #calibration algorithm\r\n Hmean = Hmean / calibration_frame_max\r\n Hstdv = Hstdv / calibration_frame_max\r\n Smean = Smean / calibration_frame_max\r\n Sstdv = Sstdv / calibration_frame_max\r\n \r\n Hmin = np.clip(Hmean - (beta_1 * Hstdv), 0, 255)\r\n Hmax = np.clip(Hmean + (beta_1 * Hstdv), 0, 255)\r\n Smin = np.clip(Smean - (beta_2 * Sstdv), 0, 255)\r\n Smax = np.clip(Smean + (beta_2 * Sstdv), 0, 255)\r\n lower_bound = np.array([Hmin, Smin, 0], dtype=np.uint8)\r\n upper_bound = np.array([Hmax, Smax, 255], dtype=np.uint8)\r\n chroma_mask = 255 - cv2.inRange(frame_hsv, lower_bound, upper_bound)\r\n kernel = np.ones((3,3), np.uint8)\r\n chroma_mask = cv2.morphologyEx(chroma_mask, cv2.MORPH_OPEN, kernel)\r\n\r\n #next state change\r\n state = debug_state\r\n print(\"Hmean:\", Hmean, \"Hstdv:\", Hstdv, \"Hmin:\", Hmin, \"Hmax:\", Hmax)\r\n print(\"Smean:\", Smean, \"Sstdv:\", Sstdv, \"Smin:\", Smin, \"Smax:\", Smax)\r\n print(\"going to debug state\")\r\n\r\n elif state is debug_state:\r\n Hmin = np.clip(Hmean - (beta_1 * Hstdv), 0, 255)\r\n Hmax = np.clip(Hmean + (beta_1 * Hstdv), 0, 255)\r\n Smin = np.clip(Smean - (beta_2 * Sstdv), 0, 255)\r\n Smax = np.clip(Smean + (beta_2 * Sstdv), 0, 255)\r\n lower_bound = np.array([Hmin, Smin, 0], dtype=np.uint8)\r\n upper_bound = np.array([Hmax, Smax, 255], dtype=np.uint8)\r\n 
chroma_mask = 255 - cv2.inRange(frame_hsv, lower_bound, upper_bound)\r\n kernel = np.ones((3,3), np.uint8)\r\n chroma_mask = cv2.morphologyEx(chroma_mask, cv2.MORPH_OPEN, kernel)\r\n #chroma_mask = cv2.erode(chroma_mask, kernel, iterations = 1)\r\n# elif state is running_state:\r\n\r\n if state is calibration_state:\r\n cv2.imshow(\"asdf\", frame)\r\n elif state is debug_state:\r\n calibrated_frame = cv2.bitwise_and(frame, frame, mask=chroma_mask)\r\n cv2.imshow(\"asdf\", calibrated_frame)\r\n cv2.imshow(\"mask\", chroma_mask)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('c'):\r\n #restarting calibration\r\n print(\"restarting calibration\")\r\n calibration_frame_max = 100\r\n calibration_frame_current = 0\r\n Hmin, Hmax, Hmean, Hstdv = 0, 0, 0, 0\r\n Smin, Smax, Smean, Sstdv = 0, 0, 0, 0\r\n state = calibration_state\r\n #initialize_calibration()\r\n\r\n # Quit the thing\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n# When everything done, release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()" ]
[ [ "numpy.array", "numpy.ones", "numpy.clip" ] ]