Columns: repo_name (string, lengths 8–130) · hexsha (sequence) · file_path (sequence) · code (sequence) · apis (sequence)
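Each row pairs a repository with per-file hexshas, paths, source code, and the APIs each file calls. A minimal sketch of iterating over such rows, assuming they are stored as JSON Lines with exactly the fields listed above ("corpus.jsonl" is an illustrative file name, not taken from this dataset):

```python
import json

# Walk a JSON-Lines dump of the rows shown below; each row carries parallel
# lists: one file_path / code string / api list per file in the repository.
with open("corpus.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        repo = row["repo_name"]                      # e.g. "kuolunwang/DoorGym"
        for path, code, apis in zip(row["file_path"], row["code"], row["apis"]):
            print(f"{repo}:{path} -> {len(code)} chars, {len(apis)} APIs")
```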
kuolunwang/DoorGym
[ "d9fbb67382756e659025b640857ede3a3735fb1d" ]
[ "a2c_ppo_acktr/envs.py" ]
[ "import os\nimport sys\n\nimport gym\nimport numpy as np\nimport torch\nfrom gym.spaces.box import Box\n\nfrom baselines import bench\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\nfrom baselines.common.vec_env.vec_env import \\\n VecEnvWrapper, VecEnv, CloudpickleWrapper, clear_mpi_env_vars\nfrom baselines.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom baselines.common.vec_env.shmem_vec_env import ShmemVecEnv, _subproc_worker\nfrom baselines.common.vec_env.vec_normalize import \\\n VecNormalize as VecNormalize_\nfrom baselines.common.vec_env.util import dict_to_obs, obs_space_info, obs_to_dict\n\nimport multiprocessing as mp\nimport ctypes\nfrom baselines import logger\n\n_NP_TO_CT = {np.float64: ctypes.c_double,\n np.float32: ctypes.c_float,\n np.int32: ctypes.c_int32,\n np.int8: ctypes.c_int8,\n np.uint8: ctypes.c_char,\n np.bool: ctypes.c_bool}\n\ntry:\n import dm_control2gym\nexcept ImportError:\n pass\n\ntry:\n import roboschool\nexcept ImportError:\n pass\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pass\n\n# Derived from\n# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/shmem_vec_env.py\nclass ShmemVecEnv_DR(ShmemVecEnv):\n def __init__(self, env_fns, spaces=None, context='spawn'):\n \"\"\"\n If you don't specify observation_space, we'll have to create a dummy\n environment to get it.\n \"\"\"\n\n ctx = mp.get_context(context)\n if spaces:\n observation_space, action_space = spaces\n else:\n logger.log('Creating dummy env object to get spaces')\n with logger.scoped_configure(format_strs=[]):\n dummy = env_fns[0]()\n observation_space, action_space = dummy.observation_space, dummy.action_space\n #dummy.close()\n try:\n self.visionnet_input = dummy.env.env.env.visionnet_input\n self.nn = dummy.env.env.env.nn\n self.xml_path = dummy.env.env.env.xml_path\n if dummy.env.env.env.unity:\n dummy.env.env.env.close() ## HACK>>>\n except Exception as e:\n print(e)\n pass\n del dummy\n\n VecEnv.__init__(self, len(env_fns), observation_space, action_space)\n self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)\n self.obs_bufs = [\n {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}\n for _ in env_fns]\n self.parent_pipes = []\n self.procs = []\n with clear_mpi_env_vars():\n for env_fn, obs_buf in zip(env_fns, self.obs_bufs):\n wrapped_fn = CloudpickleWrapper(env_fn)\n parent_pipe, child_pipe = ctx.Pipe()\n proc = ctx.Process(target=_subproc_worker,\n args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))\n proc.daemon = True\n self.procs.append(proc)\n self.parent_pipes.append(parent_pipe)\n proc.start()\n child_pipe.close()\n self.waiting_step = False\n self.viewer = None\n\n def step_async(self, actions):\n assert len(actions) == len(self.parent_pipes)\n for pipe, act in zip(self.parent_pipes, actions):\n pipe.send(('step', act))\n\n def step_wait(self):\n outs = [pipe.recv() for pipe in self.parent_pipes]\n obs, rews, dones, infos = zip(*outs)\n return self._decode_obses(obs), np.array(rews), np.array(dones), infos\n\ndef make_env(env_id, seed, rank, log_dir, allow_early_resets, env_kwargs=None):\n def _thunk():\n if env_id.find(\"doorenv\")>-1:\n env = gym.make(env_id, **env_kwargs) #\n env._max_episode_steps = 512\n if env_kwargs['unity']:\n env.env.init(rank)\n elif env_id.find('Fetch')>-1:\n env = gym.make(env_id, reward_type=\"sparse\")\n env = gym.wrappers.FlattenDictWrapper(env, 
dict_keys=['observation', 'desired_goal'])\n else:\n if env_id.startswith(\"dm\"):\n _, domain, task = env_id.split('.')\n env = dm_control2gym.make(domain_name=domain, task_name=task)\n else:\n env = gym.make(env_id) \n env._max_episode_steps = 512\n is_atari = hasattr(gym.envs, 'atari') and isinstance(\n env.unwrapped, gym.envs.atari.atari_env.AtariEnv)\n if is_atari:\n env = make_atari(env_id)\n\n env.seed(seed + rank)\n\n obs_shape = env.observation_space.shape\n\n if str(env.__class__.__name__).find('TimeLimit') >= 0:\n env = TimeLimitMask(env)\n\n if log_dir is not None:\n env = bench.Monitor(\n env,\n os.path.join(log_dir, str(rank)),\n allow_early_resets=allow_early_resets)\n\n if is_atari:\n if len(env.observation_space.shape) == 3:\n env = wrap_deepmind(env)\n elif len(env.observation_space.shape) == 3:\n raise NotImplementedError(\n \"CNN models work only for atari,\\n\"\n \"please use a custom wrapper for a custom pixel input env.\\n\"\n \"See wrap_deepmind for an example.\")\n\n # If the input has shape (W,H,3), wrap for PyTorch convolutions\n obs_shape = env.observation_space.shape\n if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:\n env = TransposeImage(env, op=[2, 0, 1])\n\n return env\n\n return _thunk\n\ndef make_vec_envs(env_name,\n seed,\n num_processes,\n gamma,\n log_dir,\n device,\n allow_early_resets,\n num_frame_stack=None,\n env_kwargs=None,):\n envs = [\n make_env(env_name, seed, i, log_dir, allow_early_resets, env_kwargs)\n for i in range(num_processes)\n ]\n\n if len(envs) > 1:\n envs = ShmemVecEnv_DR(envs, context='fork')\n else:\n envs = DummyVecEnv(envs)\n\n if len(envs.observation_space.shape) == 1:\n if gamma is None:\n envs = VecNormalize(envs, ret=False)\n else:\n envs = VecNormalize(envs, gamma=gamma)\n\n envs = VecPyTorch(envs, device)\n\n if num_frame_stack is not None:\n envs = VecPyTorchFrameStack(envs, num_frame_stack, device)\n elif len(envs.observation_space.shape) == 3:\n envs = VecPyTorchFrameStack(envs, 4, device)\n\n return envs\n\n\n# Checks whether done was caused my timit limits or not\nclass TimeLimitMask(gym.Wrapper):\n def step(self, action):\n obs, rew, done, info = self.env.step(action)\n if done and self.env._max_episode_steps == self.env._elapsed_steps:\n info['bad_transition'] = True\n\n return obs, rew, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\n# Can be used to test recurrent policies for Reacher-v2\nclass MaskGoal(gym.ObservationWrapper):\n def observation(self, observation):\n if self.env._elapsed_steps > 0:\n observation[-2:] = 0\n return observation\n\n\nclass TransposeObs(gym.ObservationWrapper):\n def __init__(self, env=None):\n \"\"\"\n Transpose observation space (base class)\n \"\"\"\n super(TransposeObs, self).__init__(env)\n\n\nclass TransposeImage(TransposeObs):\n def __init__(self, env=None, op=[2, 0, 1]):\n \"\"\"\n Transpose observation space for images\n \"\"\"\n super(TransposeImage, self).__init__(env)\n assert len(op) == 3, f\"Error: Operation, {str(op)}, must be dim3\"\n self.op = op\n obs_shape = self.observation_space.shape\n self.observation_space = Box(\n self.observation_space.low[0, 0, 0],\n self.observation_space.high[0, 0, 0], [\n obs_shape[self.op[0]], obs_shape[self.op[1]],\n obs_shape[self.op[2]]\n ],\n dtype=self.observation_space.dtype)\n\n def observation(self, ob):\n return ob.transpose(self.op[0], self.op[1], self.op[2])\n\n\nclass VecPyTorch(VecEnvWrapper):\n def __init__(self, venv, device):\n \"\"\"Return only every `skip`-th frame\"\"\"\n 
super(VecPyTorch, self).__init__(venv)\n self.device = device\n # TODO: Fix data types\n\n def reset(self):\n obs = self.venv.reset()\n obs = torch.from_numpy(obs).float().to(self.device)\n return obs\n\n def step_async(self, actions):\n if isinstance(actions, torch.LongTensor):\n # Squeeze the dimension for discrete actions\n actions = actions.squeeze(1)\n actions = actions.cpu().numpy()\n self.venv.step_async(actions)\n\n def step_wait(self):\n obs, reward, done, info = self.venv.step_wait()\n obs = torch.from_numpy(obs).float().to(self.device)\n reward = torch.from_numpy(reward).unsqueeze(dim=1).float()\n return obs, reward, done, info\n\n\nclass VecNormalize(VecNormalize_):\n def __init__(self, *args, **kwargs):\n super(VecNormalize, self).__init__(*args, **kwargs)\n self.training = True\n\n def _obfilt(self, obs, update=True):\n obfilt = False\n if obfilt:\n if self.ob_rms:\n if self.training and update:\n self.ob_rms.update(obs)\n obs = np.clip((obs - self.ob_rms.mean) /\n np.sqrt(self.ob_rms.var + self.epsilon),\n -self.clipob, self.clipob)\n return obs\n return obs\n\n\n def train(self):\n self.training = True\n\n def eval(self):\n self.training = False\n\n\n# Derived from\n# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py\nclass VecPyTorchFrameStack(VecEnvWrapper):\n def __init__(self, venv, nstack, device=None):\n self.venv = venv\n self.nstack = nstack\n\n wos = venv.observation_space # wrapped ob space\n self.shape_dim0 = wos.shape[0]\n\n low = np.repeat(wos.low, self.nstack, axis=0)\n high = np.repeat(wos.high, self.nstack, axis=0)\n\n if device is None:\n device = torch.device('cpu')\n self.stacked_obs = torch.zeros((venv.num_envs, ) +\n low.shape).to(device)\n\n observation_space = gym.spaces.Box(\n low=low, high=high, dtype=venv.observation_space.dtype)\n VecEnvWrapper.__init__(self, venv, observation_space=observation_space)\n\n def step_wait(self):\n obs, rews, news, infos = self.venv.step_wait()\n self.stacked_obs[:, :-self.shape_dim0] = \\\n self.stacked_obs[:, self.shape_dim0:]\n for (i, new) in enumerate(news):\n if new:\n self.stacked_obs[i] = 0\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs, rews, news, infos\n\n def reset(self):\n obs = self.venv.reset()\n if torch.backends.cudnn.deterministic:\n self.stacked_obs = torch.zeros(self.stacked_obs.shape)\n else:\n self.stacked_obs.zero_()\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs\n\n def close(self):\n self.venv.close()\n" ]
[ [ "numpy.sqrt", "torch.zeros", "numpy.repeat", "torch.from_numpy", "numpy.prod", "numpy.array", "torch.device" ] ]
jakubzadrozny/pixelnerf
[ "989894044a7943c34ac0b29f431fc211d5837fd8" ]
[ "src/model/custom_encoder.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .. import util\n\n\nclass ConvEncoder(nn.Module):\n \"\"\"\n Basic, extremely simple convolutional encoder\n \"\"\"\n\n def __init__(\n self,\n dim_in=3,\n norm_layer=util.get_norm_layer(\"group\"),\n padding_type=\"reflect\",\n use_leaky_relu=True,\n use_skip_conn=True,\n ):\n super().__init__()\n self.dim_in = dim_in\n self.norm_layer = norm_layer\n self.activation = nn.LeakyReLU() if use_leaky_relu else nn.ReLU()\n self.padding_type = padding_type\n self.use_skip_conn = use_skip_conn\n\n # TODO: make these configurable\n first_layer_chnls = 64\n mid_layer_chnls = 128\n last_layer_chnls = 128\n n_down_layers = 3\n self.n_down_layers = n_down_layers\n\n self.conv_in = nn.Sequential(\n nn.Conv2d(dim_in, first_layer_chnls, kernel_size=7, stride=2, bias=False),\n norm_layer(first_layer_chnls),\n self.activation,\n )\n\n chnls = first_layer_chnls\n for i in range(0, n_down_layers):\n conv = nn.Sequential(\n nn.Conv2d(chnls, 2 * chnls, kernel_size=3, stride=2, bias=False),\n norm_layer(2 * chnls),\n self.activation,\n )\n setattr(self, \"conv\" + str(i), conv)\n\n deconv = nn.Sequential(\n nn.ConvTranspose2d(\n 4 * chnls, chnls, kernel_size=3, stride=2, bias=False\n ),\n norm_layer(chnls),\n self.activation,\n )\n setattr(self, \"deconv\" + str(i), deconv)\n chnls *= 2\n\n self.conv_mid = nn.Sequential(\n nn.Conv2d(chnls, mid_layer_chnls, kernel_size=4, stride=4, bias=False),\n norm_layer(mid_layer_chnls),\n self.activation,\n )\n\n self.deconv_last = nn.ConvTranspose2d(\n first_layer_chnls, last_layer_chnls, kernel_size=3, stride=2, bias=True\n )\n\n self.dims = [last_layer_chnls]\n\n def forward(self, x):\n x = util.same_pad_conv2d(x, padding_type=self.padding_type, layer=self.conv_in)\n x = self.conv_in(x)\n\n inters = []\n for i in range(0, self.n_down_layers):\n conv_i = getattr(self, \"conv\" + str(i))\n x = util.same_pad_conv2d(x, padding_type=self.padding_type, layer=conv_i)\n x = conv_i(x)\n inters.append(x)\n\n x = util.same_pad_conv2d(x, padding_type=self.padding_type, layer=self.conv_mid)\n x = self.conv_mid(x)\n x = x.reshape(x.shape[0], -1, 1, 1).expand(-1, -1, *inters[-1].shape[-2:])\n\n for i in reversed(range(0, self.n_down_layers)):\n if self.use_skip_conn:\n x = torch.cat((x, inters[i]), dim=1)\n deconv_i = getattr(self, \"deconv\" + str(i))\n x = deconv_i(x)\n x = util.same_unpad_deconv2d(x, layer=deconv_i)\n x = self.deconv_last(x)\n x = util.same_unpad_deconv2d(x, layer=self.deconv_last)\n return x\n" ]
[ [ "torch.nn.Conv2d", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.cat", "torch.nn.LeakyReLU" ] ]
pjuangph/sagan-pytorch
[ "b766f0c53184cfc02b4220329585a4d59bbfb2c7" ]
[ "model.py" ]
[ "import torch\r\n\r\nfrom torch import nn\r\nfrom torch.nn import init\r\nfrom torch.nn import functional as F\r\n\r\nimport functools\r\nfrom torch.autograd import Variable\r\n\r\n\r\ndef init_linear(linear):\r\n init.xavier_uniform_(linear.weight)\r\n linear.bias.data.zero_()\r\n\r\n\r\ndef init_conv(conv, glu=True):\r\n init.xavier_uniform_(conv.weight)\r\n if conv.bias is not None:\r\n conv.bias.data.zero_()\r\n\r\n\r\nclass SpectralNorm:\r\n def __init__(self, name):\r\n self.name = name\r\n\r\n def compute_weight(self, module):\r\n weight = getattr(module, self.name + '_orig')\r\n u = getattr(module, self.name + '_u')\r\n size = weight.size()\r\n weight_mat = weight.contiguous().view(size[0], -1)\r\n with torch.no_grad():\r\n v = weight_mat.t() @ u\r\n v = v / v.norm()\r\n u = weight_mat @ v\r\n u = u / u.norm()\r\n sigma = u @ weight_mat @ v\r\n weight_sn = weight / sigma\r\n # weight_sn = weight_sn.view(*size)\r\n\r\n return weight_sn, u\r\n\r\n @staticmethod\r\n def apply(module, name):\r\n fn = SpectralNorm(name)\r\n\r\n weight = getattr(module, name)\r\n del module._parameters[name]\r\n module.register_parameter(name + '_orig', weight)\r\n input_size = weight.size(0)\r\n u = weight.new_empty(input_size).normal_()\r\n module.register_buffer(name, weight)\r\n module.register_buffer(name + '_u', u)\r\n\r\n module.register_forward_pre_hook(fn)\r\n\r\n return fn\r\n\r\n def __call__(self, module, input):\r\n weight_sn, u = self.compute_weight(module)\r\n setattr(module, self.name, weight_sn)\r\n setattr(module, self.name + '_u', u)\r\n\r\n\r\ndef spectral_norm(module, name='weight'):\r\n SpectralNorm.apply(module, name)\r\n\r\n return module\r\n\r\n\r\ndef spectral_init(module, gain=1):\r\n init.kaiming_uniform_(module.weight, gain)\r\n if module.bias is not None:\r\n module.bias.data.zero_()\r\n\r\n return spectral_norm(module)\r\n\r\n\r\ndef leaky_relu(input):\r\n return F.leaky_relu(input, negative_slope=0.2)\r\n\r\n\r\nclass SelfAttention(nn.Module):\r\n def __init__(self, in_channel, gain=1):\r\n super().__init__()\r\n\r\n self.query = spectral_init(nn.Conv1d(in_channel, in_channel // 8, 1),\r\n gain=gain)\r\n self.key = spectral_init(nn.Conv1d(in_channel, in_channel // 8, 1),\r\n gain=gain)\r\n self.value = spectral_init(nn.Conv1d(in_channel, in_channel, 1),\r\n gain=gain)\r\n\r\n self.gamma = nn.Parameter(torch.tensor(0.0))\r\n\r\n def forward(self, input):\r\n shape = input.shape\r\n flatten = input.view(shape[0], shape[1], -1)\r\n query = self.query(flatten).permute(0, 2, 1)\r\n key = self.key(flatten)\r\n value = self.value(flatten)\r\n query_key = torch.bmm(query, key)\r\n attn = F.softmax(query_key, 1)\r\n attn = torch.bmm(value, attn)\r\n attn = attn.view(*shape)\r\n out = self.gamma * attn + input\r\n\r\n return out\r\n\r\n\r\nclass ConditionalNorm(nn.Module):\r\n def __init__(self, in_channel, n_class):\r\n super().__init__()\r\n\r\n self.bn = nn.BatchNorm2d(in_channel, affine=False)\r\n self.embed = nn.Embedding(n_class, in_channel * 2)\r\n self.embed.weight.data[:, :in_channel] = 1\r\n self.embed.weight.data[:, in_channel:] = 0\r\n\r\n def forward(self, input, class_id):\r\n out = self.bn(input)\r\n embed = self.embed(class_id)\r\n gamma, beta = embed.chunk(2, 1)\r\n gamma = gamma.unsqueeze(2).unsqueeze(3)\r\n beta = beta.unsqueeze(2).unsqueeze(3)\r\n out = gamma * out + beta\r\n\r\n return out\r\n\r\n\r\nclass ConvBlock(nn.Module):\r\n def __init__(self, in_channel, out_channel, kernel_size=[3, 3],\r\n padding=1, stride=1, n_class=None, bn=True,\r\n 
activation=F.relu, upsample=True, self_attention=False):\r\n super().__init__()\r\n\r\n self.conv = spectral_init(nn.Conv2d(in_channel, out_channel,\r\n kernel_size, stride, padding,\r\n bias=False if bn else True))\r\n\r\n self.upsample = upsample\r\n self.activation = activation\r\n self.bn = bn\r\n if bn:\r\n self.norm = ConditionalNorm(out_channel, n_class)\r\n\r\n self.self_attention = self_attention\r\n if self_attention:\r\n self.attention = SelfAttention(out_channel, 1)\r\n\r\n def forward(self, input, class_id=None):\r\n out = input\r\n if self.upsample:\r\n out = F.interpolate(out, scale_factor=2) # upsample\r\n\r\n out = self.conv(out)\r\n\r\n if self.bn:\r\n out = self.norm(out, class_id)\r\n\r\n if self.activation is not None:\r\n out = self.activation(out)\r\n\r\n if self.self_attention:\r\n out = self.attention(out)\r\n\r\n return out\r\n\r\n\r\nclass Generator(nn.Module):\r\n def __init__(self, att=True, image_size=28, n_class=10, image_channels=3):\r\n \"\"\"Generates an image\r\n\r\n Args:\r\n att (bool, optional): Include attention. Defaults to True.\r\n image_size (int, optional): Pixels (HxW) of the square image. Defaults to 28.\r\n n_class (int, optional): Number of classes (dog, cat, bird). Defaults to 10.\r\n image_channels (int, optional): 1 for Grayscale, 3 for RGB. Defaults to 3.\r\n \"\"\"\r\n super().__init__()\r\n\r\n self.lin_code = spectral_init(nn.Linear(image_size, 4 * 4 * 512))\r\n self.conv = nn.ModuleList([ConvBlock(512, 512, n_class=n_class),\r\n ConvBlock(512, 512, n_class=n_class),\r\n ConvBlock(512, 512, n_class=n_class,\r\n self_attention=att),\r\n ConvBlock(512, 256, n_class=n_class),\r\n ConvBlock(256, 128, n_class=n_class)])\r\n\r\n self.colorize = spectral_init(nn.Conv2d(128, image_channels, [3, 3], padding=1))\r\n\r\n def forward(self, input:torch.Tensor, class_id):\r\n \"\"\"Generates an image from a random input and class_id\r\n\r\n Args:\r\n input (torch.Tensor): random image as input\r\n class_id (torch.Tensor): tensor of integers representing a class \r\n\r\n Returns:\r\n [type]: [description]\r\n \"\"\"\r\n out = self.lin_code(input)\r\n out = F.relu(out)\r\n out = out.view(-1, 512, 4, 4)\r\n\r\n for conv in self.conv: # Use module list because we need to pass a class_id into each one of them\r\n out = conv(out, class_id)\r\n\r\n out = self.colorize(out)\r\n\r\n return torch.tanh(out)\r\n\r\n\r\nclass Discriminator(nn.Module):\r\n def __init__(self, n_class=10):\r\n super().__init__()\r\n\r\n def conv(in_channel, out_channel, stride=2,\r\n self_attention=False):\r\n return ConvBlock(in_channel, out_channel, stride=stride,\r\n bn=False, activation=leaky_relu,\r\n upsample=False, self_attention=self_attention)\r\n\r\n self.conv = nn.Sequential(conv(3, 128),\r\n conv(128, 256),\r\n conv(256, 512, stride=1,\r\n self_attention=True),\r\n conv(512, 512),\r\n conv(512, 512),\r\n conv(512, 512))\r\n\r\n self.linear = spectral_init(nn.Linear(512, 1))\r\n\r\n self.embed = nn.Embedding(n_class, 512)\r\n self.embed.weight.data.uniform_(-0.1, 0.1)\r\n self.embed = spectral_norm(self.embed)\r\n\r\n def forward(self, input, class_id):\r\n out = self.conv(input)\r\n out = out.view(out.size(0), out.size(1), -1)\r\n out = out.sum(2)\r\n out_linear = self.linear(out).squeeze(1)\r\n embed = self.embed(class_id)\r\n prod = (out * embed).sum(1)\r\n\r\n return out_linear + prod\r\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.init.xavier_uniform_", "torch.nn.Linear", "torch.nn.functional.softmax", "torch.no_grad", "torch.tensor", "torch.nn.Embedding", "torch.nn.functional.relu", "torch.nn.Conv1d", "torch.nn.Conv2d", "torch.tanh", "torch.nn.functional.leaky_relu", "torch.nn.init.kaiming_uniform_", "torch.bmm", "torch.nn.functional.interpolate" ] ]
messwith/phazes
[ "34b67292e6feaa95428fe68a5ceb29c9862e21d4" ]
[ "prototype_test.py" ]
[ "import numpy\nfrom prototype import Changer\n\n\ndef test_changer():\n changer = Changer(0.5, 1, 1)\n matrix = numpy.array([[0, 0, 0]])\n changer.change(matrix)\n assert matrix[0, 2] == 0\n changer.change(matrix)\n assert matrix[0, 2] == -1\n changer.change(matrix)\n assert matrix[0, 2] == 0\n" ]
[ [ "numpy.array" ] ]
d04943016/ColorScience
[ "b874d70c217249ec47a6017b47c5e3ca2008a6a8" ]
[ "Help/myNumericalIntegration.py" ]
[ "#!/usr/bin/env python3\r\n# Copyright (c) 2018 Wei-Kai Lee. All rights reserved\r\n\r\n# coding=utf-8\r\n# -*- coding: utf8 -*-\r\n\r\n\r\nimport numpy as np\r\n\r\ndef dx(x):\r\n return x[1:]-x[0:x.size-1]\r\ndef yave(y):\r\n xszie = y.shape[-1]\r\n return ( y[...,1::]+y[...,0:(xszie-1):] )/2\r\ndef myNumericalIntegration(x,y):\r\n \"\"\"\r\n myNumericalIntegration is a function to calculate the area of \r\n a y = function(x) by trapezoid method.\r\n x must be ascending.\r\n\r\n >>> x = np.linspace(0,1,100, dtype=np.float64) \r\n >>> y = x**2\r\n >>> value = myNumericalIntegration(x,y)\r\n >>> print(value)\r\n 0.33335033840084355\r\n >>> x = np.linspace(0,1,1000, dtype=np.float64) \r\n >>> y = x**2\r\n >>> value = myNumericalIntegration(x,y)\r\n >>> print(value)\r\n 0.3333335003338339\r\n >>> x = np.linspace(0,1,10000, dtype=np.float64) \r\n >>> y = x**2\r\n >>> value = myNumericalIntegration(x,y)\r\n >>> print(value)\r\n 0.3333333350003337\r\n \"\"\"\r\n x = np.array(x)\r\n y = np.array(y)\r\n return np.einsum('...i,i->...',yave(y),dx(x))\r\ndef yave2(y):\r\n xszie = y.shape[0]\r\n return ( y[1::,...]+y[0:(xszie-1):,...] )/2\r\ndef myNumericalIntegration2(x,y):\r\n x = np.array(x)\r\n y = np.array(y)\r\n return np.einsum('i...,i->...',yave2(y),dx(x))\r\ndef myTRAPEZOIDAL(fun,x0,x1,xPts=200):\r\n \"\"\"\r\n myTRAPEZOIDAL is a function to calclate the integration of function f from \r\n x0 to x1 with equal spacing. (xPts: points of x)\r\n\r\n >>> import numpy as np\r\n >>> f = lambda x: [np.sin(x), np.cos(x)]\r\n >>> Int, xList, yListList = myTRAPEZOIDAL(f,0,np.pi)\r\n >>> print(Int)\r\n [1.99995846e+00 9.02056208e-17]\r\n >>> Int, xList, yListList = myTRAPEZOIDAL(f,0,np.pi,xPts=300)\r\n >>> print(Int)\r\n [1.99998160e+00 2.68882139e-16]\r\n >>> Int, xList, yListList = myTRAPEZOIDAL(f,0,np.pi,xPts=400)\r\n >>> print(Int)\r\n [1.99998967e+00 1.02348685e-16]\r\n >>> Int, xList, yListList = myTRAPEZOIDAL(f,0,np.pi,xPts=1000)\r\n >>> print(Int)\r\n [ 1.99999835e+00 -8.96418356e-16]\r\n \"\"\"\r\n xList = np.linspace(x0, x1, num=int(xPts) )\r\n data = np.array(fun(xList))\r\n Int = myNumericalIntegration(xList,data)\r\n return Int, xList, data\r\ndef myFunIntegration(f, x0, x1, tol=1e-5, recursiveLim=1e4, xCountStart=100, intfun=myTRAPEZOIDAL):\r\n \"\"\"\r\n >>> import numpy as np\r\n >>> f = lambda x: [np.sin(x), np.cos(x)]\r\n >>> Sn, err, nodes, count = myFunIntegration(f, 0, np.pi, tol=1e-5)\r\n >>> print(Sn)\r\n [1.99999934e+00 2.22044605e-16]\r\n >>> print(err)\r\n 2.781470994472901e-06\r\n >>> print(nodes.size)\r\n 17\r\n >>> print(count)\r\n 8\r\n >>> f = lambda x: [ x**2, x**3]\r\n >>> Sn, err, nodes, count = myFunIntegration(f, -2, 2, tol=1e-5)\r\n >>> print(Sn)\r\n [5.333334 0. 
]\r\n >>> print(err)\r\n 4.433938502017287e-06\r\n >>> print(count)\r\n 24\r\n >>> print(nodes.size)\r\n 49\r\n >>> f = lambda x: [ np.exp(x), np.exp(x**2)]\r\n >>> Sn, err, nodes, count = myFunIntegration(f, -2, 2, tol=1e-5)\r\n >>> print(Sn)\r\n [ 7.25372109 32.90525734]\r\n >>> print(err)\r\n 5.414152568605779e-06\r\n >>> print(count)\r\n 68\r\n >>> print(nodes.size)\r\n 137\r\n \"\"\"\r\n S, xList, yListList = intfun(f, x0, x1, xPts=xCountStart)\r\n Sn, err, nodes, count = recursive_integration1(f, x0, x1, S, tol=tol, recursiveLim=recursiveLim, xCountStart=xCountStart, intfun=intfun)\r\n return Sn, err, nodes, count\r\n \r\n # S, xList, yListList = intfun(f, x0, x1, xPts=xCountStart)\r\n # Sn, err, xList, count = recursive_integration2(f, x0, x1, S, xList, tol=tol, recursiveLim=recursiveLim)\r\n # return Sn, err, count\r\n\r\n # S, xList, yListList = intfun(f, x0, x1, xPts=xCountStart)\r\n # Sn, err, xListNew, yListListNew, count, dxmin = recursive_integration3(f, S, xList, yListList, tol=tol, recursiveLim=recursiveLim, xCountStart=xCountStart, dxMin=dxMin)\r\n # return Sn, err, xListNew, count\r\ndef recursive_integration1(f, x0, x1, S, tol=1e-3, recursiveLim=1e4, xCountStart=100, intfun=myTRAPEZOIDAL):\r\n \"\"\" \r\n f: function of f(x)\r\n [a,b] : the interval of integration\r\n S : the previous integration result\r\n tol : the tolerance\r\n \r\n This is a subfunction of adapt_simpson.\r\n \"\"\"\r\n xc = float(x0+x1)/2\r\n SL, xListL, dataL = intfun(f,x0,xc,xPts=xCountStart)\r\n SR, xListR, dataR = intfun(f,xc,x1,xPts=xCountStart)\r\n Sn = SL+SR\r\n err = max( np.abs(Sn-S) )\r\n if err <= tol or recursiveLim==1:\r\n nodes = np.array([x0,xc,x1])\r\n count = 1\r\n return Sn, err, nodes, count\r\n fac = 0.5\r\n SL, err1, nodes1, countL = recursive_integration1(f, x0, xc, SL, tol=tol*fac, recursiveLim=recursiveLim-1, xCountStart=xCountStart, intfun=intfun)\r\n SR, err2, nodes2, countR = recursive_integration1(f, xc, x1, SR, tol=tol*(1-fac), recursiveLim=recursiveLim-1, xCountStart=xCountStart, intfun=intfun)\r\n err = err1 + err2\r\n nodes = np.append(nodes1, nodes2[1::])\r\n count = countL+1 if countL>=countR else countR+1 # countL+countR # countL+1 if countL>=countR else countR+1\r\n Sn = SL+SR\r\n return Sn, err, nodes, count\r\ndef myMidpointList_Integration2(f, xList, S):\r\n # Mid point\r\n xList = np.array(xList)\r\n xList2 = (xList[0:xList.size-1]+xList[1:xList.size])/2\r\n data = f(xList2)\r\n # Sum\r\n temptsum = np.einsum('...i,i->...', data, dx(xList) ) \r\n Sn = (S+temptsum)/2\r\n # Merge List\r\n xListNew = np.zeros( xList.size+xList2.size, dtype=xList.dtype )\r\n xListNew[0::2] = xList\r\n xListNew[1::2] = xList2\r\n return Sn, xListNew\r\ndef recursive_integration2(f, x0, x1, S, xList, tol=1e-5, recursiveLim=1e4):\r\n \"\"\" \r\n f: function of f(x)\r\n [a,b] : the interval of integration\r\n S : the previous integration result\r\n tol : the tolerance\r\n \r\n This is a subfunction of adapt_simpson.\r\n \"\"\"\r\n Sn, xListNew = myMidpointList_Integration2(f, xList, S)\r\n err = max( np.abs(Sn-S) )\r\n if err <= tol or recursiveLim==1:\r\n count = 1\r\n return Sn, err, xListNew, count\r\n Sn, err, xListNew, count = recursive_integration2(f, x0, x1, Sn, xList=xListNew, tol=tol, recursiveLim=recursiveLim-1)\r\n count = count + 1\r\n return Sn, err, xListNew, count\r\ndef myMidpointList_Integration3(f, xList, yListList):\r\n # Mid point\r\n xList = np.array(xList)\r\n xList2 = (xList[0:xList.size-1]+xList[1:xList.size])/2\r\n data = f(xList2)\r\n # Sum\r\n temptsum = 
np.einsum('...i,i->...', data, dx(xList) ) \r\n # Merge x List\r\n xListNew = np.zeros( xList.size+xList2.size, dtype=xList.dtype )\r\n xListNew[0::2] = xList\r\n xListNew[1::2] = xList2\r\n # Merge y List\r\n yListListNew = np.zeros( (yListList.shape[0], xListNew.size) , dtype=yListList.dtype)\r\n yListListNew[:,0::2] = yListList\r\n yListListNew[:,1::2] = data\r\n # Sum\r\n Sn = myNumericalIntegration(xListNew,yListListNew)\r\n return Sn, xListNew, yListListNew\r\ndef recursive_integration3(f, S, xList, yListList, tol=1e-5, recursiveLim=1e4, xCountStart=100, dxMin=None):\r\n \"\"\" \r\n f: function of f(x)\r\n [a,b] : the interval of integration\r\n S : the previous integration result\r\n tol : the tolerance\r\n \r\n This is a subfunction of adapt_simpson.\r\n \"\"\"\r\n indMid = int(len(xList)/2)\r\n SL, xListL, yListListL = myMidpointList_Integration3(f, xList[:indMid+1:], yListList[:,:indMid+1:] )\r\n SR, xListR, yListListR = myMidpointList_Integration3(f, xList[indMid::], yListList[:,indMid::] )\r\n Sn = SL+SR\r\n err = max( np.abs(Sn-S) )\r\n # End Case\r\n xListNew = np.append( xListL, xListR[1::] )\r\n dxmin = min(dx(xListNew))\r\n if err <= tol or recursiveLim==1 or dxMin==None or dxmin<dxMin:\r\n count = 1\r\n yListListNew = np.append( yListListL, yListListR[:,1::] ) \r\n return Sn, err, xListNew, yListListNew, count, dxmin\r\n # Iterative Case\r\n sL, sR = np.sum(SL), np.sum(SR)\r\n fac = sL/(sL+sR) if (sL+sR)!=0 else 0.5\r\n SL, errL, xListL, yListListL, countL, dxL = recursive_integration3(f, SL, xList=xListL, yListList=yListListL, tol=tol*fac, recursiveLim=recursiveLim-1, dxMin=dxMin)\r\n SR, errR, xListR, yListListR, countR, dxR = recursive_integration3(f, SR, xList=xListR, yListList=yListListR, tol=tol*(1-fac), recursiveLim=recursiveLim-1, dxMin=dxMin)\r\n Sn = SL+SR\r\n err = errL + errR\r\n count = countL+1 if countL>countR else countR+1 #countL+countR # countL+1 if countL>countR else countR+1\r\n xListNew = np.append( xListL, xListR[1::] )\r\n dxmin = dxL if dxL<dxR else dxR\r\n yListListNew = np.append( yListListL, yListListR[:,1::] ) \r\n return Sn, err, xListNew, yListListNew, count, dxmin\r\n\r\nif __name__ == '__main__':\r\n import doctest\r\n doctest.testmod()\r\n \"\"\"\r\n x = []\r\n y = []\r\n value = myNumericalIntegration(x,y)\r\n \"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "numpy.sum", "numpy.append", "numpy.zeros", "numpy.abs", "numpy.array" ] ]
shettyprithvi/scattertext
[ "a15613b6feef3ddc56c03aadb8e1e629d28a427d" ]
[ "scattertext/termscoring/CohensDCalculator.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\n\n\nclass CohensDCalculator(object):\n def get_cohens_d_df(self, cat_X, ncat_X, correction_method=None):\n empty_cat_X_smoothing_doc = np.zeros((1, cat_X.shape[1]))\n empty_ncat_X_smoothing_doc = np.zeros((1, ncat_X.shape[1]))\n smoothed_cat_X = np.vstack([empty_cat_X_smoothing_doc, cat_X])\n smoothed_ncat_X = np.vstack([empty_ncat_X_smoothing_doc, ncat_X])\n n1, n2 = float(smoothed_cat_X.shape[0]), float(smoothed_ncat_X.shape[0])\n n = n1 + n2\n #print(cat_X.shape, type(cat_X))\n m1 = cat_X.mean(axis=0).A1 if type(cat_X) == np.matrix else cat_X.mean(axis=0)\n m2 = ncat_X.mean(axis=0).A1 if type(ncat_X) == np.matrix else ncat_X.mean(axis=0)\n v1 = smoothed_cat_X.var(axis=0).A1 if type(smoothed_cat_X) == np.matrix else smoothed_cat_X.mean(axis=0)\n v2 = smoothed_ncat_X.var(axis=0).A1 if type(smoothed_ncat_X) == np.matrix else smoothed_ncat_X.mean(axis=0)\n s_pooled = np.sqrt(((n2 - 1) * v2 + (n1 - 1) * v1) / (n - 2.))\n cohens_d = (m1 - m2) / s_pooled\n cohens_d_se = np.sqrt(((n - 1.) / (n - 3)) * (4. / n) * (1 + np.square(cohens_d) / 8.))\n cohens_d_z = cohens_d / cohens_d_se\n cohens_d_p = norm.sf(cohens_d_z)\n hedges_r = cohens_d * (1 - 3. / ((4. * (n - 2)) - 1))\n hedges_r_se = np.sqrt(n / (n1 * n2) + np.square(hedges_r) / (n - 2.))\n hedges_r_z = hedges_r / hedges_r_se\n hedges_r_p = norm.sf(hedges_r_z)\n score_df = pd.DataFrame({\n 'cohens_d': cohens_d,\n 'cohens_d_se': cohens_d_se,\n 'cohens_d_z': cohens_d_z,\n 'cohens_d_p': cohens_d_p,\n 'hedges_r': hedges_r,\n 'hedges_r_se': hedges_r_se,\n 'hedges_r_z': hedges_r_z,\n 'hedges_r_p': hedges_r_p,\n 'm1': m1,\n 'm2': m2,\n }).fillna(0)\n if correction_method is not None:\n from statsmodels.stats.multitest import multipletests\n score_df['hedges_r_p_corr'] = 0.5\n for method in ['cohens_d', 'hedges_r']:\n score_df[method + '_p_corr'] = 0.5\n pvals = score_df.loc[(score_df['m1'] != 0) | (score_df['m2'] != 0), method + '_p']\n pvals = np.min(np.array([pvals, 1. - pvals])) * 2.\n score_df.loc[(score_df['m1'] != 0) | (score_df['m2'] != 0), method + '_p_corr'] = (\n multipletests(pvals, method=correction_method)[1]\n )\n return score_df" ]
[ [ "numpy.vstack", "scipy.stats.norm.sf", "numpy.zeros", "pandas.DataFrame", "numpy.sqrt", "numpy.square", "numpy.array" ] ]
holman57/Lafite
[ "9e5981a666cd2dcd3ff2a7f38229d6b8678ce6bb" ]
[ "train.py" ]
[ "\nimport os\nimport click\nimport re\nimport json\nimport tempfile\nimport torch\nimport dnnlib\n\nfrom training import training_loop\nfrom metrics import metric_main\nfrom torch_utils import training_stats\nfrom torch_utils import custom_ops\n\n#----------------------------------------------------------------------------\n\nclass UserError(Exception):\n pass\n\n#----------------------------------------------------------------------------\n\ndef setup_training_loop_kwargs(\n f_dim = None,\n d_use_norm = None, # normalize the feature extracted by discriminator or not\n d_use_fts = None, # discriminator extract semantic feature or not\n mixing_prob= None, # mixing probability of ground-truth and language-free generated pairs, mixing_prob=0 means only use ground-truth, mixing_prob=1. means using only pseudo pairs(language-free)\n lam = None, # hyper-parameter for contrastive loss\n temp = None, # hyper-parameter for contrastive loss\n change = None, # hyper-parameter for architecture\n map_num = None, # hyper-parameter for architecture\n gather = None, # hyper-parameter for contrastive loss\n itd = None, # hyper-parameter for contrastive loss\n itc = None, # hyper-parameter for contrastive loss\n iid = None, # hyper-parameter for contrastive loss\n iic = None, # hyper-parameter for contrastive loss\n metric_only_test = None, # hyper-parameter for computing metrics\n fmap = None, # hyper-parameter for architecture, related to channel number\n ratio = None,\n # General options (not included in desc).\n gpus = None, # Number of GPUs: <int>, default = 1 gpu\n snap = None, # Snapshot interval: <int>, default = 50 ticks\n metrics = None, # List of metric names: [], ['fid50k_full'] (default), ...\n seed = None, # Random seed: <int>, default = 0\n # Dataset.\n data = None, # Training dataset (required): <path>\n test_data = None, # Testing dataset for metrics, if not use training dataset\n cond = None, # Train conditional model based on dataset labels: <bool>, default = False\n subset = None, # Train with only N images: <int>, default = all\n mirror = None, # Augment dataset with x-flips: <bool>, default = False\n\n # Base config.\n cfg = None, # Base config: 'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar'\n gamma = None, # Override R1 gamma: <float>\n kimg = None, # Override training duration: <int>\n batch = None, # Override batch size: <int>\n\n # Discriminator augmentation.\n aug = None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed'\n p = None, # Specify p for 'fixed' (required): <float>\n target = None, # Override ADA target for 'ada': <float>, default = depends on aug\n augpipe = None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'\n\n # Transfer learning.\n resume = None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', <file>, <url>\n freezed = None, # Freeze-D: <int>, default = 0 discriminator layers\n\n # Performance options (not included in desc).\n fp32 = None, # Disable mixed-precision training: <bool>, default = False\n nhwc = None, # Use NHWC memory format with FP16: <bool>, default = False\n allow_tf32 = None, # Allow PyTorch to use TF32 for matmul and convolutions: <bool>, default = False\n nobench = None, # Disable cuDNN benchmarking: <bool>, default = False\n workers = None, # Override number of DataLoader workers: <int>, default = 3\n):\n args = dnnlib.EasyDict()\n\n # ------------------------------------------\n 
# General options: gpus, snap, metrics, seed\n # ------------------------------------------\n if f_dim is None:\n f_dim = 512\n assert isinstance(f_dim, int)\n args.f_dim = f_dim\n\n if ratio is None:\n ratio = 1.0\n args.ratio = ratio\n \n if mixing_prob is None:\n mixing_prob = 0.\n args.mixing_prob = mixing_prob\n \n if fmap is None:\n fmap = 1.\n \n if metric_only_test is None:\n metric_only_test = False\n args.metric_only_test = metric_only_test\n \n if map_num is None:\n map_num = 8\n \n if lam is None:\n lam = 0.\n args.lam = lam\n \n if temp is None:\n temp = 0.5\n args.temp = temp\n \n if itd is None:\n itd = 10.\n args.itd = itd\n if itc is None:\n itc = 10.\n args.itc = itc\n \n if iid is None:\n iid = 0.\n args.iid = iid\n if iic is None:\n iic = 0.\n args.iic = iic\n \n \n \n if change is None:\n change = 256\n \n if d_use_norm is None:\n d_use_norm = False\n assert isinstance(d_use_norm, bool)\n args.d_use_norm = d_use_norm\n \n if d_use_fts is None:\n d_use_fts = True\n args.d_use_fts = d_use_fts\n \n if gather is None:\n gather = False\n args.gather = gather\n \n if gpus is None:\n gpus = 1\n assert isinstance(gpus, int)\n if not (gpus >= 1 and gpus & (gpus - 1) == 0):\n raise UserError('--gpus must be a power of two')\n args.num_gpus = gpus\n\n if snap is None:\n snap = 50\n assert isinstance(snap, int)\n if snap < 1:\n raise UserError('--snap must be at least 1')\n args.image_snapshot_ticks = snap\n args.network_snapshot_ticks = snap\n\n if metrics is None:\n metrics = ['fid50k_full']\n assert isinstance(metrics, list)\n if not all(metric_main.is_valid_metric(metric) for metric in metrics):\n raise UserError('\\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))\n args.metrics = metrics\n\n if seed is None:\n seed = 0\n assert isinstance(seed, int)\n args.random_seed = seed\n\n # -----------------------------------\n # Dataset: data, cond, subset, mirror\n # -----------------------------------\n\n assert data is not None\n assert isinstance(data, str)\n print('using data: ', data, 'testing data: ', test_data)\n if test_data is None:\n test_data = data\n args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False, use_clip=True, ratio=args.ratio)\n args.testing_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=test_data, use_labels=True, max_size=None, xflip=False, use_clip=True, ratio=1.0)\n args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=False, num_workers=1, prefetch_factor=2)\n try:\n training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset\n args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution\n args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels\n args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size\n desc = training_set.name\n args.testing_set_kwargs.resolution = training_set.resolution # be explicit about resolution\n args.testing_set_kwargs.use_labels = training_set.has_labels # be explicit about labels\n del training_set # conserve memory\n\n except IOError as err:\n raise UserError(f'--data: {err}')\n\n if cond is None:\n cond = False\n assert isinstance(cond, bool)\n if cond:\n if not args.training_set_kwargs.use_labels:\n raise UserError('--cond=True requires labels specified in dataset.json')\n desc += '-cond'\n else:\n 
args.training_set_kwargs.use_labels = False\n args.testing_set_kwargs.use_labels = False\n\n if subset is not None:\n assert isinstance(subset, int)\n if not 1 <= subset <= args.training_set_kwargs.max_size:\n raise UserError(f'--subset must be between 1 and {args.training_set_kwargs.max_size}')\n desc += f'-subset{subset}'\n if subset < args.training_set_kwargs.max_size:\n args.training_set_kwargs.max_size = subset\n args.training_set_kwargs.random_seed = args.random_seed\n\n if mirror is None:\n mirror = False\n assert isinstance(mirror, bool)\n if mirror:\n desc += '-mirror'\n args.training_set_kwargs.xflip = True\n args.testing_set_kwargs.xflip = True\n\n # ------------------------------------\n # Base config: cfg, gamma, kimg, batch\n # ------------------------------------\n\n if cfg is None:\n cfg = 'auto'\n assert isinstance(cfg, str)\n desc += f'-{cfg}-lam{lam:g}-temp{temp:g}-map_num{map_num:g}'\n\n cfg_specs = {\n 'auto': dict(ref_gpus=-1, kimg=25000, mb=-1, mbstd=-1, fmaps=-1, lrate=-1, gamma=1., ema=-1, ramp=0.05, map=map_num), # Populated dynamically based on resolution and GPU count.\n }\n\n assert cfg in cfg_specs\n spec = dnnlib.EasyDict(cfg_specs[cfg])\n if cfg == 'auto':\n desc += f'-gpus{gpus:d}'\n spec.ref_gpus = gpus\n res = args.training_set_kwargs.resolution\n spec.mb = 16*gpus#max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay\n spec.mbstd = min(spec.mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed\n spec.fmaps = 1 if res >= 512 else fmap\n spec.lrate = 0.002 if res >= 1024 else 0.0025\n spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula\n spec.ema = spec.mb * 10 / 32\n \n# args.M_kwargs = dnnlib.EasyDict(class_name='training.networks.ManiNetwork', z_dim=args.f_dim, layer_features=args.f_dim, w_dim=512, num_layers=8)\n args.G_kwargs = dnnlib.EasyDict(class_name='training.networks.Generator', z_dim=512, w_dim=512, m_layer_features=args.f_dim, m_num_layers=8, mapping_kwargs=dnnlib.EasyDict(), synthesis_kwargs=dnnlib.EasyDict())\n args.D_kwargs = dnnlib.EasyDict(class_name='training.networks.Discriminator', use_norm=args.d_use_norm, use_fts=args.d_use_fts, block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())\n args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(spec.fmaps * 32768)\n args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512\n args.G_kwargs.mapping_kwargs.num_layers = spec.map\n args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 4 # enable mixed-precision training\n args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow\n args.G_kwargs.synthesis_kwargs.change = change\n args.G_kwargs.synthesis_kwargs.f_dim = args.f_dim\n args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd\n args.D_kwargs.epilogue_kwargs.f_dim = args.f_dim\n \n args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)\n args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)\n args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma)\n\n args.total_kimg = spec.kimg\n args.batch_size = spec.mb\n args.batch_gpu = spec.mb // spec.ref_gpus\n args.ema_kimg = spec.ema\n args.ema_rampup = spec.ramp\n\n if cfg == 'cifar':\n args.loss_kwargs.pl_weight = 0 # disable path length 
regularization\n args.loss_kwargs.style_mixing_prob = 0 # disable style mixing\n args.D_kwargs.architecture = 'orig' # disable residual skip connections\n\n if gamma is not None:\n assert isinstance(gamma, float)\n if not gamma >= 0:\n raise UserError('--gamma must be non-negative')\n desc += f'-gamma{gamma:g}'\n args.loss_kwargs.r1_gamma = gamma\n\n if kimg is not None:\n assert isinstance(kimg, int)\n if not kimg >= 1:\n raise UserError('--kimg must be at least 1')\n desc += f'-kimg{kimg:d}'\n args.total_kimg = kimg\n\n if batch is not None:\n assert isinstance(batch, int)\n if not (batch >= 1 and batch % gpus == 0):\n raise UserError('--batch must be at least 1 and divisible by --gpus')\n desc += f'-batch{batch}'\n args.batch_size = batch\n args.batch_gpu = batch // gpus\n\n # ---------------------------------------------------\n # Discriminator augmentation: aug, p, target, augpipe\n # ---------------------------------------------------\n\n if aug is None:\n aug = 'noaug' # no augmentation is used in our experiments\n else:\n assert isinstance(aug, str)\n desc += f'-{aug}'\n\n if aug == 'ada':\n args.ada_target = 0.6\n\n elif aug == 'noaug':\n pass\n\n elif aug == 'fixed':\n if p is None:\n raise UserError(f'--aug={aug} requires specifying --p')\n\n else:\n raise UserError(f'--aug={aug} not supported')\n\n if p is not None:\n assert isinstance(p, float)\n if aug != 'fixed':\n raise UserError('--p can only be specified with --aug=fixed')\n if not 0 <= p <= 1:\n raise UserError('--p must be between 0 and 1')\n desc += f'-p{p:g}'\n args.augment_p = p\n\n if target is not None:\n assert isinstance(target, float)\n if aug != 'ada':\n raise UserError('--target can only be specified with --aug=ada')\n if not 0 <= target <= 1:\n raise UserError('--target must be between 0 and 1')\n desc += f'-target{target:g}'\n args.ada_target = target\n\n assert augpipe is None or isinstance(augpipe, str)\n if augpipe is None:\n augpipe = 'bgc'\n else:\n if aug == 'noaug':\n raise UserError('--augpipe cannot be specified with --aug=noaug')\n desc += f'-{augpipe}'\n\n augpipe_specs = {\n 'blit': dict(xflip=1, rotate90=1, xint=1),\n 'geom': dict(scale=1, rotate=1, aniso=1, xfrac=1),\n 'color': dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),\n 'filter': dict(imgfilter=1),\n 'noise': dict(noise=1),\n 'cutout': dict(cutout=1),\n 'bg': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),\n 'bgc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),\n 'bgcf': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1),\n 'bgfn': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, imgfilter=1, noise=1),\n 'bgcfn': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1),\n 'bgcfnc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1, cutout=1),\n }\n\n assert augpipe in augpipe_specs\n if aug != 'noaug':\n args.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', **augpipe_specs[augpipe])\n\n # ----------------------------------\n # Transfer learning: resume, freezed\n # ----------------------------------\n\n resume_specs = {\n 'ffhq256': 
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl',\n 'ffhq512': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl',\n 'ffhq1024': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl',\n 'celebahq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl',\n 'lsundog256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl',\n }\n\n assert resume is None or isinstance(resume, str)\n if resume is None:\n resume = 'noresume'\n elif resume == 'noresume':\n desc += '-noresume'\n elif resume in resume_specs:\n desc += f'-resume{resume}'\n args.resume_pkl = resume_specs[resume] # predefined url\n else:\n desc += '-resumecustom'\n args.resume_pkl = resume # custom path or url\n\n if resume != 'noresume':\n args.ada_kimg = 100 # make ADA react faster at the beginning\n args.ema_rampup = None # disable EMA rampup\n\n if freezed is not None:\n assert isinstance(freezed, int)\n if not freezed >= 0:\n raise UserError('--freezed must be non-negative')\n desc += f'-freezed{freezed:d}'\n args.D_kwargs.block_kwargs.freeze_layers = freezed\n\n # -------------------------------------------------\n # Performance options: fp32, nhwc, nobench, workers\n # -------------------------------------------------\n\n if fp32 is None:\n fp32 = False\n assert isinstance(fp32, bool)\n if fp32:\n args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 0\n args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = None\n\n if nhwc is None:\n nhwc = False\n assert isinstance(nhwc, bool)\n if nhwc:\n args.G_kwargs.synthesis_kwargs.fp16_channels_last = args.D_kwargs.block_kwargs.fp16_channels_last = True\n\n if nobench is None:\n nobench = False\n assert isinstance(nobench, bool)\n if nobench:\n args.cudnn_benchmark = False\n\n if allow_tf32 is None:\n allow_tf32 = False\n assert isinstance(allow_tf32, bool)\n if allow_tf32:\n args.allow_tf32 = True\n\n if workers is not None:\n assert isinstance(workers, int)\n if not workers >= 1:\n raise UserError('--workers must be at least 1')\n args.data_loader_kwargs.num_workers = workers\n\n return desc, args\n\n#----------------------------------------------------------------------------\n\ndef subprocess_fn(rank, args, temp_dir):\n dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)\n\n # Init torch.distributed.\n if args.num_gpus > 1:\n init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))\n if os.name == 'nt':\n init_method = 'file:///' + init_file.replace('\\\\', '/')\n torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)\n else:\n init_method = f'file://{init_file}'\n torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)\n\n # Init torch_utils.\n sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None\n training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)\n if rank != 0:\n custom_ops.verbosity = 'none'\n\n # Execute training loop.\n training_loop.training_loop(rank=rank, 
**args)\n\n#----------------------------------------------------------------------------\n\nclass CommaSeparatedList(click.ParamType):\n name = 'list'\n\n def convert(self, value, param, ctx):\n _ = param, ctx\n if value is None or value.lower() == 'none' or value == '':\n return []\n return value.split(',')\n\n#----------------------------------------------------------------------------\n\[email protected]()\[email protected]_context\n\[email protected]('--f_dim', help='dimension of features', type=int, metavar='INT')\[email protected]('--change', help='change structure', type=int, metavar='INT')\[email protected]('--map_num', help='layer number of mapping network', type=int, metavar='INT')\[email protected]('--d_use_norm', help='Input features into every layer of discriminator', type=bool, metavar='BOOL')\[email protected]('--d_use_fts', help='Use text feature in discriminator or not', type=bool, metavar='BOOL')\[email protected]('--gather', help='gather all negative samples across gpus or not', type=bool, metavar='BOOL')\[email protected]('--mixing_prob', help='if mixing_prob==1 -> no text data used', type=float)\[email protected]('--lam', help='hyper-parameter for contrastive loss (softmax along different dimensions)', type=float)\[email protected]('--temp', help='temperature for contrastive loss', type=float)\[email protected]('--itd', help='', type=float)\[email protected]('--itc', help='', type=float)\[email protected]('--iid', help='', type=float)\[email protected]('--iic', help='', type=float)\[email protected]('--metric_only_test', help='compute metrics using test dataset vs test dataset?', type=bool, metavar='BOOL')\[email protected]('--fmap', help='', type=float)\[email protected]('--ratio', help='ratio of data with ground-truth text used', type=float)\n\n\n# General options.\[email protected]('--outdir', help='Where to save the results', required=True, metavar='DIR')\[email protected]('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')\[email protected]('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')\[email protected]('--metrics', help='Comma-separated list or \"none\" [default: fid50k_full]', type=CommaSeparatedList())\[email protected]('--seed', help='Random seed [default: 0]', type=int, metavar='INT')\[email protected]('-n', '--dry-run', help='Print training options and exit', is_flag=True)\n\n# Dataset.\[email protected]('--data', help='Training data (directory or zip)', metavar='PATH', required=True)\[email protected]('--test_data', help='Testing data (directory or zip)', metavar='PATH', required=True)\n\[email protected]('--cond', help='Train conditional model based on dataset labels [default: false]', type=bool, metavar='BOOL')\[email protected]('--subset', help='Train with only N images [default: all]', type=int, metavar='INT')\[email protected]('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')\n\n# Base config.\[email protected]('--cfg', help='Base config [default: auto]', type=click.Choice(['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar']))\[email protected]('--gamma', help='Override R1 gamma', type=float)\[email protected]('--kimg', help='Override training duration', type=int, metavar='INT')\[email protected]('--batch', help='Override batch size', type=int, metavar='INT')\n\n# Discriminator augmentation.\[email protected]('--aug', help='Augmentation mode [default: ada]', type=click.Choice(['noaug', 'ada', 'fixed']))\[email protected]('--p', 
help='Augmentation probability for --aug=fixed', type=float)\[email protected]('--target', help='ADA target value for --aug=ada', type=float)\[email protected]('--augpipe', help='Augmentation pipeline [default: bgc]', type=click.Choice(['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc']))\n\n# Transfer learning.\[email protected]('--resume', help='Resume training [default: noresume]', metavar='PKL')\[email protected]('--freezed', help='Freeze-D [default: 0 layers]', type=int, metavar='INT')\n\n# Performance options.\[email protected]('--fp32', help='Disable mixed-precision training', type=bool, metavar='BOOL')\[email protected]('--nhwc', help='Use NHWC memory format with FP16', type=bool, metavar='BOOL')\[email protected]('--nobench', help='Disable cuDNN benchmarking', type=bool, metavar='BOOL')\[email protected]('--allow-tf32', help='Allow PyTorch to use TF32 internally', type=bool, metavar='BOOL')\[email protected]('--workers', help='Override number of DataLoader workers', type=int, metavar='INT')\n\ndef main(ctx, outdir, dry_run, **config_kwargs):\n \"\"\"Train a GAN using the techniques described in the paper\n \"Training Generative Adversarial Networks with Limited Data\".\n\n Examples:\n\n \\b\n # Train with custom dataset using 1 GPU.\n python train.py --outdir=~/training-runs --data=~/mydataset.zip --gpus=1\n\n \\b\n # Train class-conditional CIFAR-10 using 2 GPUs.\n python train.py --outdir=~/training-runs --data=~/datasets/cifar10.zip \\\\\n --gpus=2 --cfg=cifar --cond=1\n\n \\b\n # Transfer learn MetFaces from FFHQ using 4 GPUs.\n python train.py --outdir=~/training-runs --data=~/datasets/metfaces.zip \\\\\n --gpus=4 --cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10\n\n \\b\n # Reproduce original StyleGAN2 config F.\n python train.py --outdir=~/training-runs --data=~/datasets/ffhq.zip \\\\\n --gpus=8 --cfg=stylegan2 --mirror=1 --aug=noaug\n\n \\b\n Base configs (--cfg):\n auto Automatically select reasonable defaults based on resolution\n and GPU count. 
Good starting point for new datasets.\n stylegan2 Reproduce results for StyleGAN2 config F at 1024x1024.\n paper256 Reproduce results for FFHQ and LSUN Cat at 256x256.\n paper512 Reproduce results for BreCaHAD and AFHQ at 512x512.\n paper1024 Reproduce results for MetFaces at 1024x1024.\n cifar Reproduce results for CIFAR-10 at 32x32.\n\n \\b\n Transfer learning source networks (--resume):\n ffhq256 FFHQ trained at 256x256 resolution.\n ffhq512 FFHQ trained at 512x512 resolution.\n ffhq1024 FFHQ trained at 1024x1024 resolution.\n celebahq256 CelebA-HQ trained at 256x256 resolution.\n lsundog256 LSUN Dog trained at 256x256 resolution.\n <PATH or URL> Custom network pickle.\n \"\"\"\n dnnlib.util.Logger(should_flush=True)\n\n # Setup training options.\n try:\n run_desc, args = setup_training_loop_kwargs(**config_kwargs)\n except UserError as err:\n ctx.fail(err)\n\n # Pick output directory.\n prev_run_dirs = []\n if os.path.isdir(outdir):\n prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]\n prev_run_ids = [re.match(r'^\\d+', x) for x in prev_run_dirs]\n prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]\n cur_run_id = max(prev_run_ids, default=-1) + 1\n args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')\n assert not os.path.exists(args.run_dir)\n\n # Print options.\n print()\n print('Training options:')\n print(json.dumps(args, indent=2))\n print()\n print(f'Output directory: {args.run_dir}')\n print(f'Training data: {args.training_set_kwargs.path}')\n print(f'Training duration: {args.total_kimg} kimg')\n print(f'Number of GPUs: {args.num_gpus}')\n print(f'Number of images: {args.training_set_kwargs.max_size}')\n print(f'Image resolution: {args.training_set_kwargs.resolution}')\n print(f'Conditional model: {args.training_set_kwargs.use_labels}')\n print(f'Dataset x-flips: {args.training_set_kwargs.xflip}')\n print(f'Discriminator use normalization: {args.d_use_norm}')\n print(f'Discriminator use fts: {args.d_use_fts}')\n\n # Dry run?\n if dry_run:\n print('Dry run; exiting.')\n return\n\n # Create output directory.\n print('Creating output directory...')\n os.makedirs(args.run_dir)\n with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:\n json.dump(args, f, indent=2)\n\n # Launch processes.\n print('Launching processes...')\n torch.multiprocessing.set_start_method('spawn')\n with tempfile.TemporaryDirectory() as temp_dir:\n if args.num_gpus == 1:\n subprocess_fn(rank=0, args=args, temp_dir=temp_dir)\n else:\n torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main() # pylint: disable=no-value-for-parameter\n\n#----------------------------------------------------------------------------\n" ]
[ [ "torch.distributed.init_process_group", "torch.multiprocessing.set_start_method", "torch.device", "torch.multiprocessing.spawn" ] ]
organic-chemistry/repli1D
[ "1cef3aa3ffd760f9b88d0831bf1dce92c819c949" ]
[ "src/repli1d/nn.py" ]
[ "import os\n\nimport numpy as np\nimport pandas as pd\n\nfrom repli1d.analyse_RFD import nan_polate, smooth\n\n\ndef normal_seq(signal, q=99, output_path='../data/'):\n \"\"\"\n normalization function that transforms each fature in range (0,1)\n and outputs the minimum and maximum of features in a csv file in\n data folder inside the repository, suitable for future transformation\n on new dataset in a trained\n neural network.\n\n Parameters\n ----------\n signal : numpy array or pandas dataframe\n in the shape of (n_samples, n_features)\n output_path : str, default='../data/'\n q : float, default=99\n the quantile threshold, to act like a lowerpass filter\n to remove the outliers. The q is in percentage, this function substitutes\n (100-q) quantile from reversed sorted data by the quantile of data that\n specified by user. if user set q=None there would be no denoising and it\n would scale the input by its minimum, and its maximum.\n Returns\n -------\n transformed : numpy array\n a normalised sequence or features in the range (0,1)\n \"\"\"\n max_element = []\n min_element = []\n transformed = []\n if isinstance(signal, pd.DataFrame):\n signal = signal.to_numpy(copy=True)\n elif isinstance(signal, list):\n signal = np.array(signal)\n if signal.ndim == 1:\n if q is not None:\n max_element = np.percentile(signal, q)\n else:\n max_element = max(signal)\n min_element = min(signal)\n signal[signal > max_element] = max_element\n transformed.append((signal-min_element)/(\n max_element-min_element))\n else:\n if q is not None:\n max_element = np.percentile(signal, q, axis=0)\n else:\n max_element = signal.max(axis=0)\n for i in range(signal.shape[1]):\n min_element.append(min(signal[:, i]))\n signal[signal[:, i] > max_element[i]] = max_element[i]\n transformed.append((signal[:, i]-min_element[i])/(\n max_element[i]-min_element[i]))\n transformed = np.array(transformed).T # transpose for correspondence\n if output_path is not None:\n result = pd.DataFrame((min_element, max_element), index=['minimum',\n 'maximum'])\n result.to_csv(output_path + 'min_max_inputs.csv')\n return transformed\n\n\ndef inv_transform(signal, input_path='../data/'):\n \"\"\"\n Inversre transform is a function for transforming the output of NN to the\n scale of real dataset.\n\n Parameters\n ----------\n signal : numpy array or pandas dataframe\n in the shape of (n_samples, n_features)\n input_path : str, default='../data/'\n the address of a folder that contains min_max_outputs.csv.\n Returns\n -------\n inv_transformed : numpy array\n \"\"\"\n if isinstance(signal, pd.DataFrame):\n signal = signal.to_numpy(copy=True)\n scales = pd.read_csv(input_path + 'min_max_outputs.csv')\n min_s = scales.to_numpy(copy=True)[0, 1:]\n max_s = scales.to_numpy(copy=True)[1, 1:]\n scales = max_s - min_s\n scales = scales.reshape(1, -1)\n inv_transformed = np.multiply(signal, scales) + min_s\n return inv_transformed\n\n\ndef dev_transform(signal, input_path='../data/', is_denoised=True):\n \"\"\"\n normalization function that transforms each fature based on the\n scaling of the trainning set. This transformation should be done on\n test set(developmental set), or any new input for a trained neural\n network. 
Due to existence of a denoising step in the normal_seq funciton,\n this transformation can not reproduce the exact same of initial sequences,\n instead it transforms to the scale of denoised version of training set.\n\n Parameters\n ----------\n signal : numpy array or pandas dataframe\n in the shape of (n_samples, n_features)\n input_path : str, default='../data/'\n is_denoised : boolean\n it specifies the state if original sequence is denoised by a threshold,\n if it's set to False it means that user used q=None in normal_seq function.\n Returns\n -------\n transformed : numpy array\n a normalised sequence or features\n \"\"\"\n transformed = []\n if isinstance(signal, pd.DataFrame):\n signal = signal.to_numpy(copy=True)\n elif isinstance(signal, list):\n signal = np.array(signal)\n scales = pd.read_csv(input_path + 'min_max_inputs.csv')\n max_element = scales.to_numpy(copy=True)[1, 1:]\n min_element = scales.to_numpy(copy=True)[0, 1:]\n if signal.ndim == 1:\n if is_denoised is True:\n signal[signal > max_element] = max_element\n transformed.append((signal-min_element)/(\n max_element-min_element))\n else:\n for i in range(signal.shape[1]):\n if is_denoised is True:\n signal[signal[:, i] > max_element[i]] = max_element[i]\n transformed.append((signal[:, i]-min_element[i])/(\n max_element[i]-min_element[i]))\n transformed = np.array(transformed).T # transpose for correspondence\n return transformed\n\n\ndef transform_norm(signal):\n s = np.array(signal).copy()\n s -= np.percentile(s, 10)\n p = np.percentile(s, 50)\n if p == 0:\n p = np.mean(s)\n s /= p\n s /= 5\n s[s > 50] = 50\n return np.array(s, dtype=np.float32) # mod\n\n\ndef transform_DNase(signal):\n s = np.array(signal).copy()\n s /= 500\n s[s > 1] = 1\n return s\n\n\ndef transform_norm_meth(signal):\n s = np.array(signal).copy()\n print(np.percentile(s, [10, 95]))\n # s = np.percentile(s,10)\n s /= np.percentile(s, 95)\n s /= 20\n return s\n\n# print(transform_norm)\n\n\ndef filter_anomalyf(signal, smv, percentile, nf):\n for n in range(nf):\n delta = np.abs(signal-smooth(signal, smv))\n p = np.percentile(np.abs(delta), percentile)\n signal[np.abs(delta) > p] = np.nan\n signal = nan_polate(signal)\n return signal\n\n\ndef load_signal(name,\n marks=[\"H3K4me1\", \"H3K4me3\", \"H3K27me3\", \"H3K36me3\",\n \"H3K9me3\", \"H2A.Z\", \"H3K79me2\", \"H3K9ac\", \"H3K4me2\",\n \"H3K27ac\", \"H4K20me1\"],\n targets=[\"initiation\"], t_norm=None, smm=None, wig=True,\n augment=None, show=True, add_noise=False,\n filter_anomaly=False,\n repertory_scaling_param=\"../data/\"):\n \"\"\"\n This function does some modification on datset based on its column names\n and also revoke the scaling methods for different features and outputs,\n it also makes a mask for different chromosomes. 
to be able to\n adapt the method for different chromosomes it is necessary to call\n load_signal, and transform_seq for training set and then revoke them for\n test set or any other set (revoking two consequent load_signal on two\n different dataset then tranform_seq them may return wrong stacked\n sequences), it is necessary due to variable that defines in load_signal.\n\n Parameters\n ----------\n name : str or pd.Dataframe\n the address of a csv file or pandas dataframe\n marks : list\n a list that contains the names of markers as features for NN.\n targets : list\n a list that contains columns names of desired outputs of NN.\n repertory_scaling_param : str\n the address to save the scaling parameters in it.\n Returns\n -------\n df : numpy array\n a scaled dataset of features\n y_init : numpy array\n a scaled dataset of outputs\n notnan : numpy array\n \"\"\"\n if type(name) == str:\n df = pd.read_csv(name)\n\n # wig = True\n mask_borders = np.cumsum(df.chrom.value_counts().to_numpy(copy=True))\n if \"signal\" in df.columns:\n df[\"initiation\"] = df[\"signal\"]\n\n if wig:\n lm = [\"DNaseI\", \"initiation\", \"Meth\", \"Meth450\", \"RFDs\", \"MRTs\",\n \"RFDe\", \"MRTe\", \"AT_20\", \"RNA_seq\", \"AT_5\", \"AT_30\"]\n marks0 = [m+\"wig\" for m in marks if m not in lm]\n for sm in lm:\n if sm in marks:\n marks0 += [sm]\n\n assert(len(marks) == len(marks0))\n marks = marks0\n\n if \"notnan\" in df.columns:\n if show:\n print(\"Found notnan\")\n notnan = df[\"notnan\"]\n else:\n notnan = []\n\n df = df[targets+marks]\n if show:\n print(df.describe())\n\n yinit = [df.pop(target) for target in targets]\n # print(yinit.shape,\"Yinit shape\")\n\n if t_norm is not None:\n transform_norm = t_norm\n\n if transform_norm == normal_seq:\n df = pd.DataFrame(transform_norm(df,\n output_path=repertory_scaling_param))\n else:\n for col in df.columns:\n if show:\n print(col)\n if col not in [\"DNaseI\", \"initiation\", \"Meth\", \"Meth450\", \"RFDe\",\n \"MRTe\", \"RFDs\", \"MRTs\"]:\n df[col] = transform_norm(df[col])\n elif col == \"DNaseI\":\n df[col] = transform_DNase(df[col])\n elif col in [\"initiation\", \"Stall\"]:\n df[col] = df[col] / np.max(df[col])\n elif \"Meth\" in col:\n df[col] = transform_norm_meth(df[col])\n elif \"RFD\" in col:\n if \"RFD\" in col:\n if col == \"RFDe\" and filter_anomaly:\n df[col] = filter_anomalyf(df[col].copy(), smv=5,\n percentile=98.5, nf=4)\n\n # print(\"Nanpo\")\n df[col] = nan_polate(df[col])\n if add_noise and col == \"RFDs\":\n print(\"Noise: \", int(len(df)*0.01))\n for p in np.random.randint(0, len(df),\n size=int(len(df)*0.01)): # article 1%\n df[col][p] = 2*np.random.rand()-1\n\n if smm is not None:\n df[col] = smooth(df[col], smm)\n df[col] = (df[col]+1)/2\n elif \"MRT\" in col:\n if \"MRT\" in col:\n df[col] = nan_polate(df[col])\n if augment == \"test\":\n for asm in [10, 50, 200]:\n df[col+f\"_sm_{asm}\"] = smooth(nan_polate(df[col]), asm)\n df[col + f\"_sm_{asm}\"] -= np.mean(df[col+f\"_sm_{asm}\"])\n df[col + f\"_sm_{asm}\"] /= np.std(df[col + f\"_sm_{asm}\"])\n\n pass\n\n if np.sum(np.isnan(df[col])) != 0:\n raise \"NanVal\"\n\n if show:\n print(np.max(yinit[0]), \"max\")\n print(df.describe())\n\n yinit0 = []\n min_outputs = []\n max_outputs = []\n for y, t in zip(yinit, targets):\n if t in [\"initiation\", \"Stall\"]:\n max_outputs.append(np.max(y))\n min_outputs.append(np.min(y))\n trunc = (y - np.min(y)) / (np.max(y)-np.min(y)) # np.percentile(y,99)\n # trunc[trunc>1] = 1\n result = pd.DataFrame((min_outputs, max_outputs), index=['minimum',\n 
'maximum'])\n result.to_csv(os.path.join(repertory_scaling_param,\n 'min_max_outputs.csv'))\n yinit0.append(trunc)\n\n elif t == \"DNaseI\":\n yinit0.append(transform_DNase(y))\n elif t == \"OKSeq\":\n yinit0.append((y+1)/2)\n elif t == \"ORC2\":\n yinit0.append(y)\n else:\n raise \"Undefined target\"\n\n yinit = np.array(yinit0).T\n yinit[np.isnan(yinit)] = 0\n # print(yinit.shape)\n \"\"\"\n import pylab\n f=pylab.figure()\n pylab.plot(yinit)\n pylab.plot(df[\"RFDs\"])\n pylab.show()\n \"\"\"\n dict = {\"df\": df,\n \"yinit\": yinit,\n \"notnan\": notnan,\n \"mask_borders\": mask_borders}\n return dict\n\n\ndef window_stack(a, mask_borders, stepsize=1, width=3):\n \"\"\"\n This function makes windows of the size specified as 'width'\n and sweeping over dataset with the specified step size.\n\n Parameters\n ----------\n a : numpy array\n in the shape of (n_samples, n_features)\n step_size : int\n width : int\n mask_borders : list\n list of end positions of each chromosome as elements along\n the first axis of dataset.\n Returns\n -------\n window_stacked : numpy array or pandas dataframe\n in the shape of (n_windows, n_features*width)\n an array of stacked windows, column wise.\n \"\"\"\n window_stacked = []\n # print([[i,1+i-width or None,stepsize] for i in range(0,width)])\n for index, elem in enumerate(mask_borders):\n if index != 0:\n boundary = mask_borders[index-1] + 1\n else:\n boundary = 0\n b = a[boundary: elem+1]\n window_stacked.append([b[i:1+i-width or None:stepsize] for i in range(0, width)])\n window_stacked = np.hstack(window_stacked)\n return window_stacked\n\n\ndef transform_seq(Xt, yt, stepsize=1, width=3, impair=True):\n \"\"\"\n This function reshapes the output of window_stack function into a\n suitable shape for neural network.\n\n Parameters\n ----------\n Xt : numpy array\n in the shape of (n_samples, n_features)\n yt : numpy array\n in the shape of (n_samples, n_features)\n step_size : int\n width : int\n impair : bool\n Returns\n -------\n X : numpy array\n in the shape of (n_windows, 1, width, n_features)\n Y : numpy array\n in the shape of (n_windows, n_outputs)\n \"\"\"\n # X = (seq,dim)\n # y = (seq)\n # Xt = np.array(Xt, dtype=np.float16)\n yt = np.array(yt, dtype=np.float16)\n # print(Xt.shape, yt.shape)\n\n assert(len(Xt.shape) == 2)\n assert(len(yt.shape) == 2)\n if impair:\n assert(width % 2 == 1)\n X = window_stack(Xt, mask_borders, stepsize, width).reshape(-1, width, Xt.shape[-1])[::, np.newaxis, ::, ::]\n # [::,np.newaxis] #Take the value at the middle of the segment\n Y = window_stack(yt[::, np.newaxis], mask_borders, stepsize, width)[::, width//2]\n\n # print(X.shape, Y.shape)\n # exit()\n\n return X, Y\n\n\ndef train_test_split(chrom, ch_train, ch_test, notnan):\n print(list(ch_train), list(ch_test))\n\n chltrain = ch_train\n chltest = ch_test\n if len(notnan) != 0:\n train = [chi in chltrain and notna for chi, notna in zip(chrom.chrom, notnan)]\n test = [chi in chltest and notna for chi, notna in zip(chrom.chrom, notnan)]\n else:\n print(\"Working on all (no nan)\")\n train = [chi in chltrain for chi in chrom.chrom]\n test = [chi in chltest for chi in chrom.chrom]\n print(np.sum(train), np.sum(test), np.sum(test)/len(test))\n return train, test\n\n\ndef unison_shuffled_copies(a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]\n\n\ndef repad1d(res, window):\n return np.concatenate([np.zeros(window//2), res, np.zeros(window//2)])\n\n\nif __name__ == \"__main__\":\n\n import argparse\n import os\n\n from 
keras.callbacks import (EarlyStopping, History, ModelCheckpoint,\n ReduceLROnPlateau)\n from repli1d.models import jm_cnn_model as create_model\n from keras.models import load_model\n\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--cell', type=str, default=None)\n parser.add_argument('--rootnn', type=str, default=None)\n parser.add_argument('--nfilters', type=int, default=15)\n parser.add_argument('--resolution', type=int, default=5)\n parser.add_argument('--sm', type=int, default=None) # Smoothing exp data\n\n parser.add_argument('--window', type=int, default=51)\n parser.add_argument('--max_epoch', type=int, default=150)\n parser.add_argument('--batch_size', type=int, default=128)\n\n parser.add_argument('--imp', action=\"store_true\")\n parser.add_argument('--reduce_lr', action=\"store_true\")\n\n parser.add_argument('--wig', type=int, default=None)\n parser.add_argument('--dropout', type=float, default=0.1)\n\n parser.add_argument('--kernel_length', type=int, default=10)\n parser.add_argument('--weight', type=str, default=None)\n parser.add_argument('--loss', type=str, default=\"binary_crossentropy\")\n parser.add_argument('--augment', type=str, default=\"\")\n\n parser.add_argument('--marks', nargs='+', type=str, default=[])\n parser.add_argument('--targets', nargs='+', type=str, default=[\"initiation\"])\n parser.add_argument('--listfile', nargs='+', type=str, default=[])\n parser.add_argument('--enrichment', nargs='+', type=float, default=[0.1, 1.0, 5.0])\n parser.add_argument('--roadmap', action=\"store_true\")\n parser.add_argument('--noenrichment', action=\"store_true\")\n parser.add_argument('--predict_files', nargs='+', type=str, default=[])\n\n parser.add_argument('--restart', action=\"store_true\")\n parser.add_argument('--datafile', action=\"store_true\")\n parser.add_argument('--add_noise', action=\"store_true\")\n parser.add_argument('--filter_anomaly', action=\"store_true\")\n\n args = parser.parse_args()\n\n cell = args.cell\n rootnn = args.rootnn\n window = args.window\n marks = args.marks\n if marks == []:\n marks = ['H2az', 'H3k27ac', 'H3k79me2', 'H3k27me3', 'H3k9ac',\n 'H3k4me2', 'H3k4me3', 'H3k9me3', 'H3k4me1', 'H3k36me3', \"H4k20me1\"]\n\n lcell = [cell]\n\n if cell == \"all\":\n lcell = [\"K562\", \"GM\", \"Hela\"]\n\n os.makedirs(args.rootnn, exist_ok=True)\n\n root = \"/home/jarbona/projet_yeast_replication/notebooks/DNaseI/repli1d/\"\n if not args.datafile:\n if args.resolution == 5:\n XC = pd.read_csv(root + \"coords_K562.csv\", sep=\"\\t\") # List of chromosome coordinates\n if args.resolution == 1:\n XC = pd.read_csv(\"data/Hela_peak_1_kb.csv\", sep=\"\\t\")\n\n if args.listfile == []:\n listfile = []\n for cellt in lcell:\n name = \"/home/jarbona/repli1D/data/mlformat_whole_sig_%s_dec2.csv\" % cellt\n name = \"/home/jarbona/repli1D/data/mlformat_whole_sig_standard%s_dec2.csv\" % cellt\n name = \"/home/jarbona/repli1D/data/mlformat_whole_sig_standard%s_nn.csv\" % cellt\n wig = True\n if args.roadmap:\n name = \"/home/jarbona/repli1D/data/roadmap_%s_nn.csv\" % cellt\n wig = False\n\n listfile.append(name)\n else:\n listfile = args.listfile\n wig = False\n\n if args.wig is not None:\n if args.wig == 1:\n wig = True\n else:\n wig = False\n if args.weight is None or args.restart:\n X_train = []\n for name in listfile:\n print(name)\n temp_dict = load_signal(\n name, marks, targets=args.targets, t_norm=transform_norm,\n smm=args.sm, wig=wig, augment=args.augment,\n add_noise=args.add_noise,repertory_scaling_param=args.rootnn+\"/\")\n df, yinit, 
notnan, mask_borders = temp_dict.values()\n \"\"\"\n traint = [1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19]\n valt = [4, 18, 21, 22]\n testt = [5, 20]\n \"\"\"\n if not args.datafile:\n traint = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\n 18, 19] + [20, 21, 22, 23]\n valt = [20, 21, 22, 23]\n valt = [2]\n testt = [1] # 1]\n\n traint = [\"chr%i\" for i in traint]\n valt = [\"chr%i\" for i in valt]\n testt = [\"chr%i\" for i in testt]\n\n else:\n XC = pd.read_csv(args.listfile[0])\n chs = set(XC.chrom)\n traint = list(chs)\n tests = [\"chr1\"]\n valt = [\"chr2\"]\n traint.remove(tests[0])\n traint.remove(valt[0])\n\n # traint.pop(0)\n\n if not args.datafile:\n for v in testt:\n assert(v not in traint)\n for v in valt:\n assert(v not in traint)\n train, val = train_test_split(XC, traint, valt, notnan)\n X_train_us, X_val_us, y_train_us, y_val_us = df[train], df[val], yinit[train], yinit[val]\n\n vtrain = transform_seq(X_train_us, y_train_us, 1, window)\n vval = transform_seq(X_val_us, y_val_us, 1, window)\n del X_train_us, X_val_us, y_train_us, y_val_us\n if X_train == []:\n X_train, y_train = vtrain\n X_val, y_val = vval\n else:\n X_train = np.concatenate([X_train, vtrain[0]])\n y_train = np.concatenate([y_train, vtrain[1]])\n X_val = np.concatenate([X_val, vval[0]])\n y_val = np.concatenate([y_val, vval[1]])\n\n X_train, y_train = unison_shuffled_copies(X_train, y_train)\n\n n = X_train.shape[0] * X_train.shape[2]\n if n > 1e9:\n nmax = int(0.5e9//X_train.shape[2])\n print(nmax)\n X_train = X_train[:nmax]\n y_train = y_train[:nmax]\n\n print(\"Shape\", X_train.shape, y_train.shape)\n\n weight=None\n if (args.weight is not None) or os.path.exists(rootnn+\"/%sweights.hdf5\" % cell):\n weight= args.weight\n if weight is None:\n weight = rootnn+\"/%sweights.hdf5\" % cell\n multi_layer_keras_model = load_model(weight)\n multi_layer_keras_model.summary()\n del X_train, y_train\n\n if not args.restart and weight is not None:\n #load_model(args.weight)\n pass\n\n else:\n if not args.imp:\n multi_layer_keras_model = create_model(\n X_train, targets=args.targets, nfilters=args.nfilters,\n kernel_length=args.kernel_length, loss=args.loss)\n else:\n multi_layer_keras_model = create_model_imp(\n X_train, targets=args.targets, nfilters=args.nfilters,\n kernel_length=args.kernel_length, loss=args.loss)\n\n if args.restart:\n multi_layer_keras_model = load_model(args.weight)\n\n \"\"\"\n if (len(args.targets) == 1) and (args.targets[0] == \"OKSeq\"):\n\n selpercents = [1.0]\n else:\n selpercents = [0.1, 1.0, 5.0, \"all\"]\n \"\"\"\n\n totenr = args.enrichment + [\"all\"]\n if args.noenrichment:\n totenr = [\"all\"]\n\n print(totenr)\n for selp in totenr:\n\n print(sum(y_train == 0), sum(y_train != 0))\n if type(selp) == float:\n sel = y_train[::, 0] != 0\n th = np.percentile(y_train[::, 0], 100-selp)\n print(\"sepp,th\", selp, th)\n sel = y_train[::, 0] > th\n # sel = y_train[::, 0] > 0.2\n \"\"\"\n if sum(sel)/len(sel) > selp:\n th = np.percentile(sel,100-100*selp)\n print(th)\n sel = y_train[::, 0] > th\n \"\"\"\n print(\"top %i , Total %i, selected %i\" % (sum(sel), len(sel), int(0.01*selp*len(sel))))\n sel[np.random.randint(0, len(sel-1), int(0.01*selp*len(sel)))] = True\n print(\"Chekc\", np.sum(sel))\n else:\n\n sel = np.ones_like(y_train[::, 0], dtype=np.bool)\n print(np.sum(sel), sel.shape)\n print(X_train.shape, X_train[sel].shape)\n cp = [EarlyStopping(patience=3)]\n batch_size = 128\n if selp == \"all\" and False:\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', 
factor=0.2,\n patience=3, min_lr=0.0001)\n cp = [reduce_lr]\n if args.reduce_lr:\n cp = [EarlyStopping(patience=5),\n ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n patience=3, min_lr=0.0001)]\n\n if args.datafile:\n validation_data = (X_val, y_val)\n validation_split = 0.\n else:\n validation_data = (X_val, y_val)\n validation_split = 0.\n history_multi_filter = multi_layer_keras_model.fit(x=X_train[sel],\n y=y_train[sel],\n batch_size=args.batch_size,\n epochs=args.max_epoch,\n verbose=1,\n callbacks=cp+[History(),\n ModelCheckpoint(save_best_only=True,\n filepath=rootnn+\"/%sweights.{epoch:02d}-{val_loss:.4f}.hdf5\" % cell,\n verbose=1)],\n validation_data=validation_data,\n validation_split=validation_split)\n\n multi_layer_keras_model.save(rootnn+\"/%sweights.hdf5\" % cell)\n print(\"Saving on\", rootnn+\"/%sweights.hdf5\" % cell)\n del X_train, y_train\n ###################################\n # predict\n print(\"Predict\")\n if args.listfile == [] or args.roadmap or (len(args.predict_files) != 0):\n if marks == [\"RFDs\", \"MRTs\"]:\n marks = [\"RFDe\", \"MRTe\"]\n to_pred = []\n if len(args.predict_files) == 0:\n lcell = [\"K562\", \"Hela\", \"GM\"]\n if args.cell is not None and args.weight is not None:\n lcell = [args.cell]\n for cellp in lcell:\n namep = \"/home/jarbona/repli1D/data/mlformat_whole_sig_%s_dec2.csv\" % cellp\n namep = \"/home/jarbona/repli1D/data/mlformat_whole_sig_standard%s_nn.csv\" % cellp\n wig = True\n if args.roadmap:\n namep = \"/home/jarbona/repli1D/data/roadmap_%s_nn.csv\" % cellp\n wig = False\n to_pred.append(namep)\n else:\n to_pred = args.predict_files\n\n if args.wig is not None:\n if args.wig == 1:\n wig = True\n else:\n wig = False\n\n for namep in to_pred:\n\n cellp = os.path.split(namep)[1].split(\"_\")[0] # namep.split(\"_\")[-1][:-4]\n\n print(\"Reading %s, cell %s\" % (namep, cellp))\n temp_dict = load_signal(\n namep, marks, targets=args.targets, t_norm=transform_norm,\n wig=wig, smm=args.sm, augment=args.augment,\n filter_anomaly=args.filter_anomaly)\n df, yinit, notnan, mask_borders = temp_dict.values()\n X, y = transform_seq(df, yinit, 1, window)\n print(X.shape)\n res = multi_layer_keras_model.predict(X)\n del df, X, y\n print(res.shape, \"resshape\", yinit.shape)\n\n for itarget, target in enumerate(args.targets):\n XC[\"signalValue\"] = repad1d(res[::, itarget], window)\n if target == \"OKSeq\":\n XC[\"signalValue\"] = XC[\"signalValue\"] * 2-1\n # XC.to_csv(\"nn_hela_fk.csv\",index=False,sep=\"\\t\")\n if target == \"initiation\":\n ns = rootnn+\"/nn_%s_from_%s.csv\" % (cellp, cell)\n s = 0\n for y1, y2 in zip(yinit, XC[\"signalValue\"]):\n s += (y1-y2)**2\n print(\"Average delta\", s/len(yinit))\n else:\n ns = rootnn+\"/nn_%s_%s_from_%s.csv\" % (cellp, target, cell)\n\n print(\"Saving to\", ns)\n XC.to_csv(ns, index=False, sep=\"\\t\")\n else:\n for namep in args.listfile:\n marks = [\"RFDe\", \"MRTe\"]\n temp_dict = load_signal(\n namep, marks, targets=args.targets, t_norm=transform_norm,\n smm=args.sm, augment=args.augment,\n filter_anomaly=args.filter_anomaly)\n df, yinit, notnan, mask_borders = temp_dict.values()\n X, y = transform_seq(df, yinit, 1, window)\n print(X.shape)\n res = multi_layer_keras_model.predict(X)\n del df, X, y\n print(res.shape, \"resshape\", yinit.shape)\n\n for itarget, target in enumerate(args.targets):\n XC[\"signalValue\"] = repad1d(res[::, itarget], window)\n if target == \"OKSeq\":\n XC[\"signalValue\"] = XC[\"signalValue\"] * 2-1\n # XC.to_csv(\"nn_hela_fk.csv\",index=False,sep=\"\\t\")\n if target 
in [\"initiation\", \"Init\"]:\n namew = namep.split(\"/\")[-1][:-4]\n ns = rootnn+\"/nn_%s.csv\" % (namew)\n s = 0\n for y1, y2 in zip(yinit, XC[\"signalValue\"]):\n s += (y1-y2)**2\n print(\"Average delta\", s/len(yinit))\n else:\n ns = rootnn+\"/nn_%s_%s.csv\" % (namew, target)\n # print(\"Not implemented\")\n # ns = rootnn+\"/nn_%s_%s_from_%s.csv\" % (cellp, target, cell)\n\n print(\"Saving to\", ns)\n XC.to_csv(ns, index=False, sep=\"\\t\")\n" ]
[ [ "numpy.sum", "numpy.multiply", "numpy.zeros", "pandas.read_csv", "pandas.DataFrame", "numpy.abs", "numpy.ones_like", "numpy.hstack", "numpy.max", "numpy.min", "numpy.random.rand", "numpy.isnan", "numpy.array", "numpy.std", "numpy.concatenate", "numpy.percentile", "numpy.mean" ] ]
koudyk/netneurotools
[ "7631cf8303f1a754dd4df0f209ce4cea50417714" ]
[ "netneurotools/networks.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nFunctions for generating group-level networks from individual measurements\n\"\"\"\n\nimport numpy as np\nfrom scipy.sparse import csgraph\nfrom sklearn.utils.validation import (check_random_state, check_array,\n check_consistent_length)\n\nfrom . import utils\n\n\ndef func_consensus(data, n_boot=1000, ci=95, seed=None):\n \"\"\"\n Calculates thresholded group consensus functional connectivity graph\n\n This function concatenates all time series in `data` and computes a group\n correlation matrix based on this extended time series. It then generates\n length `T` bootstrapped samples from the concatenated matrix and estimates\n confidence intervals for all correlations. Correlations whose sign is\n consistent across bootstraps are retained; inconsistent correlations are\n set to zero.\n\n If `n_boot` is set to 0 or None a simple, group-averaged functional\n connectivity matrix is estimated, instead.\n\n Parameters\n ----------\n data : (N, T, S) array_like\n Pre-processed functional time series, where `N` is the number of nodes,\n `T` is the number of volumes in the time series, and `S` is the number\n of subjects\n n_boot : int, optional\n Number of bootstraps for which to generate correlation. Default: 1000\n ci : (0, 100) float, optional\n Confidence interval for which to assess the reliability of correlations\n with bootstraps. Default: 95\n seed : int, optional\n Random seed. Default: None\n\n Returns\n -------\n consensus : (N, N) numpy.ndarray\n Thresholded, group-level correlation matrix\n\n References\n ----------\n Mišić, B., Betzel, R. F., Nematzadeh, A., Goni, J., Griffa, A., Hagmann,\n P., Flammini, A., Ahn, Y.-Y., & Sporns, O. (2015). Cooperative and\n competitive spreading dynamics on the human connectome. 
Neuron, 86(6),\n 1518-1529.\n \"\"\"\n\n # check inputs\n rs = check_random_state(seed)\n if ci > 100 or ci < 0:\n raise ValueError(\"`ci` must be between 0 and 100.\")\n\n # group-average functional connectivity matrix desired instead of bootstrap\n if n_boot == 0 or n_boot is None:\n corrs = [np.corrcoef(data[..., sub]) for sub in range(data.shape[-1])]\n return np.mean(corrs, axis=0)\n\n collapsed_data = data.reshape((len(data), -1), order='F')\n consensus = np.corrcoef(collapsed_data)\n\n # only keep the upper triangle for the bootstraps to save on memory usage\n triu_inds = np.triu_indices_from(consensus, k=1)\n bootstrapped_corrmat = np.zeros((len(triu_inds[0]), n_boot))\n\n # generate `n_boot` bootstrap correlation matrices by sampling `t` time\n # points from the concatenated time series\n for boot in range(n_boot):\n inds = rs.randint(collapsed_data.shape[-1], size=data.shape[1])\n bootstrapped_corrmat[..., boot] = \\\n np.corrcoef(collapsed_data[:, inds])[triu_inds]\n\n # extract the CIs from the bootstrapped correlation matrices\n # we don't need the input anymore so overwrite it\n bootstrapped_ci = np.percentile(bootstrapped_corrmat, [100 - ci, ci],\n axis=-1, overwrite_input=True)\n\n # remove unreliable (i.e., CI zero-crossing) correlations\n # if the signs of the bootstrapped confidence intervals are different\n # (i.e., their signs sum to 0), then we want to remove them\n # so, take the logical not of the CI (CI = 0 ---> True) and create a mask\n # then, set all connections from the consensus array inside the mask to 0\n remove_inds = np.logical_not(np.sign(bootstrapped_ci).sum(axis=0))\n mask = np.zeros_like(consensus, dtype=bool)\n mask[triu_inds] = remove_inds\n consensus[mask + mask.T] = 0\n\n return consensus\n\n\ndef _ecdf(data):\n \"\"\"\n Estimates empirical cumulative distribution function of `data`\n\n Taken directly from StackOverflow. See original answer at\n https://stackoverflow.com/questions/33345780.\n\n Parameters\n ----------\n data : array_like\n\n Returns\n -------\n prob : numpy.ndarray\n Cumulative probability\n quantiles : numpy.darray\n Quantiles\n \"\"\"\n\n sample = np.atleast_1d(data)\n\n # find the unique values and their corresponding counts\n quantiles, counts = np.unique(sample, return_counts=True)\n\n # take the cumulative sum of the counts and divide by the sample size to\n # get the cumulative probabilities between 0 and 1\n prob = np.cumsum(counts).astype(float) / sample.size\n\n # match MATLAB\n prob, quantiles = np.append([0], prob), np.append(quantiles[0], quantiles)\n\n return prob, quantiles\n\n\ndef struct_consensus(data, distance, hemiid):\n \"\"\"\n Calculates distance-dependent group consensus structural connectivity graph\n\n Takes as input a weighted stack of connectivity matrices with dimensions\n (N, N, S) where `N` is the number of nodes and `S` is the number of\n matrices or subjects. The matrices must be weighted, and ideally with\n continuous weights (e.g. fractional anisotropy rather than streamline\n count). The second input is a pairwise distance matrix, where distance(i,j)\n is the Euclidean distance between nodes i and j. 
The final input is an\n (N, 1) vector which labels nodes as belonging to the right (`hemiid==0`) or\n left (`hemiid=1`) hemisphere (note that these values can be flipped as long\n as `hemiid` contains only values of 0 and 1).\n\n This function estimates the average edge length distribution and builds\n a group-averaged connectivity matrix that approximates this distribution\n with density equal to the mean density across subjects.\n\n The algorithm works as follows:\n\n 1. Estimate the cumulative edge length distribution,\n 2. Divide the distribution into M length bins, one for each edge that will\n be added to the group-average matrix, and\n 3. Within each bin, select the edge that is most consistently expressed\n expressed across subjects, breaking ties according to average edge\n weight (which is why the input matrix `data` must be weighted).\n\n The algorithm works separately on within/between hemisphere links.\n\n Parameters\n ----------\n data : (N, N, S) array_like\n Weighted connectivity matrices (i.e., fractional anisotropy), where `N`\n is nodes and `S` is subjects\n distance : (N, N) array_like\n Array where `distance[i, j]` is the Euclidean distance between nodes\n `i` and `j`\n hemiid : (N, 1) array_like\n Hemisphere designation for `N` nodes where a value of 0/1 indicates\n node `N_{i}` is in the right/left hemisphere, respectively\n\n Returns\n -------\n consensus : (N, N) numpy.ndarray\n Binary, group-level connectivity matrix\n\n References\n ----------\n Betzel, R. F., Griffa, A., Hagmann, P., & Mišić, B. (2018). Distance-\n dependent consensus thresholds for generating group-representative\n structural brain networks. Network Neuroscience, 1-22.\n \"\"\"\n\n # confirm input shapes are as expected\n check_consistent_length(data, distance, hemiid)\n try:\n hemiid = check_array(hemiid, ensure_2d=True)\n except ValueError:\n raise ValueError('Provided hemiid must be a 2D array. 
Reshape your '\n 'data using array.reshape(-1, 1) and try again.')\n\n num_node, _, num_sub = data.shape # info on connectivity matrices\n pos_data = data > 0 # location of + values in matrix\n pos_data_count = pos_data.sum(axis=2) # num sub with + values at each node\n\n with np.errstate(divide='ignore', invalid='ignore'):\n average_weights = data.sum(axis=2) / pos_data_count\n\n # empty array to hold inter/intra hemispheric connections\n consensus = np.zeros((num_node, num_node, 2))\n\n for conn_type in range(2): # iterate through inter/intra hemisphere conn\n if conn_type == 0: # get inter hemisphere edges\n inter_hemi = (hemiid == 0) @ (hemiid == 1).T\n keep_conn = np.logical_or(inter_hemi, inter_hemi.T)\n else: # get intra hemisphere edges\n right_hemi = (hemiid == 0) @ (hemiid == 0).T\n left_hemi = (hemiid == 1) @ (hemiid == 1).T\n keep_conn = np.logical_or(right_hemi @ right_hemi.T,\n left_hemi @ left_hemi.T)\n\n # mask the distance array for only those edges we want to examine\n full_dist_conn = distance * keep_conn\n upper_dist_conn = np.atleast_3d(np.triu(full_dist_conn))\n\n # generate array of weighted (by distance), positive edges across subs\n pos_dist = pos_data * upper_dist_conn\n pos_dist = pos_dist[np.nonzero(pos_dist)]\n\n # determine average # of positive edges across subs\n # we will use this to bin the edge weights\n avg_conn_num = len(pos_dist) / num_sub\n\n # estimate empirical CDF of weighted, positive edges across subs\n cumprob, quantiles = _ecdf(pos_dist)\n cumprob = np.round(cumprob * avg_conn_num).astype(int)\n\n # empty array to hold group-average matrix for current connection type\n # (i.e., inter/intra hemispheric connections)\n group_conn_type = np.zeros((num_node, num_node))\n\n # iterate through bins (for edge weights)\n for n in range(1, int(avg_conn_num) + 1):\n # get current quantile of interest\n curr_quant = quantiles[np.logical_and(cumprob >= (n - 1),\n cumprob < n)]\n if curr_quant.size == 0:\n continue\n\n # find edges in distance connectivity matrix w/i current quantile\n mask = np.logical_and(full_dist_conn >= curr_quant.min(),\n full_dist_conn <= curr_quant.max())\n i, j = np.where(np.triu(mask)) # indices of edges of interest\n\n c = pos_data_count[i, j] # get num sub with + values at edges\n w = average_weights[i, j] # get averaged weight of edges\n\n # find locations of edges most commonly represented across subs\n indmax = np.argwhere(c == c.max())\n\n # determine index of most frequent edge; break ties with higher\n # weighted edge\n if indmax.size == 1: # only one edge found\n group_conn_type[i[indmax], j[indmax]] = 1\n else: # multiple edges found\n indmax = indmax[np.argmax(w[indmax])]\n group_conn_type[i[indmax], j[indmax]] = 1\n\n consensus[:, :, conn_type] = group_conn_type\n\n # collapse across hemispheric connections types and make symmetrical array\n consensus = consensus.sum(axis=2)\n consensus = np.logical_or(consensus, consensus.T).astype(int)\n\n return consensus\n\n\ndef binarize_network(network, retain=10, keep_diag=False):\n \"\"\"\n Keeps top `retain` % of connections in `network` and binarizes\n\n Uses the upper triangle for determining connection percentage, which may\n result in disconnected nodes. If this behavior is not desired see\n :py:func:`netneurotools.networks.threshold_network`.\n\n Parameters\n ----------\n network : (N, N) array_like\n Input graph\n retain : [0, 100] float, optional\n Percent connections to retain. 
Default: 10\n keep_diag : bool, optional\n Whether to keep the diagonal instead of setting it to 0. Default: False\n\n Returns\n -------\n binarized : (N, N) numpy.ndarray\n Binarized, thresholded graph\n\n See Also\n --------\n netneurotools.networks.threshold_network\n \"\"\"\n\n if retain < 0 or retain > 100:\n raise ValueError('Value provided for `retain` is outside [0, 100]: {}'\n .format(retain))\n\n prctile = 100 - retain\n triu = utils.get_triu(network)\n thresh = np.percentile(triu, prctile, axis=0, keepdims=True)\n binarized = np.array(network > thresh, dtype=int)\n\n if not keep_diag:\n binarized[np.diag_indices(len(binarized))] = 0\n\n return binarized\n\n\ndef threshold_network(network, retain=10):\n \"\"\"\n Keeps top `retain` % of connections in `network` and binarizes\n\n Uses a minimum spanning tree to ensure that no nodes are disconnected from\n the resulting thresholded graph\n\n Parameters\n ----------\n network : (N, N) array_like\n Input graph\n retain : [0, 100] float, optional\n Percent connections to retain. Default: 10\n\n Returns\n -------\n thresholded : (N, N) numpy.ndarray\n Binarized, thresholded graph\n\n See Also\n --------\n netneurotools.networks.binarize_network\n \"\"\"\n\n if retain < 0 or retain > 100:\n raise ValueError('Value provided for `retain` must be a percent '\n 'in range [0, 100]. Provided: {}'.format(retain))\n\n # get number of nodes in graph and invert weights (MINIMUM spanning tree)\n nodes = len(network)\n graph = np.triu(network * -1)\n\n # find MST and count # of edges in graph\n mst = csgraph.minimum_spanning_tree(graph).todense()\n mst_edges = np.sum(mst != 0)\n\n # determine # of remaining edges and ensure we're not over the limit\n remain = int((retain / 100) * ((nodes * (nodes - 1)) / 2)) - mst_edges\n if remain < 0:\n raise ValueError('Minimum spanning tree with {} edges exceeds desired '\n 'connection density of {}% ({} edges). Cannot '\n 'proceed with graph creation.'\n .format(mst_edges, retain, remain + mst_edges))\n\n # zero out edges already in MST and then get indices of next best edges\n graph -= mst\n inds = utils.get_triu(graph).argsort()[:remain]\n inds = tuple(e[inds] for e in np.triu_indices_from(graph, k=1))\n\n # add edges to MST, symmetrize, and convert to binary matrix\n mst[inds] = graph[inds]\n mst = np.array((mst + mst.T) != 0, dtype=int)\n\n return mst\n" ]
[ [ "numpy.sum", "numpy.logical_or", "sklearn.utils.validation.check_consistent_length", "numpy.append", "sklearn.utils.validation.check_random_state", "numpy.logical_and", "numpy.nonzero", "numpy.unique", "numpy.mean", "numpy.corrcoef", "numpy.round", "numpy.triu_indices_from", "numpy.zeros", "scipy.sparse.csgraph.minimum_spanning_tree", "numpy.argmax", "sklearn.utils.validation.check_array", "numpy.percentile", "numpy.triu", "numpy.zeros_like", "numpy.sign", "numpy.cumsum", "numpy.atleast_1d", "numpy.errstate", "numpy.array" ] ]
dankiy/2019_IT
[ "21afdc44913dccf6746879fd075d20098db599cb" ]
[ "task4.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nnp.random.seed(0)\n\nM, N = 10, 5\n\ndef is_pareto_efficient(X):\n is_efficient = np.ones(len(X), dtype = bool)\n for i, c in enumerate(X):\n if is_efficient[i]:\n is_efficient[is_efficient] = np.any(X[is_efficient] > c, axis=1) \n is_efficient[i] = True \n return is_efficient\n\nX = np.random.sample((M, N))\n\neff = is_pareto_efficient(X)\nax = plt.subplot(111, projection=\"polar\")\nplt.thetagrids(np.arange(0, 360, 360/N))\n\nfor i in range(len(eff)):\n if eff[i] == True:\n ax.plot(np.append(np.arange(0, N, 1), 0) * 2 * math.pi/N, np.append(X[i, :], X[i, 0]), color=\"r\")\n else:\n ax.plot(np.append(np.arange(0, N, 1) * 2 * math.pi/N, 0), np.append(X[i, :], X[i, 0]), color=\"b\")\n" ]
[ [ "numpy.append", "numpy.any", "numpy.random.seed", "numpy.arange", "matplotlib.pyplot.subplot", "numpy.random.sample" ] ]
paulgowdy/l2m
[ "c1eb190a9117c249094c2ee8af74f7ee1b6e655f" ]
[ "collect_experience_2.py" ]
[ "from osim.env import L2M2019Env\nfrom osim.control.osim_loco_reflex_song2019 import OsimReflexCtrl\nimport numpy as np\nimport pickle\n\nmode = '2D'\ndifficulty = 1\nvisualize=False\nseed=None\nsim_dt = 0.01\nsim_t = 5\ntimstep_limit = int(round(sim_t/sim_dt))\n\n\nINIT_POSE = np.array([\n 1.699999999999999956e+00, # forward speed\n .5, # rightward speed\n 9.023245653983965608e-01, # pelvis height\n 2.012303881285582852e-01, # trunk lean\n 0*np.pi/180, # [right] hip adduct\n -6.952390849304798115e-01, # hip flex\n -3.231075259785813891e-01, # knee extend\n 1.709011708233401095e-01, # ankle flex\n 0*np.pi/180, # [left] hip adduct\n -5.282323914341899296e-02, # hip flex\n -8.041966456860847323e-01, # knee extend\n -1.745329251994329478e-01]) # ankle flex\n\nif mode is '2D':\n params = np.loadtxt('params2d.txt')\nelif mode is '3D':\n params = np.loadtxt('params_3D_init.txt')\n\n\n\n\n\nlocoCtrl = OsimReflexCtrl(mode=mode, dt=sim_dt)\n\ncontrol_env = L2M2019Env(visualize=visualize, seed=seed, difficulty=difficulty)\ncontrol_env.change_model(model=mode, difficulty=difficulty, seed=seed)\nobs_dict_action = control_env.reset(project=True, seed=seed, obs_as_dict=True, init_pose=INIT_POSE)\ncontrol_env.spec.timestep_limit = timstep_limit\n\nobs_env = L2M2019Env(visualize=False, seed=seed, difficulty=difficulty)\nobs_env.change_model(model=mode, difficulty=difficulty, seed=seed)\nobs_dict_record = obs_env.reset(project=False, seed=seed, obs_as_dict=False, init_pose=INIT_POSE)\nobs_env.spec.timestep_limit = timstep_limit\n\nwith open('norm_sample.p', 'rb') as f:\n\n norm_sample = pickle.load(f)\n\n means = norm_sample[0]\n stds = norm_sample[1]\n\n\ndef process_obs_dict(obs_dict):\n\n '''\n for k in obs_dict.keys():\n\n print(k)\n\n joint_pos\n joint_vel\n joint_acc\n body_pos\n body_vel\n body_acc\n body_pos_rot\n body_vel_rot\n body_acc_rot\n forces\n muscles\n markers\n misc\n v_tgt_field\n\n '''\n\n\n\n\n\n v_tgt = obs_dict['v_tgt_field']\n #print(v_tgt.shape) 2,11,11\n v_tgt = v_tgt.flatten() / 10.0\n\n\n new_obs = list(v_tgt)\n\n pelvis_pos = obs_dict['body_pos']['pelvis']\n\n new_obs.extend(pelvis_pos)\n\n for k in obs_dict['body_pos'].keys():\n\n if k != 'pelvis':\n\n #print(obs_dict['body_pos'][k])\n #print([a - b for a, b in zip(obs_dict['body_pos'][k], pelvis_pos)])\n #print('')\n\n new_obs.extend([a - b for a, b in zip(obs_dict['body_pos'][k], pelvis_pos)])\n\n #'muscles', 'misc'\n # , 'forces'\n\n for k in ['joint_pos', 'joint_vel', 'joint_acc', 'body_vel', 'body_acc', 'body_pos_rot', 'body_vel_rot', 'body_acc_rot']:\n\n for sub_k in obs_dict[k].keys():\n\n new_obs.extend(obs_dict[k][sub_k])\n\n new_obs = [a - b for a,b in zip(new_obs, means)]\n new_obs = [float(a)/float(b) for a,b in zip( new_obs, stds)]\n\n\n\n return new_obs\n\n\n\n\n\ntotal_reward = 0\nt = 0\ni = 0\n\nobs_collect = []\naction_collect = []\n\nwhile True:\n i += 1\n t += sim_dt\n\n proc_obs = process_obs_dict(obs_dict_record)\n\n #print(proc_obs)\n\n locoCtrl.set_control_params(params)\n\n action = locoCtrl.update(obs_dict_action)\n\n obs_collect.append(proc_obs)\n action_collect.append(action)\n\n obs_dict_action, reward, done, info = control_env.step(action, project = True, obs_as_dict=True)\n obs_dict_record, reward_obs, done_obs, info_obs = obs_env.step(action, project = False, obs_as_dict=False)\n\n print(i, reward)\n #print(action)\n #print(len(obs_dict_record))\n\n print('')\n total_reward += reward\n\n if done:\n break\n\nprint(' score={} time={}sec'.format(total_reward, t))\n\nobs_collect = 
np.array(obs_collect)\naction_collect = np.array(action_collect)\n\nprint(obs_collect.shape)\nprint(action_collect.shape)\n\nwith open('saved_experience_normed.p', 'wb') as f:\n\n pickle.dump([obs_collect, action_collect], f)\n" ]
[ [ "numpy.array", "numpy.loadtxt" ] ]
Aerochip7/gan
[ "d3648c0f3996bd9e5564c05a44ff4215e5156cbd" ]
[ "tensorflow_gan/examples/mnist/conditional_eval.py" ]
[ "# coding=utf-8\n# Copyright 2022 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Evaluates a conditional TF-GAN trained MNIST model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow_gan.examples.mnist import conditional_eval_lib\n\nflags.DEFINE_string('checkpoint_dir', '/tmp/mnist/',\n 'Directory where the model was written to.')\n\nflags.DEFINE_string('eval_dir', '/tmp/mnist/',\n 'Directory where the results are saved to.')\n\nflags.DEFINE_integer('num_images_per_class', 10,\n 'Number of images to generate per class.')\n\nflags.DEFINE_integer('noise_dims', 64,\n 'Dimensions of the generator noise vector')\n\nflags.DEFINE_integer(\n 'max_number_of_evaluations', None,\n 'Number of times to run evaluation. If `None`, run '\n 'forever.')\n\nflags.DEFINE_boolean('write_to_disk', True, 'If `True`, run images to disk.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n hparams = conditional_eval_lib.HParams(FLAGS.checkpoint_dir, FLAGS.eval_dir,\n FLAGS.num_images_per_class,\n FLAGS.noise_dims,\n FLAGS.max_number_of_evaluations,\n FLAGS.write_to_disk)\n conditional_eval_lib.evaluate(hparams, run_eval_loop=True)\n\n\nif __name__ == '__main__':\n tf.disable_v2_behavior()\n app.run(main)\n" ]
[ [ "tensorflow.compat.v1.disable_v2_behavior" ] ]
fmitch/incubator-tvm
[ "67e3f437af90724a5af0bff67d033d47c8a2edf7" ]
[ "experiments/dv_search_matmul.py" ]
[ "import logging\nimport time\nimport sys\nimport os\nimport numpy as np\nfrom multiprocessing import Pool, cpu_count\nimport random\nimport string\nfrom tensors import *\n\nimport pickle\n\nimport tvm\nimport topi\nfrom topi.testing import conv2d_nchw_python\nfrom tvm import te\nfrom tvm import autotvm\nfrom tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner, DataVolumeTuner\nimport tvm.contrib.graph_runtime as runtime\n#from tvm.autotvm.task.topi_integration import deserialize_args\nfrom collections import namedtuple\nfrom itertools import permutations\n\nimport argparse\n\n#import logging\n#logging.getLogger('autotvm').setLevel(logging.DEBUG)\n#logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))\n\nglobal num_threads\nnum_threads = 32\nos.environ[\"TVM_NUM_THREADS\"] = str(num_threads)\n\nletters = string.digits + string.ascii_letters\n\n\ndef get_matmul_dv(ind):\n config = task.config_space.get(ind)\n d_foot, d_vol = autotvm.tuner.data_volume_estimator.estimate_dv(*get_matmul_extents_info(M,N,K,config,matmul_index))\n return -1*(d_vol[2][:,:,-1].sum(axis=0) * np.array([64/100e9, 64/44e9, 64/25e9])).sum()\n\ndef concurrency_ratio(ind):\n config = task.config_space.get(ind)\n mo_value = np.ceil(M / config['tile_m'].size[-1])\n no_value = np.ceil(N / config['tile_n'].size[-1])\n\n concurrency = mo_value * no_value\n\n return np.floor(concurrency/num_threads) / np.ceil(concurrency/num_threads)\n\ndef get_dv(ind):\n config = task.config_space.get(ind)\n d_foot, d_vol = autotvm.tuner.data_volume_estimator.estimate_dv(*get_extents_info(config))\n return -1*(d_vol[2][:,:,-1].sum(axis=0) * np.array([64/100e9, 64/44e9, 64/25e9])).sum()\n\ndef limited_test(ind):\n tic = time.time()\n lower_llvm_limit = 1\n upper_llvm_limit = 2\n lower_asm_limit = 0.5\n upper_asm_limit = 2\n results = []\n config = task.config_space.get(ind)\n with autotvm.ApplyConfig(config):\n with tvm.target.create(\"llvm -mcpu=core-avx2\"):\n s, arg_bufs = task.func(*task.args)\n op_func = tvm.build(s, arg_bufs)\n build_time = time.time() - tic\n\n ll_source = op_func.get_source()\n\n funcs = ll_source.split('\\n\\n')\n llvm_opint = 0\n asm_opint = 0\n length = 0\n for func in funcs:\n if 'fmuladd.v' in func and len(func) > length:\n length = len(func)\n longest = func\n\n loads = 0\n stores = 0\n fmas = 0\n if length > 0:\n lines = longest.split('\\n')\n for line in lines:\n if 'load <' in line:\n loads += 1\n elif 'store <' in line:\n stores += 1\n elif 'fmuladd.v8' in line:\n fmas += 1\n if loads+stores > 0:\n llvm_opint = fmas / (loads+stores)\n\n if llvm_opint >= lower_llvm_limit and llvm_opint <= upper_llvm_limit:\n tic = time.time()\n asm_source = op_func.get_source('asm')\n asm_time = time.time() - tic\n\n\n funcs = asm_source.split(':\\n')\n length = 0\n for func in funcs:\n if 'vfmadd' in func and len(func) > length:\n length = len(func)\n longest = func\n moves = 0\n fmas = 0\n if length > 0:\n lines = longest.split('\\n')\n for line in lines:\n if 'vmov' in line and 'ymm' in line:\n moves += 1\n elif 'vfmadd' in line and 'ymm' in line:\n fmas += 1\n if '(%r' in line:\n moves += 1\n if moves > 0:\n asm_opint = fmas / moves\n\n if asm_opint >= lower_asm_limit and asm_opint <= upper_asm_limit:\n module_file = os.path.join('/tmp/', ''.join(random.choice(letters) for i in range(10)) + '.o')\n op_func.save(module_file)\n return module_file, llvm_opint, asm_opint, ind,build_time, asm_time\n\n return '', llvm_opint, asm_opint, ind, build_time, 0\n\n\ndef eval_time(ind, 
module_file):\n config = task.config_space.get(ind)\n with autotvm.ApplyConfig(config):\n with tvm.target.create(\"llvm -mcpu=core-avx2\"):\n s, arg_bufs = task.func(*task.args)\n func = tvm.runtime.load_module(module_file)\n\n a_np = np.random.uniform(size=(N, N))\n b_np = np.random.uniform(size=(N, N))\n c_np = np.zeros((N,N))\n ctx = tvm.cpu()\n a_tvm = tvm.nd.array(a_np.astype(np.float32), ctx=ctx)\n b_tvm = tvm.nd.array(b_np.astype(np.float32), ctx=ctx)\n c_tvm = tvm.nd.array(c_np.astype(np.float32), ctx=ctx)\n\n evaluator = func.time_evaluator(func.entry_name, ctx, repeat=10,number=4,)\n variation = 1\n while variation > 0.05:\n res = np.array(sorted(evaluator(a_tvm, b_tvm, c_tvm).results)[:-5])\n variation = res.std() / res.mean()\n\n #if tuple(arg_bufs[1].shape) == b_tvm.shape:\n # res = evaluator(c_tvm, b_tvm, a_tvm)\n #else:\n # res = evaluator(c_tvm, a_tvm, b_tvm)\n\n return res.mean(), ind\n\ndef tune_kernels(args, trials, cr_limit):\n \n func_create = 'template/matmul'\n\n global task\n task = autotvm.task.create(func_create, \n args=(M,N,K,matmul_index,'float32'), \n target='llvm -mcpu=core-avx2')\n print(task.config_space)\n outer_trials = min(int(1e9), len(task.config_space))\n trials = min(trials, len(task.config_space))\n\n\n pickle_file = 'data/matmul/perm%.2f_timed_asm_matmul%i_%s_%icore_%i.pkl' % (cr_limit, matmul_index, N, num_threads, trials)\n if os.path.exists(pickle_file):\n print('File exists', pickle_file)\n return\n with open(pickle_file, 'rb') as fi:\n inds, res, dv, res_times, asm, llvm = pickle.load(fi)\n best = np.array(res).mean(axis=1).argsort()\n inds = np.array(inds)\n cr = []\n for ind in inds:\n cr.append(concurrency_ratio(ind))\n cr = np.array(cr)\n res = np.array(res).mean(axis=1)\n print(res[best[:10]])\n print(np.array(asm)[best[:10]])\n print(np.array(llvm)[best[:10]])\n print(cr[best[:10]])\n #for ind in inds[best[:10]]:\n # print(task.config_space.get(ind))\n return\n\n pool_threads = 80#cpu_count()\n\n #configs = np.random.choice(len(task.config_space), size=outer_trials, replace=False)\n configs = range(outer_trials)\n\n print('Running Data Volume model...')\n tic = time.time()\n with Pool(pool_threads) as p:\n cr = p.map(concurrency_ratio, configs)\n print('CR for %i configs: %f' % (len(configs), time.time() - tic))\n cr = np.array(cr)\n configs = np.array(configs)[(cr > cr_limit)]\n cr = np.array(cr)[(cr > cr_limit)]\n\n with Pool(pool_threads) as p:\n dv = p.map(get_matmul_dv, configs)\n print('DV for %i configs: %f' % (len(configs), time.time() - tic))\n\n dv = -1*np.array(dv)\n dv_order = dv.argsort()\n configs = configs[dv_order]\n dv = dv[dv_order]\n num_configs = len(configs)\n dv_dict = dict(zip(configs,dv))\n\n best_flops = 0.0\n flops = 0.0\n counter = 0\n print('Running on hardware...')\n sorted_order = np.array(dv).argsort()\n vec_counter = 0\n to_try = np.array(configs)[sorted_order]\n build_counter = 0\n\n inds = []\n results = []\n dv = []\n asm_opints = []\n llvm_opints = []\n result_times = []\n\n asm_times = 0\n while len(results) < trials and build_counter < num_configs:\n inds_to_test = []\n module_files = []\n start_index = build_counter\n\n with Pool(pool_threads) as p:\n for module_file, llvm, asm, ind, build_time, asm_time in p.map(limited_test, to_try[start_index:start_index+100*pool_threads]):\n #for ind in to_try:\n # should_test, ind = limited_test(ind)\n build_counter += 1\n if len(module_file) > 0:\n llvm_opints.append(llvm)\n asm_opints.append(asm)\n inds_to_test.append(ind)\n module_files.append(module_file)\n 
vec_counter += 1\n #print('Prepping tests: %.2f/%.2f GFLOPS %i/%i (%i), %.1f s \\r' % \n # (flops, best_flops, counter, num_configs,\n # build_counter, time.time()-tic), end='')\n\n #finished_index = np.where(to_try == inds_to_test[-1])[0][0]\n #to_try = to_try[finished_index+1:]\n\n #with Pool(6) as p:\n # for x, ind in p.imap(limited_test, to_try):\n inds_to_test = np.array(inds_to_test)\n for ind, module_file in zip(inds_to_test, module_files):\n x, ind = eval_time(ind, module_file)\n result_times.append(time.time() - tic)\n counter += 1\n mean_time = np.array(x).mean()\n flops = task.flop/(mean_time*1e9)\n best_flops = max(flops, best_flops)\n if best_flops == flops:\n best_ind = ind\n inds.append(ind)\n results.append(x)\n dv.append(dv_dict[ind])\n #print('Testing: %.2f/%.2f GFLOPS %i/%i (%i), %.1f s \\r' % \n # (flops, best_flops, counter, num_configs, \n # build_counter, time.time()-tic), end='')\n os.remove(module_file)\n os.remove(module_file+'.so')\n\n\n print()\n print('Best config:', task.config_space.get(best_ind))\n print('Saving %s' % pickle_file)\n with open(pickle_file, 'wb') as output:\n pickle.dump([inds, results, dv, result_times, asm_opints, llvm_opints],\n output, pickle.HIGHEST_PROTOCOL)\n return\n\ndef tune_and_evaluate():\n\n dilation = 1;\n\n parser = argparse.ArgumentParser(description='Run TC benchmarks in TVM')\n parser.add_argument( '-t','--trials', help=\"Int. Number of trials to sample\", default=2000, type=int)\n parser.add_argument( '-b','--benchmark', help=\"Int. Number of Tensor Contraction benchmark (1-4)\", default=1, type=int)\n\n global M, N, K\n global matmul_index\n\n args = parser.parse_args()\n trials = args.trials\n ind = args.benchmark\n cr_limit = 0.9\n\n for size in [1000,4000]:\n matmul_index = ind\n\n print(\"Tuning TC %i...\" % matmul_index)\n #key = list(benchmarks.keys())[args.benchmark]\n\n M,N,K = [size,size,size]\n \n\n print(\"M, N, K\")\n print(M, N, K)\n tune_kernels(args, trials, cr_limit)\n\n\nif __name__ == \"__main__\":\n tune_and_evaluate()\n" ]
[ [ "numpy.random.uniform", "numpy.ceil", "numpy.zeros", "numpy.floor", "numpy.array" ] ]
LeeElvis/OpenMDAO
[ "0ef1f0eeb934d8cd4ef0a02add6ba3c3a13e6150" ]
[ "openmdao/solvers/linear/tests/test_linear_block_gs.py" ]
[ "\"\"\"Test the LinearBlockGS linear solver class.\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.solvers.linear.tests.linear_test_base import LinearSolverTests\nfrom openmdao.test_suite.components.sellar import SellarImplicitDis1, SellarImplicitDis2, \\\n SellarDis1withDerivatives, SellarDis2withDerivatives\nfrom openmdao.test_suite.components.expl_comp_simple import TestExplCompSimpleDense\nfrom openmdao.test_suite.components.sellar import SellarDerivatives\nfrom openmdao.utils.assert_utils import assert_near_equal\n\n\nclass SimpleImp(om.ImplicitComponent):\n def setup(self):\n self.add_input('a', val=1.)\n self.add_output('x', val=0.)\n\n self.declare_partials('*', '*')\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n residuals['x'] = 3.0*inputs['a'] + 2.0*outputs['x']\n\n def linearize(self, inputs, outputs, jacobian):\n jacobian['x', 'x'] = 2.0\n jacobian['x', 'a'] = 3.0\n\n\nclass TestBGSSolver(LinearSolverTests.LinearSolverTestCase):\n linear_solver_class = om.LinearBlockGS\n\n def test_globaljac_err(self):\n prob = om.Problem()\n model = prob.model = om.Group(assembled_jac_type='dense')\n model.add_subsystem('x_param', om.IndepVarComp('length', 3.0),\n promotes=['length'])\n model.add_subsystem('mycomp', TestExplCompSimpleDense(),\n promotes=['length', 'width', 'area'])\n\n model.linear_solver = self.linear_solver_class(assemble_jac=True)\n prob.setup()\n\n with self.assertRaises(RuntimeError) as context:\n prob.run_model()\n\n self.assertEqual(str(context.exception),\n \"Linear solver LinearBlockGS in Group (<model>) doesn't support assembled jacobians.\")\n\n def test_simple_implicit(self):\n # This verifies that we can perform lgs around an implicit comp and get the right answer\n # as long as we slot a non-lgs linear solver on that component.\n\n prob = om.Problem()\n model = prob.model\n model.add_subsystem('p', om.IndepVarComp('a', 5.0))\n comp = model.add_subsystem('comp', SimpleImp())\n model.connect('p.a', 'comp.a')\n\n model.linear_solver = self.linear_solver_class()\n comp.linear_solver = om.DirectSolver()\n\n prob.setup(check=False, mode='fwd')\n prob.run_model()\n\n deriv = prob.compute_totals(of=['comp.x'], wrt=['p.a'])\n self.assertEqual(deriv['comp.x', 'p.a'], -1.5)\n\n def test_implicit_cycle(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 1.0))\n model.add_subsystem('d1', SellarImplicitDis1())\n model.add_subsystem('d2', SellarImplicitDis2())\n model.connect('d1.y1', 'd2.y1')\n model.connect('d2.y2', 'd1.y2')\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n model.nonlinear_solver.options['maxiter'] = 5\n model.linear_solver = self.linear_solver_class()\n\n prob.setup()\n prob.set_solver_print(level=0)\n\n prob.run_model()\n res = model._residuals.get_norm()\n\n # Newton is kinda slow on this for some reason, this is how far it gets with directsolver too.\n self.assertLess(res, 2.0e-2)\n\n def test_implicit_cycle_precon(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 1.0))\n model.add_subsystem('d1', SellarImplicitDis1())\n model.add_subsystem('d2', SellarImplicitDis2())\n model.connect('d1.y1', 'd2.y1')\n model.connect('d2.y2', 'd1.y2')\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n model.nonlinear_solver.options['maxiter'] = 5\n model.nonlinear_solver.linesearch = om.BoundsEnforceLS()\n model.linear_solver = om.ScipyKrylov()\n model.linear_solver.precon = 
self.linear_solver_class()\n\n prob.setup()\n\n prob['d1.y1'] = 4.0\n prob.set_solver_print()\n prob.run_model()\n res = model._residuals.get_norm()\n\n # Newton is kinda slow on this for some reason, this is how far it gets with directsolver too.\n self.assertLess(res, 2.0e-2)\n\n def test_full_desvar_with_index_obj_relevance_bug(self):\n prob = om.Problem()\n sub = prob.model.add_subsystem('sub', SellarDerivatives())\n prob.model.nonlinear_solver = om.NonlinearBlockGS()\n prob.model.linear_solver = om.LinearBlockGS()\n sub.nonlinear_solver = om.NonlinearBlockGS()\n sub.linear_solver = om.LinearBlockGS()\n\n prob.model.add_design_var('sub.z', lower=-100, upper=100)\n prob.model.add_objective('sub.z', index=1)\n\n prob.set_solver_print(level=0)\n\n prob.setup()\n\n # We don't call run_driver() here because we don't\n # actually want the optimizer to run\n prob.run_model()\n\n derivs = prob.compute_totals(of=['sub.z'], wrt=['sub.z'])\n\n assert_near_equal(derivs[('sub.z', 'sub.z')], [[0., 1.]])\n\n\nclass TestBGSSolverFeature(unittest.TestCase):\n\n def test_specify_solver(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.linear_solver = om.LinearBlockGS()\n model.nonlinear_solver = om.NonlinearBlockGS()\n\n prob.setup()\n\n prob.set_val('x', 1.)\n prob.set_val('z', np.array([5.0, 2.0]))\n\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_near_equal(J['obj', 'z'][0][0], 9.61001056, .00001)\n assert_near_equal(J['obj', 'z'][0][1], 1.78448534, .00001)\n\n def test_feature_maxiter(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = om.NonlinearBlockGS()\n\n model.linear_solver = om.LinearBlockGS()\n model.linear_solver.options['maxiter'] = 2\n\n prob.setup()\n\n prob.set_val('x', 1.)\n prob.set_val('z', np.array([5.0, 2.0]))\n\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_near_equal(J['obj', 'z'][0][0], 9.60230118004, .00001)\n assert_near_equal(J['obj', 'z'][0][1], 1.78022500547, .00001)\n\n def test_feature_atol(self):\n import numpy as np\n\n import 
openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = om.NonlinearBlockGS()\n\n model.linear_solver = om.LinearBlockGS()\n model.linear_solver.options['atol'] = 1.0e-3\n\n prob.setup()\n\n prob.set_val('x', 1.)\n prob.set_val('z', np.array([5.0, 2.0]))\n\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_near_equal(J['obj', 'z'][0][0], 9.61016296175, .00001)\n assert_near_equal(J['obj', 'z'][0][1], 1.78456955704, .00001)\n\n def test_feature_rtol(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = om.NonlinearBlockGS()\n\n model.linear_solver = om.LinearBlockGS()\n model.linear_solver.options['rtol'] = 1.0e-3\n\n prob.setup()\n\n prob.set_val('x', 1.)\n prob.set_val('z', np.array([5.0, 2.0]))\n\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_near_equal(J['obj', 'z'][0][0], 9.61016296175, .00001)\n assert_near_equal(J['obj', 'z'][0][1], 1.78456955704, .00001)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array" ] ]
STScI-MIRI/miricoord
[ "d378c24f4b8d649fb15d557c6350ab5070afba66" ]
[ "miricoord/lrs/lrs_pipetools.py" ]
[ "#\n\"\"\"\nUseful python tools for working with the MIRI LRS; calls a specific version\nof the tools specified below.\n\nThis version of the tools hooks into the JWST Calibration\nPipeline code to do the heavy lifting. Note that this\nmeans performance may be affected by what version of\nthe pipeline you are running!! It does, however, use\noffline versions of the CRDS reference files contained\nwithin this github repository.\n\nThis is mostly useful for testing the pipeline rather than\nfor creating reference files.\n\nConvert JWST v2,v3 locations (in arcsec) to MIRI Imager SCA x,y pixel locations.\nNote that the pipeline uses a 0-indexed pixel convention\nwhile SIAF uses 1-indexed pixels.\n\nBy default, calling a function in here will use the default version of the linked\nCDP-specific tools. This can be overridden by calling set_toolversion(version).\n\nAuthor: David R. Law ([email protected])\n\nREVISION HISTORY:\n17-Dec-2018 Written by David Law ([email protected])\n\"\"\"\n\nimport os as os\nimport sys\nimport numpy as np\nfrom astropy.modeling import models\nfrom asdf import AsdfFile\nfrom jwst import datamodels\nfrom jwst.assign_wcs import miri\nfrom numpy.testing import assert_allclose\nimport pdb\n\n#############################\n\n# Set the tools version. Default is CDP-7 (there is no CDP-7b)\ndef set_toolversion(version):\n # If the toolversion global was already set, delete it\n try:\n del globals()['tv']\n except:\n pass\n\n # Define toolversion as global scope within lrs_tools\n global tv\n # Import appropriate version\n if (version == 'default'):\n import miricoord.lrs.toolversions.lrs_pipetools_cdp7 as tv\n elif (version == 'cdp7'):\n import miricoord.lrs.toolversions.lrs_pipetools_cdp7 as tv\n else:\n print('Invalid tool version specified!')\n \n return\n\n#############################\n\n# Return the tools version\ndef version():\n # Determine whether the CDP toolversion has been set. If not, set to default.\n try:\n sys.getrefcount(tv)\n except:\n set_toolversion('default')\n \n return tv.version()\n\n#############################\n\n# Return a model for the detector pixel to v2,v3,lambda distortion\n# Note that stype must be a single string (slit or slitless)\ndef xytov2v3lam_model(stype,**kwargs):\n # Determine whether the CDP toolversion has been set. If not, set to default.\n try:\n sys.getrefcount(tv)\n except:\n set_toolversion('default')\n \n model=tv.xytov2v3lam_model(stype,**kwargs)\n\n return model\n\n#############################\n\n# Convert 0-indexed subarray pixels to v2,v3 in arcsec using the model\n# Note that stype must be a single string (slit or slitless)\ndef xytov2v3lam(x,y,stype,**kwargs):\n model=xytov2v3lam_model(stype,**kwargs)\n\n v2,v3,lam=model(x,y)\n\n return v2,v3,lam\n\n#############################\n\n# Convert v2,v3,lambda in arcsec to 0-indexed subarray pixels using the model\n# Note that stype must be a single string (slit or slitless)\ndef v2v3lamtoxy(v2,v3,lam,stype,**kwargs):\n model=xytov2v3lam_model(stype,**kwargs)\n \n x,y=model.inverse(v2,v3,lam)\n\n return x,y\n\n#############################\n\n# Test the forward and reverse transforms\ndef testtransform():\n # Determine whether the CDP toolversion has been set. 
If not, set to default.\n try:\n sys.getrefcount(tv)\n except:\n set_toolversion('default')\n\n # Get test data from a generating function\n x,y,v2,v3,lam,stype=tv.testdata()\n\n ntype=len(stype)\n # Loop over the slit and slitless varieties of test data\n for i in range(0,ntype):\n thisx,thisy,thisv2,thisv3,thislam,thisstype=x[i],y[i],v2[i],v3[i],lam[i],stype[i]\n v2new,v3new,lamnew=xytov2v3lam(thisx,thisy,thisstype)\n xnew,ynew=v2v3lamtoxy(thisv2,thisv3,thislam,thisstype)\n\n # Assert that reference values and newly-created values are close\n assert_allclose(thisx,xnew,atol=0.05)\n assert_allclose(thisy,ynew,atol=0.05)\n assert_allclose(thisv2,v2new,atol=0.05)\n assert_allclose(thisv3,v3new,atol=0.05)\n assert_allclose(thislam,lamnew,atol=0.05)\n \n return\n" ]
[ [ "numpy.testing.assert_allclose" ] ]
lylhw13/thread-pool
[ "e982392728dfabd50f5549e932b1b90f772d8d31" ]
[ "test/plot.py" ]
[ "str = '''\njobnum is 1, thread_num is 2\njobnum is 10, thread_num is 3\njobnum is 9, thread_num is 3\njobnum is 8, thread_num is 3\njobnum is 7, thread_num is 3\njobnum is 6, thread_num is 3\njobnum is 15, thread_num is 6\njobnum is 14, thread_num is 6\njobnum is 13, thread_num is 6\njobnum is 12, thread_num is 6\njobnum is 11, thread_num is 6\njobnum is 10, thread_num is 6\njobnum is 9, thread_num is 6\njobnum is 28, thread_num is 9\njobnum is 27, thread_num is 9\njobnum is 26, thread_num is 9\njobnum is 25, thread_num is 9\njobnum is 24, thread_num is 9\njobnum is 33, thread_num is 16\njobnum is 32, thread_num is 16\njobnum is 31, thread_num is 16\njobnum is 30, thread_num is 16\njobnum is 29, thread_num is 16\njobnum is 28, thread_num is 16\njobnum is 27, thread_num is 16\njobnum is 26, thread_num is 16\njobnum is 25, thread_num is 16\njobnum is 24, thread_num is 16\njobnum is 23, thread_num is 16\njobnum is 22, thread_num is 16\njobnum is 21, thread_num is 16\njobnum is 20, thread_num is 16\njobnum is 19, thread_num is 16\njobnum is 28, thread_num is 16\njobnum is 27, thread_num is 16\njobnum is 26, thread_num is 16\njobnum is 25, thread_num is 16\njobnum is 24, thread_num is 16\njobnum is 23, thread_num is 16\njobnum is 22, thread_num is 16\njobnum is 21, thread_num is 16\njobnum is 20, thread_num is 16\njobnum is 19, thread_num is 16\njobnum is 18, thread_num is 16\njobnum is 17, thread_num is 16\njobnum is 16, thread_num is 16\njobnum is 15, thread_num is 15\njobnum is 14, thread_num is 14\njobnum is 13, thread_num is 13\njobnum is 12, thread_num is 12\njobnum is 11, thread_num is 11\njobnum is 10, thread_num is 10\njobnum is 19, thread_num is 11\njobnum is 18, thread_num is 11\njobnum is 27, thread_num is 16\njobnum is 26, thread_num is 16\njobnum is 25, thread_num is 16\njobnum is 24, thread_num is 16\njobnum is 23, thread_num is 16\njobnum is 22, thread_num is 16\njobnum is 21, thread_num is 16\njobnum is 20, thread_num is 16\njobnum is 19, thread_num is 16\njobnum is 18, thread_num is 16\njobnum is 17, thread_num is 16\njobnum is 16, thread_num is 16\njobnum is 15, thread_num is 15\njobnum is 14, thread_num is 14\njobnum is 13, thread_num is 14\njobnum is 22, thread_num is 14\njobnum is 21, thread_num is 14\njobnum is 29, thread_num is 16\njobnum is 28, thread_num is 16\njobnum is 27, thread_num is 16\njobnum is 26, thread_num is 16\njobnum is 25, thread_num is 16\njobnum is 24, thread_num is 16\njobnum is 23, thread_num is 16\njobnum is 22, thread_num is 16\njobnum is 21, thread_num is 16\njobnum is 20, thread_num is 16\njobnum is 19, thread_num is 16\njobnum is 18, thread_num is 16\njobnum is 17, thread_num is 16\njobnum is 16, thread_num is 16\njobnum is 15, thread_num is 15\njobnum is 14, thread_num is 14\njobnum is 13, thread_num is 14\njobnum is 12, thread_num is 13\njobnum is 11, thread_num is 11\njobnum is 10, thread_num is 10\njobnum is 9, thread_num is 9\njobnum is 8, thread_num is 8\njobnum is 7, thread_num is 7\njobnum is 6, thread_num is 6\njobnum is 5, thread_num is 5\njobnum is 4, thread_num is 4\njobnum is 3, thread_num is 3\njobnum is 2, thread_num is 2\njobnum is 1, thread_num is 2\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nif __name__ == \"__main__\":\n jobnum = []\n threadnum = []\n for line in str.split('\\n'):\n if line:\n data = line.replace(',', ' ').split()\n jobnum.append(int(data[2]))\n threadnum.append(int(data[-1]))\n\n t = np.arange(0, len(jobnum), 1)\n fig, ax = plt.subplots()\n ax.plot(t, jobnum, label=\"job num\")\n 
ax.plot(t, threadnum, label=\"thread num\")\n ax.set(title = \"dynamic thread num\")\n ax.legend()\n ax.grid()\n fig.savefig(\"dynamic.png\")\n plt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
cmougan/Novartis2020
[ "390f34efa6bbc1e168f4e58d2d335c7cfa7d865e" ]
[ "pre-datathon/models/basic_mae_cb.py" ]
[ "\n\nimport numpy as np\nfrom catboost import CatBoostRegressor\nfrom sklearn.datasets import load_boston\nfrom sklearn.metrics import mean_absolute_error\n\nfrom tools.catboost_custom import MaeObjective\n\nnp.random.seed(42)\n\n\nif __name__ == \"__main__\":\n\n X, y = load_boston(return_X_y=True)\n # Using this, it learns\n cb = CatBoostRegressor(\n loss_function=MaeObjective(),\n # loss_function=\"MAE\",\n eval_metric='MAE'\n )\n\n cb.fit(\n X,\n y,\n )\n\n print(mean_absolute_error(cb.predict(X), y))\n" ]
[ [ "sklearn.datasets.load_boston", "numpy.random.seed" ] ]
VietDunghacker/mmdetection
[ "9e97878b2c5247bebe8ec406752941ffc8083871" ]
[ "mmdet/models/dense_heads/embedding_rpn_head.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import BaseModule\n\nfrom mmdet.models.builder import HEADS\nfrom ...core import bbox_cxcywh_to_xyxy\n\n\[email protected]_module()\nclass EmbeddingRPNHead(BaseModule):\n\t\"\"\"RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .\n\n\tUnlike traditional RPNHead, this module does not need FPN input, but just\n\tdecode `init_proposal_bboxes` and expand the first dimension of\n\t`init_proposal_bboxes` and `init_proposal_features` to the batch_size.\n\n\tArgs:\n\t\tnum_proposals (int): Number of init_proposals. Default 100.\n\t\tproposal_feature_channel (int): Channel number of\n\t\t\tinit_proposal_feature. Defaults to 256.\n\t\tinit_cfg (dict or list[dict], optional): Initialization config dict.\n\t\t\tDefault: None\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t num_proposals=100,\n\t\t\t\t proposal_feature_channel=256,\n\t\t\t\t init_cfg=None,\n\t\t\t\t **kwargs):\n\t\tassert init_cfg is None, 'To prevent abnormal initialization ' \\\n\t\t\t\t\t\t\t\t 'behavior, init_cfg is not allowed to be set'\n\t\tsuper(EmbeddingRPNHead, self).__init__(init_cfg)\n\t\tself.num_proposals = num_proposals\n\t\tself.proposal_feature_channel = proposal_feature_channel\n\t\tself._init_layers()\n\n\tdef _init_layers(self):\n\t\t\"\"\"Initialize a sparse set of proposal boxes and proposal features.\"\"\"\n\t\tself.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)\n\t\tself.init_proposal_features = nn.Embedding(self.num_proposals, self.proposal_feature_channel)\n\n\tdef init_weights(self):\n\t\t\"\"\"Initialize the init_proposal_bboxes as normalized.\n\n\t\t[c_x, c_y, w, h], and we initialize it to the size of the entire\n\t\timage.\n\t\t\"\"\"\n\t\tsuper(EmbeddingRPNHead, self).init_weights()\n\t\tnn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)\n\t\tnn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)\n\n\tdef _decode_init_proposals(self, imgs, img_metas):\n\t\t\"\"\"Decode init_proposal_bboxes according to the size of images and\n\t\texpand dimension of init_proposal_features to batch_size.\n\n\t\tArgs:\n\t\t\timgs (list[Tensor]): List of FPN features.\n\t\t\timg_metas (list[dict]): List of meta-information of\n\t\t\t\timages. 
Need the img_shape to decode the init_proposals.\n\n\t\tReturns:\n\t\t\tTuple(Tensor):\n\n\t\t\t\t- proposals (Tensor): Decoded proposal bboxes,\n\t\t\t\t has shape (batch_size, num_proposals, 4).\n\t\t\t\t- init_proposal_features (Tensor): Expanded proposal\n\t\t\t\t features, has shape\n\t\t\t\t (batch_size, num_proposals, proposal_feature_channel).\n\t\t\t\t- imgs_whwh (Tensor): Tensor with shape\n\t\t\t\t (batch_size, 4), the dimension means\n\t\t\t\t [img_width, img_height, img_width, img_height].\n\t\t\"\"\"\n\t\tproposals = self.init_proposal_bboxes.weight.clone()\n\t\tproposals = bbox_cxcywh_to_xyxy(proposals)\n\t\tnum_imgs = len(imgs[0])\n\t\timgs_whwh = []\n\t\tfor meta in img_metas:\n\t\t\th, w, _ = meta['img_shape']\n\t\t\timgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))\n\t\timgs_whwh = torch.cat(imgs_whwh, dim=0)\n\t\timgs_whwh = imgs_whwh[:, None, :]\n\n\t\t# imgs_whwh has shape (batch_size, 1, 4)\n\t\t# The shape of proposals change from (num_proposals, 4)\n\t\t# to (batch_size ,num_proposals, 4)\n\t\tproposals = proposals * imgs_whwh\n\n\t\tinit_proposal_features = self.init_proposal_features.weight.clone()\n\t\tinit_proposal_features = init_proposal_features[None].expand(num_imgs, *init_proposal_features.size())\n\t\treturn proposals, init_proposal_features, imgs_whwh\n\n\tdef forward_dummy(self, img, img_metas):\n\t\t\"\"\"Dummy forward function.\n\n\t\tUsed in flops calculation.\n\t\t\"\"\"\n\t\treturn self._decode_init_proposals(img, img_metas)\n\n\tdef forward_train(self, img, img_metas):\n\t\t\"\"\"Forward function in training stage.\"\"\"\n\t\treturn self._decode_init_proposals(img, img_metas)\n\n\tdef simple_test_rpn(self, img, img_metas):\n\t\t\"\"\"Forward function in testing stage.\"\"\"\n\t\treturn self._decode_init_proposals(img, img_metas)\n\n\tdef simple_test(self, img, img_metas):\n\t\t\"\"\"Forward function in testing stage.\"\"\"\n\t\traise NotImplementedError\n\n\tdef aug_test_rpn(self, feats, img_metas):\n\t\traise NotImplementedError(\n\t\t\t'EmbeddingRPNHead does not support test-time augmentation')\n" ]
[ [ "torch.cat", "torch.nn.Embedding", "torch.nn.init.constant_" ] ]
Shaviv-Hoffman-Lowitz/pylot
[ "d1295a42f0edd79670dc64053824a3e075d433e2" ]
[ "pylot/perception/detection/traffic_light_det_operator.py" ]
[ "\"\"\"Implements an operator that detects traffic lights.\"\"\"\nimport logging\n\nimport erdos\n\nimport numpy as np\n\nimport pylot.utils\nfrom pylot.perception.detection.traffic_light import TrafficLight, \\\n TrafficLightColor\nfrom pylot.perception.detection.utils import BoundingBox2D\nfrom pylot.perception.messages import TrafficLightsMessage\n\nimport tensorflow as tf\n\n\nclass TrafficLightDetOperator(erdos.Operator):\n \"\"\"Detects traffic lights using a TensorFlow model.\n\n The operator receives frames on a camera stream, and runs a model for each\n frame.\n\n Args:\n camera_stream (:py:class:`erdos.ReadStream`): The stream on which\n camera frames are received.\n traffic_lights_stream (:py:class:`erdos.WriteStream`): Stream on which\n the operator sends\n :py:class:`~pylot.perception.messages.TrafficLightsMessage`\n messages.\n flags (absl.flags): Object to be used to access absl flags.\n \"\"\"\n def __init__(self, camera_stream, traffic_lights_stream, flags):\n # Register a callback on the camera input stream.\n camera_stream.add_callback(self.on_frame, [traffic_lights_stream])\n self._logger = erdos.utils.setup_logging(self.config.name,\n self.config.log_file_name)\n self._flags = flags\n self._detection_graph = tf.Graph()\n # Load the model from the model file.\n pylot.utils.set_tf_loglevel(logging.ERROR)\n with self._detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(self._flags.traffic_light_det_model_path,\n 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self._gpu_options = tf.GPUOptions(\n allow_growth=True,\n visible_device_list=str(self._flags.traffic_light_det_gpu_index),\n per_process_gpu_memory_fraction=flags.\n traffic_light_det_gpu_memory_fraction)\n # Create a TensorFlow session.\n self._tf_session = tf.Session(\n graph=self._detection_graph,\n config=tf.ConfigProto(gpu_options=self._gpu_options))\n # Get the tensors we're interested in.\n self._image_tensor = self._detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n self._detection_boxes = self._detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n self._detection_scores = self._detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self._detection_classes = self._detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self._num_detections = self._detection_graph.get_tensor_by_name(\n 'num_detections:0')\n self._labels = {\n 1: TrafficLightColor.GREEN,\n 2: TrafficLightColor.YELLOW,\n 3: TrafficLightColor.RED,\n 4: TrafficLightColor.OFF\n }\n # Serve some junk image to load up the model.\n self.__run_model(np.zeros((108, 192, 3)))\n\n @staticmethod\n def connect(camera_stream):\n \"\"\"Connects the operator to other streams.\n\n Args:\n camera_stream (:py:class:`erdos.ReadStream`): The stream on which\n camera frames are received.\n\n Returns:\n :py:class:`erdos.WriteStream`: Stream on which the operator sends\n :py:class:`~pylot.perception.messages.TrafficLightsMessage`\n messages for traffic lights.\n \"\"\"\n traffic_lights_stream = erdos.WriteStream()\n return [traffic_lights_stream]\n\n @erdos.profile_method()\n def on_frame(self, msg, traffic_lights_stream):\n \"\"\"Invoked whenever a frame message is received on the stream.\n\n Args:\n msg: A :py:class:`~pylot.perception.messages.FrameMessage`.\n obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which\n the operator sends\n :py:class:`~pylot.perception.messages.TrafficLightsMessage`\n 
messages for traffic lights.\n \"\"\"\n self._logger.debug('@{}: {} received message'.format(\n msg.timestamp, self.config.name))\n assert msg.frame.encoding == 'BGR', 'Expects BGR frames'\n boxes, scores, labels = self.__run_model(\n msg.frame.as_rgb_numpy_array())\n\n traffic_lights = self.__convert_to_detected_tl(\n boxes, scores, labels, msg.frame.camera_setup.height,\n msg.frame.camera_setup.width)\n\n self._logger.debug('@{}: {} detected traffic lights {}'.format(\n msg.timestamp, self.config.name, traffic_lights))\n\n traffic_lights_stream.send(\n TrafficLightsMessage(msg.timestamp, traffic_lights))\n traffic_lights_stream.send(erdos.WatermarkMessage(msg.timestamp))\n\n if self._flags.log_traffic_light_detector_output:\n msg.frame.annotate_with_bounding_boxes(msg.timestamp,\n traffic_lights)\n msg.frame.save(msg.timestamp.coordinates[0], self._flags.data_path,\n 'tl-detector-{}'.format(self.config.name))\n\n def __run_model(self, image_np):\n # Expand dimensions since the model expects images to have\n # shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n (boxes, scores, classes, num) = self._tf_session.run(\n [\n self._detection_boxes, self._detection_scores,\n self._detection_classes, self._num_detections\n ],\n feed_dict={self._image_tensor: image_np_expanded})\n\n num_detections = int(num[0])\n labels = [self._labels[label] for label in classes[0][:num_detections]]\n boxes = boxes[0][:num_detections]\n scores = scores[0][:num_detections]\n return boxes, scores, labels\n\n def __convert_to_detected_tl(self, boxes, scores, labels, height, width):\n traffic_lights = []\n for index in range(len(scores)):\n if scores[\n index] > self._flags.traffic_light_det_min_score_threshold:\n bbox = BoundingBox2D(\n int(boxes[index][1] * width), # x_min\n int(boxes[index][3] * width), # x_max\n int(boxes[index][0] * height), # y_min\n int(boxes[index][2] * height) # y_max\n )\n traffic_lights.append(\n TrafficLight(scores[index],\n labels[index],\n bounding_box=bbox))\n return traffic_lights\n" ]
[ [ "numpy.zeros", "tensorflow.gfile.GFile", "tensorflow.Graph", "numpy.expand_dims", "tensorflow.import_graph_def", "tensorflow.ConfigProto", "tensorflow.GraphDef" ] ]
ymeng-git/tvm
[ "e53cbe48ca307d14a2359c1f6fe15f4ccfa87c8f" ]
[ "tests/python/contrib/test_ethosu/test_legalize.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n\nimport pytest\n\npytest.importorskip(\"ethosu.vela\")\n\nimport math\n\nimport numpy as np\nimport tensorflow as tf\nimport tflite.Model\n\nimport tvm\nfrom tvm import relay\nfrom tvm.relay.backend.contrib.ethosu import legalize, preprocess\nfrom tvm.relay import dataflow_pattern\nfrom tvm.relay.op.contrib import ethosu\nfrom tvm.relay.backend.contrib.ethosu import util\nfrom tvm.relay.build_module import bind_params_by_name\n\nfrom . import infra\n\n\ndef partition_ethosu_by_table(mod, pattern_table):\n \"\"\"In case only the legalization part is supported for an operator, we don't\n want to add the operator's pattern to the pattern table so that the compiler\n wouldn't attempt to offload an operator without full stack support.\"\"\"\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.MergeComposite(pattern_table)(mod)\n mod = relay.transform.AnnotateTarget(\"ethos-u\")(mod)\n mod = relay.transform.MergeCompilerRegions()(mod)\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.PartitionGraph()(mod)\n mod = relay.transform.InferType()(mod)\n mod = preprocess.preprocess_ext_io()(mod)\n return mod\n\n\ndef test_split_indices_legalize():\n def create_graph(axis):\n x = relay.var(\"x\", shape=(1, 50, 50, 3))\n x_relu = relay.nn.relu(x)\n split_output = relay.split(x_relu, [5, 20, 45], axis).tuple_value\n return relay.Function([x], split_output)\n\n def expected_mod_axis1():\n expected_ir_string = \"\"\"\n #[version = \"0.0.5\"]\n def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 5, 50, 3), float32],\\\n Tensor[(1, 15, 50, 3), float32],\\\n Tensor[(1, 25, 50, 3), float32],\\\n Tensor[(1, 5, 50, 3), float32]) {\n %0 = nn.relu(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;\n %1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 5, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 5, 50, 3), float32] */;\n %2 = strided_slice(%0, begin=[0, 5, 0, 0], end=[1, 20, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 15, 50, 3), float32] */;\n %3 = strided_slice(%0, begin=[0, 20, 0, 0], end=[1, 45, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 25, 50, 3), float32] */;\n %4 = strided_slice(%0, begin=[0, 45, 0, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 5, 50, 3), float32] */;\n (%1, %2, %3, %4)\n }\n \"\"\"\n return tvm.parser.fromtext(expected_ir_string)\n\n def expected_mod_axis2():\n expected_ir_string = \"\"\"\n #[version = \"0.0.5\"]\n def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 50, 5, 3), float32],\\\n Tensor[(1, 50, 15, 3), float32],\\\n Tensor[(1, 50, 25, 3), float32],\\\n Tensor[(1, 50, 5, 3), float32]) {\n %0 = 
nn.relu(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;\n %1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 50, 5, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 5, 3), float32] */;\n %2 = strided_slice(%0, begin=[0, 0, 5, 0], end=[1, 50, 20, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 15, 3), float32] */;\n %3 = strided_slice(%0, begin=[0, 0, 20, 0], end=[1, 50, 45, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 25, 3), float32] */;\n %4 = strided_slice(%0, begin=[0, 0, 45, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 5, 3), float32] */;\n (%1, %2, %3, %4)\n }\n \"\"\"\n return tvm.parser.fromtext(expected_ir_string)\n\n mod_axis1 = tvm.IRModule()\n mod_axis1[\"tvmgen_default_ethos_u_main_0\"] = create_graph(1)\n mod_axis1 = legalize.LegalizeSplit()(mod_axis1)\n expected_axis1 = expected_mod_axis1()\n tvm.ir.assert_structural_equal(mod_axis1, expected_axis1)\n\n mod_axis2 = tvm.IRModule()\n mod_axis2[\"tvmgen_default_ethos_u_main_0\"] = create_graph(2)\n mod_axis2 = legalize.LegalizeSplit()(mod_axis2)\n expected_axis2 = expected_mod_axis2()\n tvm.ir.assert_structural_equal(mod_axis2, expected_axis2)\n\n\ndef test_split_sections_legalize():\n def create_graph(axis, sections):\n x = relay.var(\"x\", shape=(1, 50, 50, 3))\n x_abs = relay.abs(x)\n split_output = relay.split(x_abs, sections, axis).tuple_value\n outputs = list()\n for section_idx in range(sections):\n split_single_out = relay.TupleGetItem(split_output, section_idx)\n tanh = relay.tanh(split_single_out)\n outputs.append(tanh)\n tuple_out = relay.Tuple(outputs)\n return relay.Function([x], tuple_out)\n\n def expected_mod_axis1():\n expected_ir_string = \"\"\"\n #[version = \"0.0.5\"]\n def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 10, 50, 3), float32],\\\n Tensor[(1, 10, 50, 3), float32],\\\n Tensor[(1, 10, 50, 3), float32],\\\n Tensor[(1, 10, 50, 3), float32],\\\n Tensor[(1, 10, 50, 3), float32]) {\n %0 = abs(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;\n %1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 10, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %2 = strided_slice(%0, begin=[0, 10, 0, 0], end=[1, 20, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %3 = strided_slice(%0, begin=[0, 20, 0, 0], end=[1, 30, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %4 = strided_slice(%0, begin=[0, 30, 0, 0], end=[1, 40, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %5 = strided_slice(%0, begin=[0, 40, 0, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %6 = (%1, %2, %3, %4, %5);\n %7 = %6.0;\n %8 = tanh(%7) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %9 = %6.1;\n %10 = tanh(%9) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %11 = %6.2;\n %12 = tanh(%11) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %13 = %6.3;\n %14 = tanh(%13) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %15 = %6.4;\n %16 = tanh(%15) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n (%8, %10, %12, %14, %16)\n }\n \"\"\"\n return tvm.parser.fromtext(expected_ir_string)\n\n def expected_mod_axis2():\n expected_ir_string = \"\"\"\n #[version = \"0.0.5\"]\n def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 50, 10, 3), float32],\\\n Tensor[(1, 50, 10, 3), float32],\\\n Tensor[(1, 50, 10, 3), float32],\\\n Tensor[(1, 50, 10, 3), float32],\\\n Tensor[(1, 50, 10, 3), float32]) {\n %0 = 
abs(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;\n %1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 50, 10, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %2 = strided_slice(%0, begin=[0, 0, 10, 0], end=[1, 50, 20, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %3 = strided_slice(%0, begin=[0, 0, 20, 0], end=[1, 50, 30, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %4 = strided_slice(%0, begin=[0, 0, 30, 0], end=[1, 50, 40, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %5 = strided_slice(%0, begin=[0, 0, 40, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %6 = (%1, %2, %3, %4, %5);\n %7 = %6.0;\n %8 = tanh(%7) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %9 = %6.1;\n %10 = tanh(%9) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %11 = %6.2;\n %12 = tanh(%11) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %13 = %6.3;\n %14 = tanh(%13) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %15 = %6.4;\n %16 = tanh(%15) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n (%8, %10, %12, %14, %16)\n }\n \"\"\"\n return tvm.parser.fromtext(expected_ir_string)\n\n mod_axis1 = tvm.IRModule()\n mod_axis1[\"tvmgen_default_ethos_u_main_0\"] = create_graph(1, 5)\n mod_axis1 = legalize.LegalizeSplit()(mod_axis1)\n expected_axis1 = expected_mod_axis1()\n tvm.ir.assert_structural_equal(mod_axis1, expected_axis1)\n\n mod_axis2 = tvm.IRModule()\n mod_axis2[\"tvmgen_default_ethos_u_main_0\"] = create_graph(2, 5)\n mod_axis2 = legalize.LegalizeSplit()(mod_axis2)\n expected_axis2 = expected_mod_axis2()\n tvm.ir.assert_structural_equal(mod_axis2, expected_axis2)\n\n\ndef infer_type_function_pass(func):\n mod = tvm.IRModule()\n mod[\"test\"] = func\n mod = relay.transform.InferType()(mod)\n return mod[\"test\"]\n\n\ndef get_shape_expr(in_expr, out_expr):\n main_f = relay.Function([in_expr], out_expr)\n main_f = infer_type_function_pass(main_f)\n shape = [int(i) for i in main_f.body.checked_type.shape]\n return shape\n\n\nINVERSE_LAYOUT_TRANSFORM_OHWI_MAP = {\n \"HWIO\": [1, 2, 3, 0],\n \"HWOI\": [1, 2, 0, 3],\n \"OWHI\": [0, 1, 2, 3],\n}\n\n\[email protected](\"ifm_shape\", [(1, 299, 299, 3), (1, 55, 55, 3)])\[email protected](\"kernel_shape\", [(3, 2), (1, 3)])\[email protected](\"padding\", [\"SAME\", \"VALID\"])\[email protected](\"strides, dilation\", [((1, 1), (2, 1)), ((3, 2), (1, 1))])\[email protected](\"activation\", [None, \"RELU\"])\ndef test_tflite_conv2d_legalize(ifm_shape, kernel_shape, padding, strides, dilation, activation):\n dtype = \"int8\"\n\n def create_tflite_graph_single():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, input_shape):\n op = tf.nn.conv2d(\n input_shape,\n filters=tf.constant(\n np.random.uniform(size=(kernel_shape[0], kernel_shape[1], 3, 3)),\n dtype=tf.float32,\n ),\n strides=strides,\n padding=padding,\n data_format=\"NHWC\",\n dilations=dilation,\n )\n if activation:\n op = tf.nn.relu(op)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = 
[tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n op = ext_func.body\n ofm_channels = op.attrs.ofm_channels\n\n # check IFM\n ifm = op.args[0].checked_type\n assert list(ifm.shape) == list(ifm_shape)\n assert str(ifm.dtype) == dtype\n assert ifm.shape[3] == ofm_channels\n\n # check OFM\n ofm = op.checked_type\n expected_ofm_shape = infra.compute_ofm_shape(\n ifm_shape, padding, kernel_shape, strides, dilation\n )\n assert list(ofm.shape) == list(expected_ofm_shape)\n assert str(ofm.dtype) == dtype\n assert ofm.shape[3] == ofm_channels\n\n # check weights\n weights_ohwi = op.args[1].data.asnumpy()\n assert str(weights_ohwi.dtype) == dtype\n assert weights_ohwi.shape[0] == ofm_channels\n assert weights_ohwi.shape[1] == kernel_shape[0]\n assert weights_ohwi.shape[2] == kernel_shape[1]\n assert weights_ohwi.shape[3] == 3\n\n # Check that scale_bias matches weight tensor\n assert list(op.args[2].checked_type.shape)[0] == ofm_channels\n\n expected_padding = infra.compute_padding_shape(\n ifm_shape,\n expected_ofm_shape,\n padding,\n (kernel_shape[0], kernel_shape[1]),\n strides,\n dilation,\n )\n assert list(op.attrs.padding) == list(expected_padding)\n assert list(op.attrs.strides) == list(strides)\n assert list(op.attrs.dilation) == list(dilation)\n if activation == \"RELU\":\n assert str(op.attrs.activation) == \"CLIP\"\n\n conv2d_pattern_table = [\n (\n ethosu.QnnConv2DParams.composite_name,\n ethosu.qnn_conv2d_pattern(),\n lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),\n )\n ]\n\n tflite_graph = create_tflite_graph_single()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, conv_params = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], conv_params)\n mod = partition_ethosu_by_table(mod, conv2d_pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.Conv2DRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\"ifm_shape\", [(1, 299, 299, 3), (1, 123, 17, 7)])\[email protected](\"kernel_shape\", [(7, 3), (22, 5)])\[email protected](\"padding\", [\"SAME\", \"VALID\"])\[email protected](\"strides, dilation\", [((1, 1), (2, 1)), ((3, 2), (1, 1))])\[email protected](\"activation\", [\"RELU\", None])\ndef test_tflite_depthwise_conv_2d_legalize(\n ifm_shape, kernel_shape, padding, strides, dilation, activation\n):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def depthwise_conv2d(self, x):\n weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]\n weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)\n # The input strides to the TensorFlow API needs to be of shape 1x4\n tf_strides = [1, strides[0], strides[1], 1]\n op = tf.nn.depthwise_conv2d(\n x, weight, strides=tf_strides, padding=padding, dilations=dilation\n )\n if activation:\n op = tf.nn.relu(op)\n return op\n\n model = Model()\n concrete_func = model.depthwise_conv2d.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = 
tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n op = ext_func.body\n ofm_channels = op.attrs.ofm_channels\n\n # check IFM\n ifm = op.args[0].checked_type\n assert list(ifm.shape) == list(ifm_shape)\n assert str(ifm.dtype) == dtype\n assert ifm.shape[3] == ofm_channels\n\n # check OFM\n ofm = op.checked_type\n expected_ofm_shape = infra.compute_ofm_shape(\n ifm_shape, padding, kernel_shape, strides, dilation\n )\n assert list(ofm.shape) == list(expected_ofm_shape)\n assert str(ofm.dtype) == dtype\n assert ofm.shape[3] == ofm_channels\n\n # check weights\n weights_ohwi = op.args[1].data.asnumpy()\n assert str(weights_ohwi.dtype) == dtype\n assert weights_ohwi.shape[0] == ofm_channels\n assert weights_ohwi.shape[1] == kernel_shape[0]\n assert weights_ohwi.shape[2] == kernel_shape[1]\n assert weights_ohwi.shape[3] == 1 # only depth multiplier 1 is supported\n\n # Check that scale_bias matches weight tensor\n assert list(op.args[2].checked_type.shape)[0] == ofm_channels\n\n expected_padding = infra.compute_padding_shape(\n ifm_shape, expected_ofm_shape, padding, kernel_shape, strides, dilation\n )\n assert list(op.attrs.padding) == list(expected_padding)\n assert op.attrs.ofm_channels == ofm_channels\n assert list(op.attrs.strides) == list(strides)\n assert list(op.attrs.dilation) == list(dilation)\n if activation == \"RELU\":\n assert str(op.attrs.activation) == \"CLIP\"\n\n depthwise_pattern_table = [\n (\n ethosu.QnnDepthwiseConv2DParams.composite_name,\n ethosu.qnn_depthwise_conv2d_pattern(),\n lambda pat: ethosu.QnnDepthwiseConv2DParams(pat).is_valid(),\n )\n ]\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, params = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], params)\n mod = partition_ethosu_by_table(mod, depthwise_pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.DepthwiseConv2DRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\"pooling_type\", [\"MAX\", \"AVG\"])\[email protected](\"ifm_shape\", [[1, 3, 4, 3], [1, 4, 5, 2]])\[email protected](\n \"pool_shape, strides, activation_function, padding\",\n [([1, 2], [1, 2], \"NONE\", \"SAME\"), ([2, 3], [2, 3], \"RELU\", \"VALID\")],\n)\ndef test_tflite_pool2d_legalize(\n ifm_shape, pooling_type, strides, pool_shape, activation_function, padding\n):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x):\n if pooling_type == \"MAX\":\n op = tf.nn.max_pool(x, pool_shape, strides, padding)\n elif pooling_type == \"AVG\":\n op = tf.nn.avg_pool(x, pool_shape, strides, padding)\n if activation_function == \"RELU\":\n op = tf.nn.relu(op)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = 
np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n ofm_shape = infra.compute_ofm_shape(ifm_shape, padding, pool_shape, strides)\n op = ext_func.body\n assert list(op.args[0].checked_type.shape) == ifm_shape\n assert op.args[0].checked_type.dtype == dtype\n assert list(op.checked_type.shape) == ofm_shape\n assert op.checked_type.dtype == dtype\n assert op.attrs.pooling_type == pooling_type\n assert list(op.attrs.strides) == strides\n assert list(op.attrs.padding) == infra.compute_padding_shape(\n ifm_shape, ofm_shape, padding, pool_shape, strides\n )\n assert list(op.attrs.pool_shape) == pool_shape\n assert op.attrs.ofm_channels == ifm_shape[3]\n if activation_function == \"RELU\":\n assert str(op.attrs.activation) == \"CLIP\"\n\n if pooling_type == \"MAX\":\n rewriter = legalize.MaxPoolingRewriter()\n pattern_table = [\n (\n ethosu.MaxPool2DParams.composite_name,\n ethosu.qnn_maxpool2d_pattern(),\n lambda pat: ethosu.MaxPool2DParams(pat).is_valid(),\n ),\n ]\n elif pooling_type == \"AVG\":\n rewriter = legalize.AvgPoolingRewriter()\n pattern_table = [\n (\n ethosu.AvgPool2DParams.composite_name,\n ethosu.qnn_avgpool2d_pattern(),\n lambda pat: ethosu.AvgPool2DParams(pat).is_valid(),\n ),\n ]\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"x\": ifm_shape},\n dtype_dict={\"x\": dtype},\n )\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\"operator_type\", [\"ADD\", \"SUB\", \"MUL\", \"MIN\", \"MAX\"])\[email protected](\n \"ifm_shape, ifm2_shape, reversed_operands\",\n [\n ([1, 2, 3, 4], [1, 2, 3, 4], False),\n ([1, 2, 3, 4], [1, 1, 3, 1], False),\n ([1, 1, 3, 1], [1, 2, 3, 4], True),\n ([1, 4, 4], [4, 1], False),\n ([4], [4], False),\n ([4], [1, 2, 3, 4], True),\n ([1, 4, 4], [4, 1], False),\n ],\n)\[email protected](\"activation_function\", [\"NONE\", \"RELU\"])\ndef test_tflite_binary_elemwise_legalize(\n operator_type,\n ifm_shape,\n ifm2_shape,\n reversed_operands,\n activation_function,\n):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x, y):\n if operator_type == \"ADD\":\n op = tf.math.add(x, y)\n elif operator_type == \"SUB\":\n op = tf.math.subtract(x, y)\n elif operator_type == \"MUL\":\n op = tf.math.multiply(x, y)\n elif operator_type == \"MIN\":\n op = tf.math.minimum(x, y)\n elif operator_type == \"MAX\":\n op = tf.math.maximum(x, y)\n if activation_function == \"RELU\":\n op = tf.nn.relu(op)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32), tf.TensorSpec(ifm2_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n data2 = 
np.random.rand(*tuple(ifm2_shape)) * 2\n yield [data.astype(np.float32), data2.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n out_shape = ifm2_shape if reversed_operands else ifm_shape\n shapes = [ifm_shape, ifm2_shape]\n ifm_index, ifm2_index = (1, 0) if reversed_operands else (0, 1)\n op = ext_func.body\n\n has_reshaped_output = False\n shapes_padded = [[1] * (4 - len(s)) + s for s in shapes]\n out_padded = [1] * (4 - len(out_shape)) + out_shape\n if op.op.name != \"contrib.ethosu.binary_elementwise\":\n has_reshaped_output = True\n op = op.args[0]\n\n assert list(op.args[0].checked_type.shape) == shapes_padded[ifm_index]\n assert list(op.args[1].checked_type.shape) == shapes_padded[ifm2_index]\n assert op.args[0].checked_type.dtype == dtype\n assert list(op.checked_type.shape) == out_padded\n assert op.checked_type.dtype == dtype\n assert op.attrs.operator_type == operator_type\n assert op.attrs.reversed_operands == reversed_operands\n if activation_function == \"RELU\":\n assert str(op.attrs.activation) == \"CLIP\"\n\n if has_reshaped_output:\n assert list(ext_func.body.checked_type.shape) == out_shape\n\n if operator_type == \"ADD\":\n rewriter = legalize.AddRewriter()\n pattern_table = [\n (\n ethosu.AddParams.composite_name,\n ethosu.qnn_add_pattern(),\n lambda pat: ethosu.AddParams(pat).is_valid(),\n ),\n ]\n elif operator_type == \"SUB\":\n rewriter = legalize.SubRewriter()\n pattern_table = [\n (\n ethosu.SubParams.composite_name,\n ethosu.qnn_subtract_pattern(),\n lambda pat: ethosu.SubParams(pat).is_valid(),\n ),\n ]\n elif operator_type == \"MUL\":\n rewriter = legalize.MulRewriter()\n pattern_table = [\n (\n ethosu.MulParams.composite_name,\n ethosu.qnn_mul_pattern(),\n lambda pat: ethosu.MulParams(pat).is_valid(),\n ),\n ]\n elif operator_type == \"MIN\":\n rewriter = legalize.MinRewriter()\n pattern_table = [\n (\n ethosu.MinParams.composite_name,\n ethosu.minimum_pattern(),\n lambda pat: ethosu.MinParams(pat).is_valid(),\n ),\n ]\n elif operator_type == \"MAX\":\n rewriter = legalize.MaxRewriter()\n pattern_table = [\n (\n ethosu.MaxParams.composite_name,\n ethosu.maximum_pattern(),\n lambda pat: ethosu.MaxParams(pat).is_valid(),\n ),\n ]\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"x\": ifm_shape, \"y\": ifm2_shape},\n dtype_dict={\"x\": dtype, \"y\": dtype},\n )\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\ndef test_binary_add_from_constant_scalar():\n dtype = \"uint8\"\n ifm_shape = (1, 4, 4, 8)\n\n def create_graph():\n inp = relay.var(\"input\", shape=ifm_shape, dtype=dtype)\n scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)\n add = relay.qnn.op.add(\n inp,\n scalar,\n relay.const(1.0, dtype=\"float32\"),\n relay.const(0, dtype=\"int32\"),\n relay.const(1.0, dtype=\"float32\"),\n relay.const(0, dtype=\"int32\"),\n 
relay.const(1.0, dtype=\"float32\"),\n relay.const(0, dtype=\"int32\"),\n )\n func = relay.Function(relay.analysis.free_vars(add), add)\n return tvm.IRModule.from_expr(func)\n\n def verify(ext_func):\n op = ext_func.body\n assert list(op.args[0].checked_type.shape) == [1, 4, 4, 8]\n assert list(op.args[1].checked_type.shape) == [1, 1, 1, 1]\n assert op.args[0].checked_type.dtype == \"uint8\"\n assert list(op.checked_type.shape) == [1, 4, 4, 8]\n assert op.checked_type.dtype == \"uint8\"\n assert op.attrs.operator_type == \"ADD\"\n\n rewriter = legalize.AddRewriter()\n pattern_table = [\n (\n ethosu.AddParams.composite_name,\n ethosu.qnn_add_pattern(),\n lambda pat: ethosu.AddParams(pat).is_valid(),\n ),\n ]\n\n mod = create_graph()\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape, ifm2_shape, reversed_operands\",\n [\n ([1, 2, 3, 4], [1, 2, 3, 4], False),\n ([1, 2, 3, 4], [1, 1, 3, 1], False),\n ([1, 1, 3, 1], [1, 2, 3, 4], True),\n ],\n)\ndef test_ethosu_left_shift_binary_elemwise_legalize(ifm_shape, ifm2_shape, reversed_operands):\n dtype = \"int32\"\n operator_type = \"SHL\"\n\n def create_graph():\n input1 = relay.var(\"x1\", shape=ifm_shape, dtype=dtype)\n input2 = relay.var(\"x2\", shape=ifm2_shape, dtype=dtype)\n c1 = relay.left_shift(input1, input2)\n f = relay.Function([input1, input2], c1)\n mod = tvm.IRModule()\n mod[\"main\"] = f\n return mod\n\n def verify(ext_func):\n out_shape = ifm2_shape if reversed_operands else ifm_shape\n shapes = [ifm_shape, ifm2_shape]\n ifm_index, ifm2_index = (1, 0) if reversed_operands else (0, 1)\n op = ext_func.body\n assert list(op.args[0].checked_type.shape) == shapes[ifm_index]\n assert list(op.args[1].checked_type.shape) == shapes[ifm2_index]\n assert op.args[0].checked_type.dtype == dtype\n assert list(op.checked_type.shape) == out_shape\n assert op.checked_type.dtype == dtype\n assert op.attrs.operator_type == operator_type\n assert op.attrs.reversed_operands == reversed_operands\n assert str(op.attrs.activation) == \"NONE\"\n\n rewriter = legalize.ShlRewriter()\n pattern_table = [\n (\n ethosu.ShlParams.composite_name,\n ethosu.shl_pattern(),\n lambda pat: ethosu.ShlParams(pat).is_valid(),\n ),\n ]\n\n mod = create_graph()\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape, new_shape\",\n [\n ((1, 4, 1, 2), (4, 2)),\n ((1, 5, 1, 20), (100,)),\n ((12, 20), (1, 6, 4, 10)),\n ((30,), (10, 1, 3)),\n ],\n)\ndef test_relay_reshape_legalize(ifm_shape, new_shape):\n\n ifm = relay.var(\"ifm\", shape=ifm_shape, dtype=\"int8\")\n reshape = relay.op.reshape(ifm, new_shape)\n func = relay.Function([ifm], reshape)\n mod = tvm.IRModule()\n mod[\"main\"] = func\n mod = relay.transform.InferType()(mod)\n\n reshape_pattern_table = [\n (\n ethosu.ReshapeParams.composite_name,\n ethosu.reshape_pattern(),\n lambda pat: ethosu.ReshapeParams(pat).is_valid(),\n ),\n ]\n\n mod = partition_ethosu_by_table(mod, reshape_pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.ReshapeRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n 
legalize.NoOpRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod = relay.transform.InferType()(mod)\n\n ext_func = mod[\"tvmgen_default_ethos_u_main_0\"]\n\n identity = ext_func.body\n assert identity.op.name == \"contrib.ethosu.identity\"\n\n # check that the reshape is still there\n reshape = identity.args[0]\n assert reshape.op.name == \"reshape\"\n\n # check that identity's output shape matches reshape's output shape\n assert tuple(identity.checked_type.shape) == new_shape\n\n\[email protected](\n \"ifm_shape, begin, end\",\n [\n ([1, 10, 50, 4], [0, 5, 11, 2], [1, 10, 22, 3]),\n ([1, 101, 35, 27], [0, 5, 11, 2], [1, 10, 22, 3]),\n ([15, 17, 3], [3, 0, 0], [11, 17, 1]),\n ([1, 6043], [0, 704], [1, 800]),\n ],\n)\ndef test_relay_strided_slice_legalize(ifm_shape, begin, end):\n\n ifm = relay.var(\"ifm\", shape=ifm_shape, dtype=\"int8\")\n strided_slice = relay.op.strided_slice(ifm, begin, end)\n func = relay.Function([ifm], strided_slice)\n mod = tvm.IRModule()\n mod[\"main\"] = func\n mod = relay.transform.InferType()(mod)\n\n strided_slice_pattern_table = [\n (\n ethosu.StridedSliceParams.composite_name,\n ethosu.strided_slice_pattern(),\n lambda pat: ethosu.StridedSliceParams(pat).is_valid(),\n ),\n ]\n\n mod = partition_ethosu_by_table(mod, strided_slice_pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.StridedSliceRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.NoOpRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod = relay.transform.InferType()(mod)\n\n ext_func = mod[\"tvmgen_default_ethos_u_main_0\"]\n\n identity = ext_func.body\n assert identity.op.name == \"contrib.ethosu.identity\"\n\n # check that the strided_slice is still there\n strided_slice = identity.args[0]\n assert strided_slice.op.name == \"strided_slice\"\n\n # check that identity's output shape matches strided slice's output shape\n slice_shape = [a - b for a, b in zip(end, begin)]\n assert list(identity.checked_type.shape) == slice_shape\n\n\[email protected](\"operator_type\", [\"ABS\"])\[email protected](\n \"ifm_shape\",\n [[1, 2, 3, 4], [1, 7, 3], [8, 3, 1], [11, 22], [300]],\n)\ndef test_tflite_unary_elemwise_legalize(\n operator_type,\n ifm_shape,\n):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def abs_func(self, x):\n if operator_type == \"ABS\":\n op = tf.math.abs(x)\n return op\n\n model = Model()\n\n # Save the model\n concrete_func = model.abs_func.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n out_shape = ifm_shape\n func_body = ext_func.body\n\n # If we legalized the unary elementwise op into 4D\n if func_body.op.name == \"reshape\":\n reshape = func_body\n unary = func_body.args[0]\n reshape2 = unary.args[0]\n\n # Check the input to the reshape\n reshape2_in_shape = [i for i in 
reshape2.args[0].checked_type.shape]\n assert reshape2_in_shape == ifm_shape\n\n # Check that the unary elementwise operator is 4D after reshape\n assert len(unary.checked_type.shape) == 4\n assert unary.args[0].checked_type.dtype == dtype\n\n # Check that the output of the graph has the same shape as input\n reshape_out_shape = [i for i in reshape.checked_type.shape]\n assert reshape_out_shape == ifm_shape\n assert unary.attrs.operator_type == operator_type\n\n else:\n unary = func_body\n\n # Check the IFM\n assert list(unary.args[0].checked_type.shape) == ifm_shape\n assert unary.args[0].checked_type.dtype == dtype\n\n # Check the OFM\n assert list(unary.checked_type.shape) == out_shape\n assert unary.checked_type.dtype == dtype\n\n # operator type check\n assert unary.attrs.operator_type == operator_type\n\n if operator_type == \"ABS\":\n rewriter = legalize.AbsRewriter()\n pattern_table = [\n (\n ethosu.AbsParams.composite_name,\n ethosu.abs_pattern(),\n lambda pat: ethosu.AbsParams(pat).is_valid(),\n ),\n ]\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n mod = partition_ethosu_by_table(mod, pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\ndef test_tflite_tanh_legalize():\n dtype = \"int8\"\n ifm_shape = (1, 241, 132, 7)\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tanh_func(self, x):\n op = tf.math.tanh(x)\n return op\n\n model = Model()\n concrete_func = model.tanh_func.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, params = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n\n mod = ethosu.partition_for_ethosu(mod, params)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.TanhRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod = relay.transform.InferType()(mod)\n\n func_body = mod[\"tvmgen_default_ethos_u_main_0\"].body\n assert func_body.op.name == \"contrib.ethosu.identity\"\n assert func_body.attrs.activation == \"TANH\"\n assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)\n assert tuple(func_body.args[1].checked_type.shape) == (256,)\n\n\[email protected](\n \"ifm_shape, axis, keep_dims, use_same_quantization\",\n [\n # mean to depthwise + multiply\n [(1, 8, 16, 16), (1, 2), True, False],\n [(1, 8, 16, 16), (2, 1), True, False],\n [(1, 3, 4), (0, 1), True, False],\n [(8, 5), (1, 0), True, False],\n [(1, 65, 2, 1), (1, 2), True, False], # special case when h > 64\n # mean to average pool\n 
[(1, 8, 16, 16), (1,), True, True],\n [(1, 8, 16, 16), (2,), False, True],\n [(1, 8, 16, 16), (1, 2), False, True],\n [(3, 3, 4), (0,), True, True],\n [(3, 3, 4), (1,), False, True],\n [(8, 5), (0,), False, True],\n [(8, 5), (1,), True, True],\n # mean to depthwise\n [(1, 8, 16, 16), (1,), True, False],\n [(1, 8, 16, 16), (2,), True, False],\n [(1, 8, 16, 16), (1, 2), False, False],\n [(8, 4), (0,), False, False],\n ],\n)\ndef test_mean(ifm_shape, axis, keep_dims, use_same_quantization):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x):\n op = tf.math.reduce_mean(x, axis=axis, keepdims=keep_dims)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n return mod\n\n def create_relay_graph_with_same_quantization():\n ifm = relay.var(\"input\", shape=ifm_shape, dtype=dtype)\n cast = relay.cast(ifm, dtype=\"int32\")\n mean = relay.mean(cast, axis=axis, keepdims=keep_dims)\n requantize = relay.qnn.op.requantize(\n mean,\n input_scale=relay.const(1.0, dtype=\"float32\"),\n input_zero_point=relay.const(0, dtype=\"int32\"),\n output_scale=relay.const(1.0, dtype=\"float32\"),\n output_zero_point=relay.const(0, dtype=\"int32\"),\n )\n\n func = relay.Function(relay.analysis.free_vars(requantize), requantize)\n mod = tvm.IRModule.from_expr(func)\n return mod\n\n def verify(ext_func):\n out_var = ext_func.body\n\n next_op = out_var\n mul_op = None\n pooling_op = None\n depthwise_op = None\n if (\n isinstance(next_op, relay.expr.Call)\n and isinstance(next_op.op, tvm.ir.op.Op)\n and next_op.op.name == \"reshape\"\n ):\n next_op = next_op.args[0]\n if util.is_named_ethosu_op(next_op, \"binary_elementwise\"):\n mul_op = next_op\n next_op = next_op.args[0]\n if util.is_named_ethosu_op(next_op, \"pooling\"):\n pooling_op = next_op\n next_op = next_op.args[0]\n if util.is_named_ethosu_op(next_op, \"depthwise_conv2d\"):\n depthwise_op = next_op\n next_op = next_op.args[0]\n while (\n isinstance(next_op, relay.expr.Call)\n and isinstance(next_op.op, tvm.ir.op.Op)\n and next_op.op.name == \"reshape\"\n ):\n next_op = next_op.args[0]\n in_var = next_op\n\n def calculate_expected_output_shape():\n for i in range(len(ifm_shape)):\n if i in axis:\n if keep_dims:\n yield 1\n else:\n yield ifm_shape[i]\n\n out_shape = tuple(calculate_expected_output_shape())\n\n # check IFM\n assert tuple(in_var.checked_type.shape) == ifm_shape\n assert in_var.checked_type.dtype == dtype\n\n # check OFM\n assert tuple(out_var.checked_type.shape) == out_shape\n assert out_var.checked_type.dtype == dtype\n\n # check expected legalization case\n if axis in [(1, 2), (2, 1), (0, 1), (1, 0)] and keep_dims and dtype == \"int8\":\n assert 
depthwise_op and mul_op\n assert mul_op.attrs.operator_type == \"MUL\"\n elif pooling_op:\n attrs = pooling_op.attrs\n assert (\n attrs.ifm_scale == attrs.ofm_scale and attrs.ifm_zero_point == attrs.ofm_zero_point\n )\n else:\n assert depthwise_op\n assert not mul_op\n\n rewriter = legalize.MeanRewriter()\n pattern_table = [\n (\n ethosu.MeanParams.composite_name,\n ethosu.mean_pattern(),\n lambda pat: ethosu.MeanParams(pat).is_valid(),\n ),\n ]\n\n mod = (\n create_relay_graph_with_same_quantization()\n if use_same_quantization\n else create_tflite_graph()\n )\n mod = partition_ethosu_by_table(mod, pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"shapes, axis\",\n [\n ([(2, 3), (4, 3)], 0),\n ([(10, 2, 1), (10, 14, 1)], 1),\n ([(10,), (13,), (14,)], 0),\n ([(1, 5, 2, 1), (1, 5, 7, 1), (1, 5, 3, 1)], 2),\n ],\n)\ndef test_tflite_concat_legalize(shapes, axis):\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, shapes, axis):\n op = tf.concat(shapes, axis)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n [tf.TensorSpec(shape, tf.float32) for shape in shapes], axis\n )\n\n def representative_dataset():\n for _ in range(100):\n datas = [np.random.rand(*shape) for shape in shapes]\n yield [data.astype(np.float32) for data in datas]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n\n return tflite_model\n\n def verify(ext_func):\n new_concat_axis = np.sum(shape[axis] for shape in shapes)\n out_shape = list(shapes[0])\n out_shape[axis] = new_concat_axis\n\n op = ext_func.body\n for i, _ in enumerate(shapes):\n assert list(op.args[0][i].checked_type.shape) == list(shapes[i])\n\n assert list(op.checked_type.shape) == out_shape\n assert op.checked_type.dtype == \"int8\"\n\n concat_pattern_table = [\n (\n ethosu.ConcatParams.composite_name,\n ethosu.concat_pattern(),\n lambda pat: ethosu.ConcatParams(pat).is_valid(),\n )\n ]\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n relay_module, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={(\"ifm\" + str(i)): shape for i, shape in enumerate(shapes)},\n dtype_dict={(\"ifm\" + str(i)): \"int8\" for i, _ in enumerate(shapes)},\n )\n mod = partition_ethosu_by_table(relay_module, concat_pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.ConcatRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.NoOpRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod[\"tvmgen_default_ethos_u_main_0\"] = relay.transform.InferType()(mod)[\n \"tvmgen_default_ethos_u_main_0\"\n ]\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\ndef test_tflite_sigmoid_legalize():\n dtype = \"int8\"\n ifm_shape = (1, 237, 91, 7)\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def sigmoid_func(self, x):\n op = tf.math.sigmoid(x)\n return op\n\n model = Model()\n 
concrete_func = model.sigmoid_func.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_output_type = tf.int8\n converter.inference_input_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, params = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n\n mod = ethosu.partition_for_ethosu(mod, params)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.SigmoidRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod = relay.transform.InferType()(mod)\n\n func_body = mod[\"tvmgen_default_ethos_u_main_0\"].body\n assert func_body.op.name == \"contrib.ethosu.identity\"\n assert func_body.attrs.activation == \"SIGMOID\"\n assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)\n assert tuple(func_body.args[1].checked_type.shape) == (256,)\n\n\[email protected](\n \"ifm_shape, num_or_size_splits, axis\",\n [\n ((1, 4, 6, 8), 3, 2),\n ((4, 6, 8), 2, 0),\n ((5, 15), 3, 1),\n ((3, 7), 1, 1),\n ((100,), 25, 0),\n ],\n)\ndef test_tflite_split_legalize(ifm_shape, num_or_size_splits, axis):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x, num_or_size_splits, axis):\n op = tf.split(x, num_or_size_splits, axis=axis)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, tf.float32), num_or_size_splits, axis\n )\n\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n\n return tflite_model\n\n def verify(ext_func):\n # dig out the split\n single_output_split = num_or_size_splits == 1\n split = (\n ext_func.body.tuple_value\n if single_output_split\n else ext_func.body.args[0][0].args[0].tuple_value\n )\n assert split.op.name == \"split\"\n\n # Split is specified by number of equal chunks\n assert split.attrs.indices_or_sections == num_or_size_splits\n\n assert split.attrs.axis == axis\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n mod = ethosu.partition_for_ethosu(mod)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.PartitionedSplitRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = relay.transform.InferType()(mod)[\n 
\"tvmgen_default_ethos_u_main_0\"\n ]\n\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape, num_or_size_splits, axis\",\n [\n ((1, 4, 6, 8), (1, 3, 4), 3),\n ((10, 18, 4), (1, 4, 3, 2), 0),\n ((22, 7), (4, -1), 1),\n ((25,), (25,), 0),\n ],\n)\ndef test_tflite_split_v_legalize(ifm_shape, num_or_size_splits, axis):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x, num_or_size_splits, axis):\n # TF split gets converted into TFLite's split_v\n op = tf.split(x, num_or_size_splits, axis=axis)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, tf.float32), num_or_size_splits, axis\n )\n\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n\n return tflite_model\n\n def verify(ext_func):\n # dig out the split\n single_output_split = len(num_or_size_splits) == 1\n split = (\n ext_func.body.tuple_value\n if single_output_split\n else ext_func.body.args[0][0].args[0].tuple_value\n )\n assert split.op.name == \"split\"\n\n # Split is specified by the size of sections, so converting num_or_size_splits\n # into the indices where the tensor is split at since this is how split is represented\n # in Relay\n split_sections = [] if single_output_split else [num_or_size_splits[0]]\n for split_size in num_or_size_splits[1:-1]:\n sec = split_sections[-1] + split_size\n split_sections.append(sec)\n assert list(split.attrs.indices_or_sections) == split_sections\n\n assert split.attrs.axis == axis\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n mod = ethosu.partition_for_ethosu(mod)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.PartitionedSplitRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = relay.transform.InferType()(mod)[\n \"tvmgen_default_ethos_u_main_0\"\n ]\n\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape,ifm_scale,ifm_zp,ofm_scale,ofm_zp\",\n [[(1, 8, 8, 3), 1.0, 0, 1.0, 0], [(1, 20, 30, 3), 1.345, 34, 0.32, -23]],\n)\ndef test_ethosu_requantize(ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp):\n dtype = \"int8\"\n\n def create_model():\n ifm = relay.var(\"ifm\", shape=ifm_shape, dtype=\"int8\")\n requantize = relay.qnn.op.requantize(\n ifm,\n relay.const(ifm_scale, dtype=\"float32\"),\n relay.const(ifm_zp, dtype=\"int32\"),\n relay.const(ofm_scale, dtype=\"float32\"),\n relay.const(ofm_zp, dtype=\"int32\"),\n )\n return tvm.IRModule.from_expr(relay.Function([ifm], requantize))\n\n def verify(ext_func):\n op = ext_func.body\n\n # Check IFM\n ifm = op.args[0].checked_type\n assert list(ifm.shape) == list(ifm_shape)\n assert str(ifm.dtype) == dtype\n\n # Check OFM\n ofm = op.checked_type\n assert list(ofm.shape) == list(ifm_shape)\n assert str(ofm.dtype) == 
dtype\n\n # Check quantization params\n assert math.isclose(op.attrs.ifm_scale, ifm_scale, abs_tol=1e-7)\n assert op.attrs.ifm_zero_point == ifm_zp\n assert math.isclose(op.attrs.ofm_scale, ofm_scale, abs_tol=1e-7)\n assert op.attrs.ofm_zero_point == ofm_zp\n\n rewriter = legalize.RequantizeRewriter()\n pattern_table = [\n (\n ethosu.RequantizeParams.composite_name,\n ethosu.requantize_pattern(),\n lambda pat: ethosu.RequantizeParams(pat).is_valid(),\n ),\n ]\n\n mod = create_model()\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\ndef test_multiple_requantize_offload():\n \"\"\"\n Testing requantize offload in the case one requantize operation is part of\n an existing pattern (in this case Mean: cast->mean->requantize) and the\n other is a stand-alone requantize.\n \"\"\"\n\n def create_model():\n ifm = relay.var(\"input\", shape=(1, 3, 3, 4), dtype=\"int8\")\n cast = relay.cast(ifm, dtype=\"int32\")\n mean = relay.mean(cast, axis=1, keepdims=True)\n requantize = relay.qnn.op.requantize(\n mean,\n input_scale=relay.const(1.0, dtype=\"float32\"),\n input_zero_point=relay.const(0, dtype=\"int32\"),\n output_scale=relay.const(1.0, dtype=\"float32\"),\n output_zero_point=relay.const(0, dtype=\"int32\"),\n )\n requantize = relay.qnn.op.requantize(\n requantize,\n input_scale=relay.const(1.0, dtype=\"float32\"),\n input_zero_point=relay.const(0, dtype=\"int32\"),\n output_scale=relay.const(1.0, dtype=\"float32\"),\n output_zero_point=relay.const(0, dtype=\"int32\"),\n )\n return tvm.IRModule.from_expr(relay.Function([ifm], requantize))\n\n def verify(ext_func):\n # If mean operation and separate requantize were offloaded correctly,\n # there should only be a pooling operation followed by an identity\n # operation leagalized.\n op = ext_func.body\n assert op.op.name == \"contrib.ethosu.identity\"\n op = op.args[0]\n assert ext_func.body.args[0].op.name == \"contrib.ethosu.pooling\"\n op = op.args[0]\n assert isinstance(op, relay.Var)\n\n mod = create_model()\n mod = ethosu.partition_for_ethosu(mod)\n mod = legalize.LegalizeEthosU()(mod)\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape,size\",\n [\n [(1, 2, 2, 1), (4, 4)],\n [(1, 4, 7, 3), (8, 14)],\n [(1, 3, 5, 3), (3, 5)],\n ],\n)\ndef test_tflite_resize2d_nearest_neighbor(ifm_shape, size):\n align_corners = False\n dtype = \"int8\"\n\n def create_tflite_graph():\n @tf.function\n def resize_model(x):\n return tf.compat.v1.image.resize_nearest_neighbor(\n x, size, align_corners=align_corners, half_pixel_centers=False\n )\n\n concrete_func = resize_model.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n 
dtype_dict={\"input\": dtype},\n )\n return mod\n\n def verify(ext_func):\n op = ext_func.body\n in_var = op.args[0]\n\n # check IFM\n assert tuple(in_var.checked_type.shape) == ifm_shape\n assert in_var.checked_type.dtype == dtype\n\n # check OFM\n attrs = dict(op.attrs)\n out_shape = (ifm_shape[0], size[0], size[1], ifm_shape[3])\n assert tuple(op.checked_type.shape) == out_shape\n assert op.checked_type.dtype == dtype\n\n # Check Op attributes\n if size[0] == ifm_shape[1] and size[1] == ifm_shape[2]:\n assert op.op.name == \"contrib.ethosu.identity\"\n else:\n assert attrs[\"pooling_type\"] == \"AVG\"\n assert attrs[\"upscale\"] == \"NEAREST\"\n\n rewriter = legalize.Resize2dRewriter()\n pattern_table = [\n (\n ethosu.Resize2dParams.composite_name,\n ethosu.resize2d_pattern(),\n lambda pat: ethosu.Resize2dParams(pat).is_valid(),\n ),\n ]\n\n mod = create_tflite_graph()\n mod = partition_ethosu_by_table(mod, pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape,size,align_corners\",\n [\n [(1, 2, 2, 1), (4, 4), False],\n [(1, 4, 7, 3), (8, 14), False],\n [(1, 2, 2, 1), (3, 3), True],\n [(1, 4, 7, 3), (7, 13), True],\n [(1, 3, 5, 3), (3, 5), False],\n ],\n)\ndef test_tflite_resize2d_bilinear(ifm_shape, size, align_corners):\n dtype = \"int8\"\n\n def create_tflite_graph():\n @tf.function\n def resize_model(x):\n return tf.compat.v1.image.resize_bilinear(\n x, size, align_corners=align_corners, half_pixel_centers=False\n )\n\n concrete_func = resize_model.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n return mod\n\n def verify(ext_func):\n op = ext_func.body\n in_var = op.args[0]\n\n # check IFM\n assert tuple(in_var.checked_type.shape) == ifm_shape\n assert in_var.checked_type.dtype == dtype\n\n # check OFM\n attrs = dict(op.attrs)\n out_shape = (ifm_shape[0], size[0], size[1], ifm_shape[3])\n assert tuple(op.checked_type.shape) == out_shape\n assert op.checked_type.dtype == dtype\n\n # Check Op attributes\n if size[0] == ifm_shape[1] and size[1] == ifm_shape[2]:\n assert op.op.name == \"contrib.ethosu.identity\"\n else:\n assert attrs[\"pooling_type\"] == \"AVG\"\n assert attrs[\"upscale\"] == \"NEAREST\"\n\n # Check padding\n if align_corners:\n assert list(attrs[\"padding\"]) == [0, 0, 0, 0]\n else:\n assert list(attrs[\"padding\"]) == [0, 0, 1, 1]\n\n rewriter = legalize.Resize2dRewriter()\n pattern_table = [\n (\n ethosu.Resize2dParams.composite_name,\n ethosu.resize2d_pattern(),\n lambda pat: ethosu.Resize2dParams(pat).is_valid(),\n ),\n ]\n\n mod = create_tflite_graph()\n mod = partition_ethosu_by_table(mod, pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = 
dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n" ]
[ [ "numpy.sum", "numpy.ones", "tensorflow.math.subtract", "tensorflow.nn.avg_pool", "tensorflow.math.tanh", "tensorflow.math.sigmoid", "tensorflow.concat", "tensorflow.math.maximum", "tensorflow.split", "tensorflow.compat.v1.image.resize_nearest_neighbor", "tensorflow.math.minimum", "tensorflow.nn.max_pool", "tensorflow.lite.TFLiteConverter.from_concrete_functions", "numpy.random.rand", "numpy.random.uniform", "tensorflow.nn.depthwise_conv2d", "tensorflow.math.reduce_mean", "tensorflow.math.multiply", "tensorflow.math.abs", "tensorflow.compat.v1.image.resize_bilinear", "tensorflow.math.add", "tensorflow.TensorSpec", "tensorflow.nn.relu" ] ]
ischigal/gammapy
[ "c56ca1bb237d9eb4a7a3aed8eaf359206bf0e628" ]
[ "gammapy/modeling/tests/test_fit.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Unit tests for the Fit class\"\"\"\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom astropy.table import Table\nfrom gammapy.datasets import Dataset\nfrom gammapy.modeling import Fit, Parameter\nfrom gammapy.modeling.models import Model, Models\nfrom gammapy.utils.testing import requires_dependency\n\npytest.importorskip(\"iminuit\")\n\n\nclass MyModel(Model):\n x = Parameter(\"x\", 2)\n y = Parameter(\"y\", 3e2)\n z = Parameter(\"z\", 4e-2)\n name = \"test\"\n datasets_names = [\"test\"]\n type = \"model\"\n\n\nclass MyDataset(Dataset):\n tag = \"MyDataset\"\n\n def __init__(self, name=\"test\"):\n self._name = name\n self._models = Models([MyModel(x=1.99, y=2.99e3, z=3.99e-2)])\n self.data_shape = (1,)\n self.meta_table = Table()\n\n @property\n def models(self):\n return self._models\n\n def stat_sum(self):\n # self._model.parameters = parameters\n x, y, z = [p.value for p in self.models.parameters]\n x_opt, y_opt, z_opt = 2, 3e2, 4e-2\n return (x - x_opt) ** 2 + (y - y_opt) ** 2 + (z - z_opt) ** 2\n\n def fcn(self):\n x, y, z = [p.value for p in self.models.parameters]\n x_opt, y_opt, z_opt = 2, 3e5, 4e-5\n x_err, y_err, z_err = 0.2, 3e4, 4e-6\n return (\n ((x - x_opt) / x_err) ** 2\n + ((y - y_opt) / y_err) ** 2\n + ((z - z_opt) / z_err) ** 2\n )\n\n def stat_array(self):\n \"\"\"Statistic array, one value per data point.\"\"\"\n\n\n@requires_dependency(\"iminuit\")\n@requires_dependency(\"sherpa\")\[email protected](\"backend\", [\"sherpa\", \"scipy\"])\ndef test_optimize_backend_and_covariance(backend):\n dataset = MyDataset()\n\n if backend == \"scipy\":\n kwargs = {\"method\": \"L-BFGS-B\"}\n else:\n kwargs = {}\n\n kwargs[\"backend\"] = backend\n\n fit = Fit(optimize_opts=kwargs)\n result = fit.run([dataset])\n result = result[\"optimize_result\"]\n\n pars = result.parameters\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-3)\n assert_allclose(pars[\"y\"].value, 3e2, rtol=1e-3)\n assert_allclose(pars[\"z\"].value, 4e-2, rtol=1e-2)\n\n assert_allclose(pars[\"x\"].error, 1, rtol=1e-7)\n assert_allclose(pars[\"y\"].error, 1, rtol=1e-7)\n assert_allclose(pars[\"z\"].error, 1, rtol=1e-7)\n\n correlation = dataset.models.covariance.correlation\n assert_allclose(correlation[0, 1], 0, atol=1e-7)\n assert_allclose(correlation[0, 2], 0, atol=1e-7)\n assert_allclose(correlation[1, 2], 0, atol=1e-7)\n\n\[email protected](\"backend\", [\"minuit\"])\ndef test_run(backend):\n dataset = MyDataset()\n fit = Fit(backend=backend)\n result = fit.run([dataset])\n result = result[\"optimize_result\"]\n pars = result.parameters\n\n assert result.success is True\n\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-3)\n assert_allclose(pars[\"y\"].value, 3e2, rtol=1e-3)\n assert_allclose(pars[\"z\"].value, 4e-2, rtol=1e-3)\n\n assert_allclose(pars[\"x\"].error, 1, rtol=1e-7)\n assert_allclose(pars[\"y\"].error, 1, rtol=1e-7)\n assert_allclose(pars[\"z\"].error, 1, rtol=1e-7)\n\n correlation = dataset.models.covariance.correlation\n assert_allclose(correlation[0, 1], 0, atol=1e-7)\n assert_allclose(correlation[0, 2], 0, atol=1e-7)\n assert_allclose(correlation[1, 2], 0, atol=1e-7)\n\n\n@requires_dependency(\"sherpa\")\[email protected](\"backend\", [\"minuit\", \"sherpa\", \"scipy\"])\ndef test_optimize(backend):\n dataset = MyDataset()\n\n if backend == \"scipy\":\n kwargs = {\"method\": \"L-BFGS-B\"}\n else:\n kwargs = {}\n\n fit = Fit(store_trace=True, backend=backend, optimize_opts=kwargs)\n result = 
fit.optimize([dataset])\n pars = dataset.models.parameters\n\n assert result.success is True\n assert_allclose(result.total_stat, 0, atol=1)\n\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-3)\n assert_allclose(pars[\"y\"].value, 3e2, rtol=1e-3)\n assert_allclose(pars[\"z\"].value, 4e-2, rtol=1e-2)\n\n assert len(result.trace) == result.nfev\n\n\n# TODO: add some extra covariance tests, in addition to run\n# Probably mainly if error message is OK if optimize didn't run first.\n# def test_covariance():\n\n\[email protected](\"backend\", [\"minuit\"])\ndef test_confidence(backend):\n dataset = MyDataset()\n fit = Fit(backend=backend)\n fit.optimize([dataset])\n result = fit.confidence(datasets=[dataset], parameter=\"x\")\n\n assert result[\"success\"] is True\n assert_allclose(result[\"errp\"], 1)\n assert_allclose(result[\"errn\"], 1)\n\n # Check that original value state wasn't changed\n assert_allclose(dataset.models.parameters[\"x\"].value, 2)\n\n\[email protected](\"backend\", [\"minuit\"])\ndef test_confidence_frozen(backend):\n dataset = MyDataset()\n dataset.models.parameters[\"x\"].frozen = True\n fit = Fit(backend=backend)\n fit.optimize([dataset])\n result = fit.confidence(datasets=[dataset], parameter=\"y\")\n\n assert result[\"success\"] is True\n assert_allclose(result[\"errp\"], 1)\n assert_allclose(result[\"errn\"], 1)\n\n\ndef test_stat_profile():\n dataset = MyDataset()\n fit = Fit()\n fit.run([dataset])\n dataset.models.parameters[\"x\"].scan_n_values = 3\n result = fit.stat_profile(datasets=[dataset], parameter=\"x\")\n\n assert_allclose(result[\"x_scan\"], [0, 2, 4], atol=1e-7)\n assert_allclose(result[\"stat_scan\"], [4, 0, 4], atol=1e-7)\n assert len(result[\"fit_results\"]) == 0\n\n # Check that original value state wasn't changed\n assert_allclose(dataset.models.parameters[\"x\"].value, 2)\n\n\ndef test_stat_profile_reoptimize():\n dataset = MyDataset()\n fit = Fit()\n fit.run([dataset])\n\n dataset.models.parameters[\"y\"].value = 0\n dataset.models.parameters[\"x\"].scan_n_values = 3\n result = fit.stat_profile(datasets=[dataset], parameter=\"x\", reoptimize=True)\n\n assert_allclose(result[\"x_scan\"], [0, 2, 4], atol=1e-7)\n assert_allclose(result[\"stat_scan\"], [4, 0, 4], atol=1e-7)\n assert_allclose(\n result[\"fit_results\"][0].total_stat, result[\"stat_scan\"][0], atol=1e-7\n )\n\n\ndef test_stat_surface():\n dataset = MyDataset()\n fit = Fit()\n fit.run([dataset])\n\n x_values = [1, 2, 3]\n y_values = [2e2, 3e2, 4e2]\n\n dataset.models.parameters[\"x\"].scan_values = x_values\n dataset.models.parameters[\"y\"].scan_values = y_values\n result = fit.stat_surface(datasets=[dataset], x=\"x\", y=\"y\")\n\n assert_allclose(result[\"x_scan\"], x_values, atol=1e-7)\n assert_allclose(result[\"y_scan\"], y_values, atol=1e-7)\n expected_stat = [\n [1.0001e04, 1.0000e00, 1.0001e04],\n [1.0000e04, 0.0000e00, 1.0000e04],\n [1.0001e04, 1.0000e00, 1.0001e04],\n ]\n assert_allclose(list(result[\"stat_scan\"]), expected_stat, atol=1e-7)\n assert len(result[\"fit_results\"]) == 0\n\n # Check that original value state wasn't changed\n assert_allclose(dataset.models.parameters[\"x\"].value, 2)\n assert_allclose(dataset.models.parameters[\"y\"].value, 3e2)\n\n\ndef test_stat_surface_reoptimize():\n dataset = MyDataset()\n fit = Fit()\n fit.run([dataset])\n\n x_values = [1, 2, 3]\n y_values = [2e2, 3e2, 4e2]\n\n dataset.models.parameters[\"z\"].value = 0\n dataset.models.parameters[\"x\"].scan_values = x_values\n dataset.models.parameters[\"y\"].scan_values = y_values\n\n result 
= fit.stat_surface(\n datasets=[dataset], x=\"x\", y=\"y\", reoptimize=True\n )\n\n assert_allclose(result[\"x_scan\"], x_values, atol=1e-7)\n assert_allclose(result[\"y_scan\"], y_values, atol=1e-7)\n expected_stat = [\n [1.0001e04, 1.0000e00, 1.0001e04],\n [1.0000e04, 0.0000e00, 1.0000e04],\n [1.0001e04, 1.0000e00, 1.0001e04],\n ]\n\n assert_allclose(list(result[\"stat_scan\"]), expected_stat, atol=1e-7)\n assert_allclose(\n result[\"fit_results\"][0][0].total_stat, result[\"stat_scan\"][0][0], atol=1e-7\n )\n\n\ndef test_stat_contour():\n dataset = MyDataset()\n dataset.models.parameters[\"x\"].frozen = True\n fit = Fit(backend=\"minuit\")\n fit.optimize([dataset])\n result = fit.stat_contour(datasets=[dataset], x=\"y\", y=\"z\")\n\n assert result[\"success\"] is True\n\n x = result[\"y\"]\n assert_allclose(len(x), 10)\n assert_allclose(x[0], 299, rtol=1e-5)\n assert_allclose(x[-1], 299.292893, rtol=1e-5)\n y = result[\"z\"]\n assert_allclose(len(y), 10)\n assert_allclose(y[0], 0.04, rtol=1e-5)\n assert_allclose(y[-1], 0.747107, rtol=1e-5)\n\n # Check that original value state wasn't changed\n assert_allclose(dataset.models.parameters[\"y\"].value, 300)\n" ]
[ [ "numpy.testing.assert_allclose" ] ]
Lexelius/contrast
[ "ef7d6d8c51fb922e89c1c46db734e3c09f88a9fc" ]
[ "beamlines/nanomax/macro_attenuate.py" ]
[ "\"\"\"\nModule providing a macro to automatically absorb X percent of the\nbeam using the absorbers at the NanoMAX beamline\n\"\"\"\n\nimport os\nimport numpy as np\nfrom contrast.environment import env, macro, register_shortcut, runCommand\n\n# ToDo\n# - avoid elements with absorption edges close to the current energy\n# - way of printing the closest possible absorption values\n\n@macro\nclass attenuate(object):\n \"\"\"\n Sets the attenuators to absorb X percent of the beam depending \n on the current photon beam enegery.\n\n usage / examples:\n %attenuate # show current attenuator setting / value\n %attenuate 0.2 # attenuate to 20% beam intensity\n %attenuate 0.1 ['Si','Al'] # attenuate to 10% but only use Si and Al\n # ['Al','Ti','Si','Cu','Fe','Mo','Ta','Ag'] \n %attenuate 0.2 how='unsafe' # attenuate to 20% beam intensity without\n # manually confirming the motor movement\n # ... for the usement in macros\n \"\"\"\n\n ############################################################################\n # absorber settings at the NanoMAX beamline - status 2019-10-06 \n ############################################################################\n\n elements = ['Al', 'Ti', 'Si', 'Cu', None, 'Fe', 'Mo', 'Ta', 'Ag']\n position = [-40000, -29000, -18000, -9000, 0, 11000, 21000, 33000, 41000]\n carriers = ['attenuator1_x', 'attenuator2_x', \n 'attenuator3_x', 'attenuator4_x'] \n thickness = [[ 25, 50, 100, 100], # in um\n [ 20, 40, 80, 160],\n [ 250, 500, 1000, 1000],\n [ 20, 40, 80, 160],\n [ 0, 0, 0, 0],\n [ 50, 100, 200, 400],\n [ 15, 30, 60, 120],\n [ 20, 40, 80, 160],\n [ 25, 50, 100, 200]]\n thickness = np.array(thickness)\n\n ############################################################################\n # loading offline data between 5 and 25 keV \n # taken from http://henke.lbl.gov/optical_constants/filter2.html\n ############################################################################\n\n absorption_data = {}\n base = os.path.dirname(os.path.realpath(__file__))\n base += '/attenuation/attenuation_1um_'\n for element in [x for x in elements if not(x==None)]:\n fpath = base + element + '.txt'\n data = np.loadtxt(fpath, skiprows=2)\n absorption_data[element] = data\n\n ############################################################################\n # methods\n ############################################################################\n\n def __init__(self, attenuate_to=None, use_ele=['Al', 'Ti', 'Si', 'Cu', 'Fe', 'Mo', 'Ta', 'Ag'], how='safe', verbosity=3):\n self.attenuate_to = attenuate_to\n self.how = how\n self.verbosity = verbosity\n self.use_ele = use_ele\n self.use_ele.append(None)\n\n def get_current_energy(self):\n runCommand('wms energy')\n #print(env)\n self.photon_energy = env.lastMacroResult\n\n def calculate_transmission_of_1um(self):\n # linear interpolation of T(E) in log log \n self.transmission_1um = {}\n for element in [x for x in self.elements if not(x==None)]:\n T_log = np.interp(x = np.log(self.photon_energy),\n xp = np.log(self.absorption_data[element][:,0]),\n fp = np.log(self.absorption_data[element][:,1]))\n self.transmission_1um[element] = np.exp(T_log)\n\n def calculate_transmission_of_actual_foils(self):\n self.transmission = 1.*np.ones_like(self.thickness)\n for i, element in enumerate(self.elements):\n for j, carrier in enumerate(self.carriers):\n if not(element==None):\n d_um = self.thickness[i,j]\n T = (self.transmission_1um[element])**d_um\n self.transmission[i,j] = 1.*T\n\n def calcualte_possible_permutations(self):\n self.T_tot = [[T1*T2*T3*T4, i1, 
i2, i3, i4, \n [self.elements[i1], self.elements[i2], \n self.elements[i3], self.elements[i4]]] \n for i1, T1 in enumerate(self.transmission[:,0]) \n for i2, T2 in enumerate(self.transmission[:,1]) \n for i3, T3 in enumerate(self.transmission[:,2]) \n for i4, T4 in enumerate(self.transmission[:,3]) ] \n self.T_tot = np.array(self.T_tot)\n self.T_tot = self.T_tot[np.argsort(self.T_tot[:,0])]\n\n def run_command(self, command):\n runCommand(command)\n\n def get_current_carrier_positions(self):\n carrier_positions = []\n for carrier in sorted(self.carriers):\n runCommand('wms '+carrier)\n carrier_positions.append(env.lastMacroResult)\n return np.array(carrier_positions)\n\n def estiamte_carrier_index(self, position):\n array = np.asarray(self.position)\n idx = (np.abs(array - position)).argmin()\n return idx\n\n def show_current_attenuation(self, printing=True):\n carrier_positions = self.get_current_carrier_positions()\n carrier_indices = np.array([self.estiamte_carrier_index(pos) for \n pos in carrier_positions])\n self.get_current_energy()\n self.calculate_transmission_of_1um()\n self.calculate_transmission_of_actual_foils()\n self.T_currently = 1\n for j, i in enumerate(carrier_indices):\n self.T_currently *= self.transmission[i,j]\n \n if printing:\n print('currently:')\n print(' absorption ', str(1-self.T_currently))\n print(' transmission', str(self.T_currently))\n print('with:')\n for i_carrier, i_pos in enumerate(carrier_indices):\n i_pos = int(i_pos)\n line = ' ' + self.carriers[i_carrier]\n line += ' '+ str(carrier_positions[i_carrier]).rjust(10)\n line += ' #' + str(self.thickness[i_pos, i_carrier]).rjust(5)\n line += ' um of ' + str(self.elements[i_pos])\n print(line)\n\n def input_validation(self):\n if self.attenuate_to == None:\n self.show_current_attenuation()\n return False\n elif not(isinstance(self.attenuate_to, (int, float))):\n print('no number given as attenuation value')\n return False\n else:\n return True\n\n def check_possible_permutations_for_elements(self):\n self.T_allowed = []\n for permutation in self.T_tot:\n works = 0\n for have_to_use in permutation[5]:\n if have_to_use in self.use_ele: works+=1\n if works == len(permutation[5]):\n self.T_allowed.append(permutation)\n self.T_allowed = np.array(self.T_allowed)\n\n def run(self):\n if self.input_validation():\n\n self.get_current_energy()\n self.calculate_transmission_of_1um()\n self.calculate_transmission_of_actual_foils()\n self.calcualte_possible_permutations()\n self.check_possible_permutations_for_elements()\n\n self.T_min = 1.*self.T_allowed[0,0]\n\n try:\n if self.attenuate_to == 'max':\n print('choosing maximal possible attenuation')\n self.T_choosen = 1.*self.T_allowed[0,:]\n self.attenuate_to = 1.-self.T_choosen[0]\n\n # is the choosen absorption value reachable?\n elif ((self.attenuate_to > 1) or \n (round(1-self.T_min,3 ) <= self.attenuate_to)):\n print('absorption of', self.attenuate_to, \n 'cannot be reached')\n print('instead choosing maximum possible attenuation')\n self.T_choosen = 1.*self.T_allowed[0,:]\n\n # which combination gives the closest result?\n else:\n self.T_choosen = list(filter(lambda i: i[0] <= 1-self.attenuate_to, \n self.T_allowed))[-1]\n except ValueError:\n print(\"Oops! 
That was no valid input\")\n\n # get needed mv motor commands\n commands = []\n for i_carrier, i_pos in enumerate(self.T_choosen[1:1+len(self.carriers)]):\n i_pos = int(i_pos)\n command = 'mv ' + str(self.carriers[i_carrier])\n command += ' ' + str(self.position[i_pos]).ljust(8)\n commands.append(command)\n\n # print an output\n if self.verbosity>=3 or self.how=='safe':\n print('aimed for:')\n print(' absorption ', self.attenuate_to)\n print(' transmission', max(0, 1-self.attenuate_to))\n print(' at currently', self.photon_energy, 'eV')\n print('can achieve:')\n print(' absorption ', str(1-self.T_choosen[0]))\n print(' transmission', str(self.T_choosen[0]))\n print('with motor setting:')\n\n for i_carrier, i_pos in enumerate(self.T_choosen[1:1+len(self.carriers)]):\n i_pos = int(i_pos)\n line = ' ' + commands[i_carrier]\n line += '#' + str(self.thickness[i_pos, i_carrier]).rjust(5)\n line += ' um of ' + str(self.elements[i_pos])\n print(line)\n\n # move motors\n if self.how=='safe':\n yes = ['yes', 'y', '1', 'true']\n user_input = input('Proceed to move motors? [Y/n] ').lower()\n if user_input in yes:\n\n # run all motor movement commands\n for command in commands: self.run_command(command)\n\n # check that the motors have moved to the calculated position\n self.show_current_attenuation(printing=False)\n if self.T_currently != self.T_choosen[0]:\n print('\\x1b[0;49;91[ERROR] mattenuation was NOT set\\x1b[0m')\n else:\n print('\\x1b[0;49;92msuccessfully set the attenuation\\x1b[0m')\n\n else: \n for command in commands: self.run_command(command)\n" ]
[ [ "numpy.ones_like", "numpy.asarray", "numpy.exp", "numpy.argsort", "numpy.abs", "numpy.log", "numpy.array", "numpy.loadtxt" ] ]
martcous/dipy
[ "6bff5655f03db19bde5aa951ffb91987983a889b" ]
[ "dipy/reconst/tests/test_peak_finding.py" ]
[ "from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport numpy.testing as npt\nfrom dipy.reconst.recspeed import (local_maxima, remove_similar_vertices,\n search_descending)\nfrom dipy.data import get_sphere, get_data\nfrom dipy.core.sphere import unique_edges, HemiSphere\nfrom dipy.sims.voxel import all_tensor_evecs\n\n\ndef test_local_maxima():\n sphere = get_sphere('symmetric724')\n vertices, faces = sphere.vertices, sphere.faces\n edges = unique_edges(faces)\n\n # Check that the first peak is == max(odf)\n odf = abs(vertices.sum(-1))\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_equal(max(odf), peak_values[0])\n npt.assert_equal(max(odf), odf[peak_index[0]])\n\n # Create an artificial odf with a few peaks\n odf = np.zeros(len(vertices))\n odf[1] = 1.\n odf[143] = 143.\n odf[505] = 505.\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_array_equal(peak_values, [505, 143, 1])\n npt.assert_array_equal(peak_index, [505, 143, 1])\n\n # Check that neighboring points can both be peaks\n odf = np.zeros(len(vertices))\n point1, point2 = edges[0]\n odf[[point1, point2]] = 1.\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_array_equal(peak_values, [1., 1.])\n npt.assert_(point1 in peak_index)\n npt.assert_(point2 in peak_index)\n\n # Repeat with a hemisphere\n hemisphere = HemiSphere(xyz=vertices, faces=faces)\n vertices, edges = hemisphere.vertices, hemisphere.edges\n\n # Check that the first peak is == max(odf)\n odf = abs(vertices.sum(-1))\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_equal(max(odf), peak_values[0])\n npt.assert_equal(max(odf), odf[peak_index[0]])\n\n # Create an artificial odf with a few peaks\n odf = np.zeros(len(vertices))\n odf[1] = 1.\n odf[143] = 143.\n odf[300] = 300.\n peak_value, peak_index = local_maxima(odf, edges)\n npt.assert_array_equal(peak_value, [300, 143, 1])\n npt.assert_array_equal(peak_index, [300, 143, 1])\n\n # Check that neighboring points can both be peaks\n odf = np.zeros(len(vertices))\n point1, point2 = edges[0]\n odf[[point1, point2]] = 1.\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_array_equal(peak_values, [1., 1.])\n npt.assert_(point1 in peak_index)\n npt.assert_(point2 in peak_index)\n\n # Should raise an error if odf has nans\n odf[20] = np.nan\n npt.assert_raises(ValueError, local_maxima, odf, edges)\n\n # Should raise an error if edge values are too large to index odf\n edges[0, 0] = 9999\n odf[20] = 0\n npt.assert_raises(IndexError, local_maxima, odf, edges)\n\n\ndef test_remove_similar_peaks():\n vertices = np.array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.],\n [1.1, 1., 0.],\n [0., 2., 1.],\n [2., 1., 0.],\n [1., 0., 0.]])\n norms = np.sqrt((vertices*vertices).sum(-1))\n vertices = vertices/norms[:, None]\n\n # Return unique vertices\n uv = remove_similar_vertices(vertices, .01)\n npt.assert_array_equal(uv, vertices[:6])\n\n # Return vertices with mapping and indices\n uv, mapping, index = remove_similar_vertices(vertices, .01,\n return_mapping=True,\n return_index=True)\n npt.assert_array_equal(uv, vertices[:6])\n npt.assert_array_equal(mapping, list(range(6)) + [0])\n npt.assert_array_equal(index, range(6))\n\n # Test mapping with different angles\n uv, mapping = remove_similar_vertices(vertices, .01, return_mapping=True)\n npt.assert_array_equal(uv, vertices[:6])\n npt.assert_array_equal(mapping, list(range(6)) + [0])\n uv, mapping = remove_similar_vertices(vertices, 30, return_mapping=True)\n 
npt.assert_array_equal(uv, vertices[:4])\n npt.assert_array_equal(mapping, list(range(4)) + [1, 0, 0])\n uv, mapping = remove_similar_vertices(vertices, 60, return_mapping=True)\n npt.assert_array_equal(uv, vertices[:3])\n npt.assert_array_equal(mapping, list(range(3)) + [0, 1, 0, 0])\n\n # Test index with different angles\n uv, index = remove_similar_vertices(vertices, .01, return_index=True)\n npt.assert_array_equal(uv, vertices[:6])\n npt.assert_array_equal(index, range(6))\n uv, index = remove_similar_vertices(vertices, 30, return_index=True)\n npt.assert_array_equal(uv, vertices[:4])\n npt.assert_array_equal(index, range(4))\n uv, index = remove_similar_vertices(vertices, 60, return_index=True)\n npt.assert_array_equal(uv, vertices[:3])\n npt.assert_array_equal(index, range(3))\n\n\ndef test_search_descending():\n a = np.linspace(10., 1., 10)\n\n npt.assert_equal(search_descending(a, 1.), 1)\n npt.assert_equal(search_descending(a, .89), 2)\n npt.assert_equal(search_descending(a, .79), 3)\n\n # Test small array\n npt.assert_equal(search_descending(a[:1], 1.), 1)\n npt.assert_equal(search_descending(a[:1], 0.), 1)\n npt.assert_equal(search_descending(a[:1], .5), 1)\n\n # Test very small array\n npt.assert_equal(search_descending(a[:0], 1.), 0)\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n" ]
[ [ "numpy.testing.assert_raises", "numpy.testing.assert_array_equal", "numpy.array", "numpy.linspace", "numpy.testing.assert_" ] ]
Prakhar-Bhartiya/SentimentAnalysis
[ "8fa2664a57b01e7303ef26d1226a81c0e25be4b7" ]
[ "preprocessing.py" ]
[ "\"\"\"\nDATA DESCRIPTION\n\nsentiment140 dataset. It contains 1,600,000 tweets extracted using the twitter api . The tweets have been annotated (0 = negative, 4 = positive) and they can be used to detect sentiment .\n\nIt contains the following 6 fields:\n\ntarget: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)\nids: The id of the tweet ( 2087)\ndate: the date of the tweet (Sat May 16 23:58:44 UTC 2009)\nflag: The query (lyx). If there is no query, then this value is NO_QUERY.\nuser: the user that tweeted (robotickilldozr)\ntext: the text of the tweet (Lyx is cool)\n\n\"\"\"\n\n#import libraries\nimport pandas as pd\n\ndata = pd.read_csv('training.1600000.processed.noemoticon.csv',encoding = 'latin', header=None, nrows=25)\n\n#Adding header to data\ndata = data.rename(columns={0: 'target', 1: 'id', 2: 'TimeStamp', 3: 'query', 4: 'username', 5: 'content'})\n\n#Dropping unncessary columns\ndata.drop(['id','TimeStamp','query'], axis=1, inplace=True)\n\n\nprint(data.to_string())\n" ]
[ [ "pandas.read_csv" ] ]
chrisjonesBSU/fresnel
[ "92e17346899a78b68af9bc8006a6bec95e3476cc" ]
[ "fresnel/__init__.py" ]
[ "# Copyright (c) 2016-2021 The Regents of the University of Michigan\n# Part of fresnel, released under the BSD 3-Clause License.\n\n\"\"\"The fresnel ray tracing package.\"\"\"\n\nimport os\nimport numpy\n\nfrom . import geometry # noqa: F401 - ignore unused import\nfrom . import tracer\nfrom . import camera\nfrom . import color # noqa: F401 - ignore unused import (users will use)\nfrom . import light\nfrom . import version # noqa: F401 - ignore unused import (users will use)\n\nfrom . import _common\nif _common.cpu_built():\n from . import _cpu\nif _common.gpu_built():\n from . import _gpu\n\n\nclass Device(object):\n \"\"\"Hardware device to use for ray tracing.\n\n Args:\n mode (str): Specify execution mode: Valid values are ``auto``, ``gpu``,\n and ``cpu``.\n n (int): Specify the number of cpu threads / GPUs this device will use.\n *None* will use all available threads / devices.\n\n `Device` defines hardware device to use for ray tracing. `Scene` and\n `Tracer` instances must be attached to a `Device`. You may attach any number\n of scenes and tracers to a single `Device`.\n\n See Also:\n Tutorials:\n\n - :doc:`examples/02-Advanced-topics/01-Devices`\n - :doc:`examples/02-Advanced-topics/02-Tracer-methods`\n\n When mode is ``auto``, the default, `Device` will select GPU rendering if\n available and fall back on CPU rendering if not. Set mode to ``gpu`` or\n ``cpu`` to force a specific mode.\n\n Important:\n By default (``n==None``), this device will use all available GPUs or CPU\n cores. Set *n* to the number of GPUs or CPU cores this device should\n use. When selecting *n* GPUs, the device selects the first *n* in the\n `available_gpus` list.\n\n Tip:\n Use only a single `Device` to reduce memory consumption.\n\n The static member `available_modes` lists which modes are available. For a\n mode to be available, the corresponding module must be enabled at compile\n time. Additionally, there must be at least one GPU present for the ``gpu``\n mode to be available.\n\n .. 
code-block:: python\n\n >>> fresnel.Device.available_modes\n ['gpu', 'cpu', 'auto']\n \"\"\"\n\n available_modes = []\n \"\"\"list[str]: Available execution modes.\"\"\"\n\n available_gpus = []\n \"\"\"list[str]: Available GPUS.\"\"\"\n\n def __init__(self, mode='auto', n=None):\n # determine the number of available GPUs\n num_gpus = 0\n if _common.gpu_built():\n num_gpus = _gpu.get_num_available_devices()\n\n # determine the selected mode\n selected_mode = ''\n\n if mode == 'auto':\n if num_gpus > 0:\n selected_mode = 'gpu'\n else:\n selected_mode = 'cpu'\n if not _common.cpu_built():\n raise RuntimeError(\"No GPUs available AND CPU \"\n \"implementation is not compiled\")\n\n if mode == 'gpu':\n if not _common.gpu_built():\n raise RuntimeError(\"GPU implementation is not compiled\")\n if num_gpus == 0:\n raise RuntimeError(\"No GPUs are available\")\n selected_mode = 'gpu'\n\n if mode == 'cpu':\n if not _common.cpu_built():\n raise RuntimeError(\"CPU implementation is not compiled\")\n selected_mode = 'cpu'\n\n if n is None:\n thread_limit = -1\n else:\n thread_limit = int(n)\n\n # initialize the device\n if selected_mode == 'gpu':\n self.module = _gpu\n self._device = _gpu.Device(\n os.path.dirname(os.path.realpath(__file__)), thread_limit)\n self._mode = 'gpu'\n elif selected_mode == 'cpu':\n self.module = _cpu\n self._device = _cpu.Device(thread_limit)\n self._mode = 'cpu'\n else:\n raise ValueError(\"Invalid mode\")\n\n @property\n def mode(self):\n \"\"\"str: The active mode.\"\"\"\n return self._mode\n\n def __str__(self):\n \"\"\"Human readable `Device` summary.\"\"\"\n return '<fresnel.Device: ' + self._device.describe() + '>'\n\n\n# determine available Device modes\nif _common.gpu_built():\n if _gpu.get_num_available_devices() > 0:\n Device.available_modes.append('gpu')\n\nif _common.cpu_built():\n Device.available_modes.append('cpu')\n\nif len(Device.available_modes) > 0:\n Device.available_modes.append('auto')\n\n# determine available Device GPUs\nif _common.gpu_built():\n gpus_str = _gpu.Device.getAllGPUs()\n gpus_list = gpus_str.split('\\n')\n if len(gpus_list) >= 2:\n Device.available_gpus = gpus_list[:-1]\n\n\nclass Scene(object):\n \"\"\"Content of the scene to ray trace.\n\n Args:\n device (Device): Device to use when rendering the scene.\n\n camera (camera.Camera): Camera to view the scene. When `None`,\n defaults to::\n\n camera.Orthographic(position=(0, 0, 100),\n look_at=(0, 0, 0),\n up=(0, 1, 0),\n height=100)\n\n lights (list[Light]): Lights to light the scene. When `None`, defaults\n to: ``light.rembrandt()``\n\n `Scene` defines the contents of the scene to be traced, including any number\n of `Geometry` objects, the `Camera`, the `background_color`,\n `background_alpha`, and `lights`.\n\n Every `Scene` must be associated with a `Device`. 
For convenience, `Scene`\n creates a default `Device` when *device* is ``None``.\n\n See Also:\n Tutorials:\n\n - :doc:`examples/00-Basic-tutorials/00-Introduction`\n - :doc:`examples/00-Basic-tutorials/04-Scene-properties`\n - :doc:`examples/00-Basic-tutorials/05-Lighting-setups`\n - :doc:`examples/02-Advanced-topics/01-Devices`\n \"\"\"\n\n def __init__(self, device=None, camera=None, lights=None):\n if device is None:\n device = Device()\n\n self._device = device\n self._scene = self.device.module.Scene(self.device._device)\n self.geometry = []\n if camera is None:\n self.camera = globals()['camera'].Orthographic(position=(0, 0, 100),\n look_at=(0, 0, 0),\n up=(0, 1, 0),\n height=100)\n else:\n self.camera = camera\n\n if lights is None:\n self.lights = light.rembrandt()\n else:\n self.lights = lights\n\n self._tracer = None\n\n def get_extents(self):\n \"\"\"Get the extents of the scene.\n\n Returns:\n (3,2) `numpy.ndarray` of ``numpy.float32``: The lower left and\\\n upper right corners of the scene.\n \"\"\"\n if len(self.geometry) == 0:\n return numpy.array([[0, 0, 0], [0, 0, 0]], dtype=numpy.float32)\n\n scene_extents = self.geometry[0].get_extents()\n for geom in self.geometry[1:]:\n extents = geom.get_extents()\n scene_extents[0, :] = numpy.min(\n [scene_extents[0, :], extents[0, :]], axis=0)\n scene_extents[1, :] = numpy.max(\n [scene_extents[1, :], extents[1, :]], axis=0)\n\n return scene_extents\n\n @property\n def device(self):\n \"\"\"Device: Device this `Scene` is attached to.\"\"\"\n return self._device\n\n @property\n def camera(self):\n \"\"\"camera.Camera: Camera view parameters.\"\"\"\n return camera._from_cpp(self._scene.getCamera())\n\n @camera.setter\n def camera(self, value):\n if isinstance(value, camera.Camera):\n self._scene.setCamera(value._camera)\n else:\n raise TypeError(f\"camera {value} is not a fresnel.camera.Camera\")\n\n @property\n def background_color(self):\n \"\"\"((3, ) `numpy.ndarray` of ``numpy.float32``): Background color \\\n linear RGB.\n\n Note:\n Use `fresnel.color.linear` to convert standard sRGB colors into the\n linear color space used by fresnel.\n \"\"\"\n c = self._scene.getBackgroundColor()\n return numpy.array([c.r, c.g, c.b], dtype=numpy.float32)\n\n @background_color.setter\n def background_color(self, value):\n self._scene.setBackgroundColor(_common.RGBf(*value))\n\n @property\n def background_alpha(self):\n \"\"\"float: Background alpha (opacity) in the range [0,1].\"\"\"\n return self._scene.getBackgroundAlpha()\n\n @background_alpha.setter\n def background_alpha(self, value):\n self._scene.setBackgroundAlpha(value)\n\n @property\n def lights(self):\n \"\"\"list[Light]: Lights in the scene.\n\n `lights` is a sequence of up to 4 directional lights that apply to the\n scene. 
Each light has a direction, color, and size.\n \"\"\"\n return light._LightListProxy(self._scene.getLights())\n\n @lights.setter\n def lights(self, values):\n tmp = light._LightListProxy()\n for v in values:\n tmp.append(v)\n\n self._scene.setLights(tmp._lights)\n\n\ndef preview(scene, w=600, h=370, anti_alias=True):\n \"\"\"Preview a scene.\n\n Args:\n scene (`Scene`): Scene to render.\n w (int): Output image width (in pixels).\n h (int): Output image height (in pixels).\n anti_alias (bool): Whether to perform anti-aliasing.\n\n :py:func:`preview` is a shortcut that renders output with `tracer.Preview`.\n \"\"\"\n t = tracer.Preview(scene.device, w=w, h=h, anti_alias=anti_alias)\n return t.render(scene)\n\n\ndef pathtrace(scene, w=600, h=370, samples=64, light_samples=1):\n \"\"\"Path trace a scene.\n\n Args:\n scene (`Scene`): Scene to render.\n w (int): Output image width (in pixels).\n h (int): Output image height (in pixels).\n samples (int): Number of times to sample the pixels of the scene.\n\n light_samples (int): Number of light samples to take for each pixel\n sample.\n\n :py:func:`pathtrace` is a shortcut that renders output with `tracer.Path`.\n \"\"\"\n t = tracer.Path(scene.device, w=w, h=h)\n t.sample(scene, samples=samples, light_samples=light_samples)\n return t.output\n" ]
[ [ "numpy.array", "numpy.max", "numpy.min" ] ]
brokenegg/transformer
[ "c402ccffd6be1e01c589ad2b9064a5837d4464c7" ]
[ "brokenegg_transformer/modeling/tf_utils.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common TF utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nimport tensorflow as tf\n\nfrom tensorflow.python.util import deprecation\nfrom brokenegg_transformer.modeling import activations\n\n\[email protected](\n None,\n \"tf.keras.layers.Layer supports multiple positional args and kwargs as \"\n \"input tensors. pack/unpack inputs to override __call__ is no longer \"\n \"needed.\"\n)\ndef pack_inputs(inputs):\n \"\"\"Pack a list of `inputs` tensors to a tuple.\n\n Args:\n inputs: a list of tensors.\n\n Returns:\n a tuple of tensors. if any input is None, replace it with a special constant\n tensor.\n \"\"\"\n inputs = tf.nest.flatten(inputs)\n outputs = []\n for x in inputs:\n if x is None:\n outputs.append(tf.constant(0, shape=[], dtype=tf.int32))\n else:\n outputs.append(x)\n return tuple(outputs)\n\n\[email protected](\n None,\n \"tf.keras.layers.Layer supports multiple positional args and kwargs as \"\n \"input tensors. pack/unpack inputs to override __call__ is no longer \"\n \"needed.\"\n)\ndef unpack_inputs(inputs):\n \"\"\"unpack a tuple of `inputs` tensors to a tuple.\n\n Args:\n inputs: a list of tensors.\n\n Returns:\n a tuple of tensors. if any input is a special constant tensor, replace it\n with None.\n \"\"\"\n inputs = tf.nest.flatten(inputs)\n outputs = []\n for x in inputs:\n if is_special_none_tensor(x):\n outputs.append(None)\n else:\n outputs.append(x)\n x = tuple(outputs)\n\n # To trick the very pointless 'unbalanced-tuple-unpacking' pylint check\n # from triggering.\n if len(x) == 1:\n return x[0]\n return tuple(outputs)\n\n\ndef is_special_none_tensor(tensor):\n \"\"\"Checks if a tensor is a special None Tensor.\"\"\"\n return tensor.shape.ndims == 0 and tensor.dtype == tf.int32\n\n\n# TODO(hongkuny): consider moving custom string-map lookup to keras api.\ndef get_activation(identifier):\n \"\"\"Maps a identifier to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n It checks string first and if it is one of customized activation not in TF,\n the corresponding activation will be returned. 
For non-customized activation\n names and callable identifiers, always fallback to tf.keras.activations.get.\n\n Args:\n identifier: String name of the activation function or callable.\n\n Returns:\n A Python function corresponding to the activation function.\n \"\"\"\n if isinstance(identifier, six.string_types):\n name_to_fn = {\n \"gelu\": activations.gelu,\n \"simple_swish\": activations.simple_swish,\n \"hard_swish\": activations.hard_swish,\n \"identity\": activations.identity,\n }\n identifier = str(identifier).lower()\n if identifier in name_to_fn:\n return tf.keras.activations.get(name_to_fn[identifier])\n return tf.keras.activations.get(identifier)\n\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n raise ValueError(\n \"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not \"\n \"equal to the expected tensor rank `%s`\" %\n (name, actual_rank, str(tensor.shape), str(expected_rank)))\n" ]
[ [ "tensorflow.python.util.deprecation.deprecated", "tensorflow.shape", "tensorflow.nest.flatten", "tensorflow.constant", "tensorflow.keras.activations.get" ] ]
TammoR/LogicalFactorisationMachines
[ "55bd94001f2852ea61f69cbb07a0cbdb41231028" ]
[ "lom/_numba/lom_outputs_fuzzy.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nOutput functions for logical operator machine products\n\"\"\"\n\nimport numpy as np\nfrom numba import jit, prange # int8, float64,\n\n# fuzzy output functions mapping from scalar vectors of probabilities to\n# to single data-point\n\n\n# OR-AND\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_AND_product_fuzzy(Z_n, U_d):\n \"\"\"\n Compute probability of emitting a zero for fuzzy vectors under OR-AND logic.\n \"\"\"\n out = 1\n for l in range(Z_n.shape[0]):\n out *= 1 - Z_n[l] * U_d[l]\n return 1 - out # map to [-1,1], this is a shortcut here. it's correct.\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_AND_product_fuzzy_3d(Z_n, U_d, V_m):\n \"\"\"\n Compute probability of emitting a zero for fuzzy vectors under OR-AND logic.\n \"\"\"\n out = np.float64(1.0)\n for l in range(Z_n.shape[0]):\n out *= 1 - ( Z_n[l] * U_d[l] * V_m[l] )\n return 1 - out\n\n\n# XOR-AND\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True, parallel=False)\ndef XOR_AND_product_fuzzy(Z_n, U_d):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= 1 - Z_n[l_prime] * U_d[l_prime]\n out += Z_n[l] * U_d[l] * temp\n return out\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_AND_product_fuzzy_3d(Z_n, U_d, V_m):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= 1 - Z_n[l_prime] * U_d[l_prime] * V_m[l_prime]\n out += Z_n[l] * U_d[l] * V_m[l] * temp\n return out\n\n\n# XOR-NAND\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_NAND_product_fuzzy(Z_n, U_d):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= Z_n[l_prime] * U_d[l_prime]\n out += (1 - Z_n[l] * U_d[l]) * temp\n return out\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_NAND_product_fuzzy_3d(Z_n, U_d, V_m):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= Z_n[l_prime] * U_d[l_prime] * V_m[l_prime]\n out += (1 - Z_n[l] * U_d[l] * V_m[l]) * temp\n return out\n\n\n# OR-XOR\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_XOR_product_fuzzy(Z_n, U_d):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= (Z_n[l] * U_d[l]) + (1 - Z_n[l]) * (1 - U_d[l])\n return 1 - temp\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_XOR_product_fuzzy_3d(Z_n, U_d, V_m):\n temp = np.float64(1)\n # this is hard to generalise to arbitrary D\n for l in range(Z_n.shape[0]):\n temp *= 1 - Z_n[l] * (1 - U_d[l]) * (1 - V_m[l]) +\\\n U_d[l] * (1 - V_m[l]) * (1 - Z_n[l]) +\\\n V_m[l] * (1 - Z_n[l]) * (1 - U_d[l])\n return 1 - temp\n\n\n# NAND-XOR\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef NAND_XOR_product_fuzzy(Z_n, U_d):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= Z_n[l] * (1 - U_d[l]) + U_d[l] * (1 - Z_n[l])\n return 1 - temp\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef NAND_XOR_product_fuzzy_3d(Z_n, U_d, V_m):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= Z_n[l] * (1 - U_d[l]) * (1 - V_m[l]) +\\\n V_m[l] * (1 - Z_n[l]) * (1 - 
U_d[l]) +\\\n U_d[l] * (1 - V_m[l]) * (1 - Z_n[l])\n\n return 1 - temp\n\n\n# XOR_XOR\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_XOR_product_fuzzy(Z_n, U_d):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= Z_n[l_prime] * U_d[l_prime] +\\\n (1 - Z_n[l_prime]) * (1 - U_d[l_prime])\n out += temp * ((1 - Z_n[l]) * U_d[l] + (1 - U_d[l]) * Z_n[l])\n return out\n\n\n@jit('float64(float64, float64, float64)', nogil=True, nopython=True)\ndef p_XOR_fuzzy_3d(z, u, v):\n \"\"\"\n Compute XOR probability given p(x), p(u), p(z)\n \"\"\"\n return 3 * z * u * v - 2 * (z * u + u * v + z * v) + z + u + v\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_XOR_product_fuzzy_3d(Z_n, U_d, V_m):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= 1 - p_XOR_fuzzy_3d(\n Z_n[l_prime], U_d[l_prime], V_m[l_prime])\n out += temp * p_XOR_fuzzy_3d(Z_n[l], U_d[l], V_m[l])\n return out\n\n\n# XOR_NXOR\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_NXOR_product_fuzzy(Z_n, U_d):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= Z_n[l_prime] * (1 - U_d[l_prime]) +\\\n (1 - Z_n[l_prime]) * U_d[l_prime]\n out += temp * ((Z_n[l] * U_d[l]) + (1 - U_d[l]) * (1 - Z_n[l]))\n return out\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_NXOR_product_fuzzy_3d(Z_n, U_d, V_m):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= p_XOR_fuzzy_3d(\n Z_n[l_prime], U_d[l_prime], V_m[l_prime])\n out += temp * (1 - p_XOR_fuzzy_3d(Z_n[l], U_d[l], V_m[l]))\n return out\n\n\n# OR_NAND\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_NAND_product_fuzzy(Z_n, U_d):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= Z_n[l] * U_d[l]\n return 1 - temp\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_NAND_product_fuzzy_3d(Z_n, U_d, V_m):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= Z_n[l] * U_d[l] * V_m[l]\n return 1 - temp\n\n\n# MAX_AND\n@jit('float64[:, :](float64[:, :], float64[:, :], float64[:])',\n nogil=True, nopython=False, parallel=True)\ndef MAX_AND_product_fuzzy(Z, U, lbdas):\n N = Z.shape[0]\n D = U.shape[0]\n L = Z.shape[1]\n out = np.zeros([N, D]) # , dtype=np.float)\n for n in prange(N):\n for d in range(D):\n acc = 0 # accumulator for sum\n for l1 in range(L):\n temp1 = Z[n, l1] * U[d, l1] * lbdas[l1]\n # check for explaining away\n prod = 1\n for l2 in range(L):\n if l1 == l2:\n continue\n temp2 = Z[n, l2] * U[d, l2]\n if temp2 * lbdas[l2] > temp1:\n prod *= 1 - temp2\n acc += temp1 * prod\n out[n, d] = acc\n return out \n\n" ]
[ [ "numpy.float64", "numpy.zeros" ] ]
ipmach/Thesis2021
[ "91dbb0eebba64f1fa2c18562e2c9f35f532ef7c0" ]
[ "src/python_code/Models/model_PAE_CNN.py" ]
[ "from Models.PAE_models.Encoder_CNN import EncoderCNN\nfrom Models.PAE_models.Decoder_CNN import DecoderCNN\nfrom Models.PAE_models.Bijecter import RealNVP\nfrom sklearn.preprocessing import MinMaxScaler\nfrom Models.AE_CNN_interface import AE_CNN\nimport joblib\n\n\nclass PAECNN(AE_CNN):\n\n def __init__(self, filters, dim, switch=False, **kwargs):\n \"\"\"\n Wrapper for the Probabilistic AutoEncoder (PAE)\n :param dim: hyperparameters of the model [h_dim, z_dim, real_dim]\n \"\"\"\n # Define the hyperparameters\n super(PAECNN, self).__init__(filters, dim, **kwargs)\n # Initialize the models\n self.encoder_ = EncoderCNN(filters, self.z_dim)\n self.decoder_ = DecoderCNN(filters)\n self.b = RealNVP(num_coupling_layers=6, z_dim=self.z_dim)\n self.scaler = MinMaxScaler()\n # Use bijecter in incode or not\n self.switch = switch\n\n self.encoder_.build(input_shape=(None, self.img_shape[0],\n self.img_shape[1], self.img_shape[2]))\n self.decoder_.build(input_shape=(None, self.z_dim))\n self.b.build(input_shape=(None, self.z_dim))\n self.encode_ = self.encoder_\n\n def load_weights_model(self, list_path):\n \"\"\"\n Load the weights of the model\n :param path_encoder: path of the encoder weights (.h5)\n :param path_decoder: path of the decoder weights (.h5)\n :param path_discriminator: path of the discriminator weights (.h5)\n :param path_scaler: path scaler (.pkl)\n :return:\n \"\"\"\n [path_encoder, path_decoder, path_discriminator, path_scaler] = list_path\n self.encoder_.load_weights(path_encoder)\n self.decoder_.load_weights(path_decoder)\n self.b.load_weights(path_discriminator)\n self.scaler = joblib.load(path_scaler)\n\n def save_weights_model(self, list_path):\n \"\"\"\n Save the weights of the model\n \"\"\"\n [path_encoder, path_decoder, path_discriminator, path_scaler] = list_path\n self.encoder_.save_weights(path_encoder)\n self.decoder_.save_weights(path_decoder)\n self.b.save_weights(path_discriminator)\n joblib.dump(self.scaler, path_scaler + 'scaler.pkl')\n\n def encode(self, x):\n \"\"\"\n Encode input\n :param x: input\n :return: input in the latent space\n \"\"\"\n if self.switch:\n z = self.encoder_(x)\n return self.bijecter(z)\n else:\n return self.encoder_(x)\n\n def decode(self, z):\n \"\"\"\n Decode with activation function sigmoid\n :param z: latent space\n :return: output model\n \"\"\"\n if self.switch:\n x = self.bijecter(z, inverse=True)\n return self.decoder_(x)\n else:\n return self.decoder_(z)\n\n def bijecter(self, z, inverse=False):\n if inverse:\n b_data, _ = self.b.predict(z)\n return self.scaler.inverse_transform(b_data)\n else:\n b_data = self.scaler.transform(z)\n b_data, _ = self.b(b_data)\n return b_data\n\n def call_(self, inputs, training=None, mask=None, index=None):\n \"\"\"\n Function that works as __call__\n :param inputs: input data\n :param training: (Not use)\n :param mask: (Not use)\n :return\n \"\"\"\n return self.decoder_(self.encoder_(inputs))" ]
[ [ "sklearn.preprocessing.MinMaxScaler" ] ]
crougeux/-a-i_v1.6.3_modif
[ "b499a812e79f335d082d3f9b1070e0465ad67bab" ]
[ "build/numpy/numpy/distutils/misc_util.py" ]
[ "from __future__ import division, absolute_import, print_function\n\nimport os\nimport re\nimport sys\nimport imp\nimport copy\nimport glob\nimport atexit\nimport tempfile\nimport subprocess\nimport shutil\n\nimport distutils\nfrom distutils.errors import DistutilsError\n\ntry:\n set\nexcept NameError:\n from sets import Set as set\n\nfrom numpy.distutils.compat import get_exception\n\n__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',\n 'dict_append', 'appendpath', 'generate_config_py',\n 'get_cmd', 'allpath', 'get_mathlibs',\n 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',\n 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',\n 'has_f_sources', 'has_cxx_sources', 'filter_sources',\n 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',\n 'get_script_files', 'get_lib_source_files', 'get_data_files',\n 'dot_join', 'get_frame', 'minrelpath', 'njoin',\n 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',\n 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info']\n\nclass InstallableLib(object):\n \"\"\"\n Container to hold information on an installable library.\n\n Parameters\n ----------\n name : str\n Name of the installed library.\n build_info : dict\n Dictionary holding build information.\n target_dir : str\n Absolute path specifying where to install the library.\n\n See Also\n --------\n Configuration.add_installed_library\n\n Notes\n -----\n The three parameters are stored as attributes with the same names.\n\n \"\"\"\n def __init__(self, name, build_info, target_dir):\n self.name = name\n self.build_info = build_info\n self.target_dir = target_dir\n\ndef quote_args(args):\n # don't used _nt_quote_args as it does not check if\n # args items already have quotes or not.\n args = list(args)\n for i in range(len(args)):\n a = args[i]\n if ' ' in a and a[0] not in '\"\\'':\n args[i] = '\"%s\"' % (a)\n return args\n\ndef allpath(name):\n \"Convert a /-separated pathname to one using the OS's path separator.\"\n splitted = name.split('/')\n return os.path.join(*splitted)\n\ndef rel_path(path, parent_path):\n \"\"\"Return path relative to parent_path.\n \"\"\"\n pd = os.path.abspath(parent_path)\n apath = os.path.abspath(path)\n if len(apath)<len(pd):\n return path\n if apath==pd:\n return ''\n if pd == apath[:len(pd)]:\n assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))\n path = apath[len(pd)+1:]\n return path\n\ndef get_path_from_frame(frame, parent_path=None):\n \"\"\"Return path of the module given a frame object from the call stack.\n\n Returned path is relative to parent_path when given,\n otherwise it is absolute path.\n \"\"\"\n\n # First, try to find if the file name is in the frame.\n try:\n caller_file = eval('__file__', frame.f_globals, frame.f_locals)\n d = os.path.dirname(os.path.abspath(caller_file))\n except NameError:\n # __file__ is not defined, so let's try __name__. 
We try this second\n # because setuptools spoofs __name__ to be '__main__' even though\n # sys.modules['__main__'] might be something else, like easy_install(1).\n caller_name = eval('__name__', frame.f_globals, frame.f_locals)\n __import__(caller_name)\n mod = sys.modules[caller_name]\n if hasattr(mod, '__file__'):\n d = os.path.dirname(os.path.abspath(mod.__file__))\n else:\n # we're probably running setup.py as execfile(\"setup.py\")\n # (likely we're building an egg)\n d = os.path.abspath('.')\n # hmm, should we use sys.argv[0] like in __builtin__ case?\n\n if parent_path is not None:\n d = rel_path(d, parent_path)\n\n return d or '.'\n\ndef njoin(*path):\n \"\"\"Join two or more pathname components +\n - convert a /-separated pathname to one using the OS's path separator.\n - resolve `..` and `.` from path.\n\n Either passing n arguments as in njoin('a','b'), or a sequence\n of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.\n \"\"\"\n paths = []\n for p in path:\n if is_sequence(p):\n # njoin(['a', 'b'], 'c')\n paths.append(njoin(*p))\n else:\n assert is_string(p)\n paths.append(p)\n path = paths\n if not path:\n # njoin()\n joined = ''\n else:\n # njoin('a', 'b')\n joined = os.path.join(*path)\n if os.path.sep != '/':\n joined = joined.replace('/', os.path.sep)\n return minrelpath(joined)\n\ndef get_mathlibs(path=None):\n \"\"\"Return the MATHLIB line from numpyconfig.h\n \"\"\"\n if path is not None:\n config_file = os.path.join(path, '_numpyconfig.h')\n else:\n # Look for the file in each of the numpy include directories.\n dirs = get_numpy_include_dirs()\n for path in dirs:\n fn = os.path.join(path, '_numpyconfig.h')\n if os.path.exists(fn):\n config_file = fn\n break\n else:\n raise DistutilsError('_numpyconfig.h not found in numpy include '\n 'dirs %r' % (dirs,))\n\n fid = open(config_file)\n mathlibs = []\n s = '#define MATHLIB'\n for line in fid:\n if line.startswith(s):\n value = line[len(s):].strip()\n if value:\n mathlibs.extend(value.split(','))\n fid.close()\n return mathlibs\n\ndef minrelpath(path):\n \"\"\"Resolve `..` and '.' from path.\n \"\"\"\n if not is_string(path):\n return path\n if '.' not in path:\n return path\n l = path.split(os.sep)\n while l:\n try:\n i = l.index('.', 1)\n except ValueError:\n break\n del l[i]\n j = 1\n while l:\n try:\n i = l.index('..', j)\n except ValueError:\n break\n if l[i-1]=='..':\n j += 1\n else:\n del l[i], l[i-1]\n j = 1\n if not l:\n return ''\n return os.sep.join(l)\n\ndef _fix_paths(paths, local_path, include_non_existing):\n assert is_sequence(paths), repr(type(paths))\n new_paths = []\n assert not is_string(paths), repr(paths)\n for n in paths:\n if is_string(n):\n if '*' in n or '?' 
in n:\n p = glob.glob(n)\n p2 = glob.glob(njoin(local_path, n))\n if p2:\n new_paths.extend(p2)\n elif p:\n new_paths.extend(p)\n else:\n if include_non_existing:\n new_paths.append(n)\n print('could not resolve pattern in %r: %r' %\n (local_path, n))\n else:\n n2 = njoin(local_path, n)\n if os.path.exists(n2):\n new_paths.append(n2)\n else:\n if os.path.exists(n):\n new_paths.append(n)\n elif include_non_existing:\n new_paths.append(n)\n if not os.path.exists(n):\n print('non-existing path in %r: %r' %\n (local_path, n))\n\n elif is_sequence(n):\n new_paths.extend(_fix_paths(n, local_path, include_non_existing))\n else:\n new_paths.append(n)\n return [minrelpath(p) for p in new_paths]\n\ndef gpaths(paths, local_path='', include_non_existing=True):\n \"\"\"Apply glob to paths and prepend local_path if needed.\n \"\"\"\n if is_string(paths):\n paths = (paths,)\n return _fix_paths(paths, local_path, include_non_existing)\n\n\n_temporary_directory = None\ndef clean_up_temporary_directory():\n global _temporary_directory\n if not _temporary_directory:\n return\n try:\n shutil.rmtree(_temporary_directory)\n except OSError:\n pass\n _temporary_directory = None\n\ndef make_temp_file(suffix='', prefix='', text=True):\n global _temporary_directory\n if not _temporary_directory:\n _temporary_directory = tempfile.mkdtemp()\n atexit.register(clean_up_temporary_directory)\n fid, name = tempfile.mkstemp(suffix=suffix,\n prefix=prefix,\n dir=_temporary_directory,\n text=text)\n fo = os.fdopen(fid, 'w')\n return fo, name\n\n# Hooks for colored terminal output.\n# See also http://www.livinglogic.de/Python/ansistyle\ndef terminal_has_colors():\n if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:\n # Avoid importing curses that causes illegal operation\n # with a message:\n # PYTHON2 caused an invalid page fault in\n # module CYGNURSES7.DLL as 015f:18bbfc28\n # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]\n # ssh to Win32 machine from debian\n # curses.version is 2.2\n # CYGWIN_98-4.10, release 1.5.7(0.109/3/2))\n return 0\n if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():\n try:\n import curses\n curses.setupterm()\n if (curses.tigetnum(\"colors\") >= 0\n and curses.tigetnum(\"pairs\") >= 0\n and ((curses.tigetstr(\"setf\") is not None\n and curses.tigetstr(\"setb\") is not None)\n or (curses.tigetstr(\"setaf\") is not None\n and curses.tigetstr(\"setab\") is not None)\n or curses.tigetstr(\"scp\") is not None)):\n return 1\n except Exception:\n pass\n return 0\n\nif terminal_has_colors():\n _colour_codes = dict(black=0, red=1, green=2, yellow=3,\n blue=4, magenta=5, cyan=6, white=7, default=9)\n def colour_text(s, fg=None, bg=None, bold=False):\n seq = []\n if bold:\n seq.append('1')\n if fg:\n fgcode = 30 + _colour_codes.get(fg.lower(), 0)\n seq.append(str(fgcode))\n if bg:\n bgcode = 40 + _colour_codes.get(fg.lower(), 7)\n seq.append(str(bgcode))\n if seq:\n return '\\x1b[%sm%s\\x1b[0m' % (';'.join(seq), s)\n else:\n return s\nelse:\n def colour_text(s, fg=None, bg=None):\n return s\n\ndef default_text(s):\n return colour_text(s, 'default')\ndef red_text(s):\n return colour_text(s, 'red')\ndef green_text(s):\n return colour_text(s, 'green')\ndef yellow_text(s):\n return colour_text(s, 'yellow')\ndef cyan_text(s):\n return colour_text(s, 'cyan')\ndef blue_text(s):\n return colour_text(s, 'blue')\n\n#########################\n\ndef cyg2win32(path):\n if sys.platform=='cygwin' and path.startswith('/cygdrive'):\n path = path[10] + ':' + os.path.normcase(path[11:])\n return 
path\n\ndef mingw32():\n \"\"\"Return true when using mingw32 environment.\n \"\"\"\n if sys.platform=='win32':\n if os.environ.get('OSTYPE', '')=='msys':\n return True\n if os.environ.get('MSYSTEM', '')=='MINGW32':\n return True\n return False\n\ndef msvc_runtime_library():\n \"Return name of MSVC runtime library if Python was built with MSVC >= 7\"\n msc_pos = sys.version.find('MSC v.')\n if msc_pos != -1:\n msc_ver = sys.version[msc_pos+6:msc_pos+10]\n lib = {'1300': 'msvcr70', # MSVC 7.0\n '1310': 'msvcr71', # MSVC 7.1\n '1400': 'msvcr80', # MSVC 8\n '1500': 'msvcr90', # MSVC 9 (VS 2008)\n '1600': 'msvcr100', # MSVC 10 (aka 2010)\n }.get(msc_ver, None)\n else:\n lib = None\n return lib\n\n\n#########################\n\n#XXX need support for .C that is also C++\ncxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\\Z', re.I).match\nfortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\\Z', re.I).match\nf90_ext_match = re.compile(r'.*[.](f90|f95)\\Z', re.I).match\nf90_module_name_match = re.compile(r'\\s*module\\s*(?P<name>[\\w_]+)', re.I).match\ndef _get_f90_modules(source):\n \"\"\"Return a list of Fortran f90 module names that\n given source file defines.\n \"\"\"\n if not f90_ext_match(source):\n return []\n modules = []\n f = open(source, 'r')\n for line in f:\n m = f90_module_name_match(line)\n if m:\n name = m.group('name')\n modules.append(name)\n # break # XXX can we assume that there is one module per file?\n f.close()\n return modules\n\ndef is_string(s):\n return isinstance(s, str)\n\ndef all_strings(lst):\n \"\"\"Return True if all items in lst are string objects. \"\"\"\n for item in lst:\n if not is_string(item):\n return False\n return True\n\ndef is_sequence(seq):\n if is_string(seq):\n return False\n try:\n len(seq)\n except:\n return False\n return True\n\ndef is_glob_pattern(s):\n return is_string(s) and ('*' in s or '?' 
is s)\n\ndef as_list(seq):\n if is_sequence(seq):\n return list(seq)\n else:\n return [seq]\n\ndef get_language(sources):\n # not used in numpy/scipy packages, use build_ext.detect_language instead\n \"\"\"Determine language value (c,f77,f90) from sources \"\"\"\n language = None\n for source in sources:\n if isinstance(source, str):\n if f90_ext_match(source):\n language = 'f90'\n break\n elif fortran_ext_match(source):\n language = 'f77'\n return language\n\ndef has_f_sources(sources):\n \"\"\"Return True if sources contains Fortran files \"\"\"\n for source in sources:\n if fortran_ext_match(source):\n return True\n return False\n\ndef has_cxx_sources(sources):\n \"\"\"Return True if sources contains C++ files \"\"\"\n for source in sources:\n if cxx_ext_match(source):\n return True\n return False\n\ndef filter_sources(sources):\n \"\"\"Return four lists of filenames containing\n C, C++, Fortran, and Fortran 90 module sources,\n respectively.\n \"\"\"\n c_sources = []\n cxx_sources = []\n f_sources = []\n fmodule_sources = []\n for source in sources:\n if fortran_ext_match(source):\n modules = _get_f90_modules(source)\n if modules:\n fmodule_sources.append(source)\n else:\n f_sources.append(source)\n elif cxx_ext_match(source):\n cxx_sources.append(source)\n else:\n c_sources.append(source)\n return c_sources, cxx_sources, f_sources, fmodule_sources\n\n\ndef _get_headers(directory_list):\n # get *.h files from list of directories\n headers = []\n for d in directory_list:\n head = glob.glob(os.path.join(d, \"*.h\")) #XXX: *.hpp files??\n headers.extend(head)\n return headers\n\ndef _get_directories(list_of_sources):\n # get unique directories from list of sources.\n direcs = []\n for f in list_of_sources:\n d = os.path.split(f)\n if d[0] != '' and not d[0] in direcs:\n direcs.append(d[0])\n return direcs\n\ndef get_dependencies(sources):\n #XXX scan sources for include statements\n return _get_headers(_get_directories(sources))\n\ndef is_local_src_dir(directory):\n \"\"\"Return true if directory is local directory.\n \"\"\"\n if not is_string(directory):\n return False\n abs_dir = os.path.abspath(directory)\n c = os.path.commonprefix([os.getcwd(), abs_dir])\n new_dir = abs_dir[len(c):].split(os.sep)\n if new_dir and not new_dir[0]:\n new_dir = new_dir[1:]\n if new_dir and new_dir[0]=='build':\n return False\n new_dir = os.sep.join(new_dir)\n return os.path.isdir(new_dir)\n\ndef general_source_files(top_path):\n pruned_directories = {'CVS':1, '.svn':1, 'build':1}\n prune_file_pat = re.compile(r'(?:[~#]|\\.py[co]|\\.o)$')\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n pruned = [ d for d in dirnames if d not in pruned_directories ]\n dirnames[:] = pruned\n for f in filenames:\n if not prune_file_pat.search(f):\n yield os.path.join(dirpath, f)\n\ndef general_source_directories_files(top_path):\n \"\"\"Return a directory name relative to top_path and\n files contained.\n \"\"\"\n pruned_directories = ['CVS', '.svn', 'build']\n prune_file_pat = re.compile(r'(?:[~#]|\\.py[co]|\\.o)$')\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n pruned = [ d for d in dirnames if d not in pruned_directories ]\n dirnames[:] = pruned\n for d in dirnames:\n dpath = os.path.join(dirpath, d)\n rpath = rel_path(dpath, top_path)\n files = []\n for f in os.listdir(dpath):\n fn = os.path.join(dpath, f)\n if os.path.isfile(fn) and not prune_file_pat.search(fn):\n files.append(fn)\n yield rpath, files\n dpath = top_path\n rpath = rel_path(dpath, top_path)\n filenames = 
[os.path.join(dpath, f) for f in os.listdir(dpath) \\\n if not prune_file_pat.search(f)]\n files = [f for f in filenames if os.path.isfile(f)]\n yield rpath, files\n\n\ndef get_ext_source_files(ext):\n # Get sources and any include files in the same directory.\n filenames = []\n sources = [_m for _m in ext.sources if is_string(_m)]\n filenames.extend(sources)\n filenames.extend(get_dependencies(sources))\n for d in ext.depends:\n if is_local_src_dir(d):\n filenames.extend(list(general_source_files(d)))\n elif os.path.isfile(d):\n filenames.append(d)\n return filenames\n\ndef get_script_files(scripts):\n scripts = [_m for _m in scripts if is_string(_m)]\n return scripts\n\ndef get_lib_source_files(lib):\n filenames = []\n sources = lib[1].get('sources', [])\n sources = [_m for _m in sources if is_string(_m)]\n filenames.extend(sources)\n filenames.extend(get_dependencies(sources))\n depends = lib[1].get('depends', [])\n for d in depends:\n if is_local_src_dir(d):\n filenames.extend(list(general_source_files(d)))\n elif os.path.isfile(d):\n filenames.append(d)\n return filenames\n\ndef get_shared_lib_extension(is_python_ext=False):\n \"\"\"Return the correct file extension for shared libraries.\n\n Parameters\n ----------\n is_python_ext : bool, optional\n Whether the shared library is a Python extension. Default is False.\n\n Returns\n -------\n so_ext : str\n The shared library extension.\n\n Notes\n -----\n For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,\n and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on\n POSIX systems according to PEP 3149. For Python 3.2 this is implemented on\n Linux, but not on OS X.\n\n \"\"\"\n confvars = distutils.sysconfig.get_config_vars()\n # SO is deprecated in 3.3.1, use EXT_SUFFIX instead\n so_ext = confvars.get('EXT_SUFFIX', None)\n if so_ext is None:\n so_ext = confvars.get('SO', '')\n\n if not is_python_ext:\n # hardcode known values, config vars (including SHLIB_SUFFIX) are\n # unreliable (see #3182)\n # darwin, windows and debug linux are wrong in 3.3.1 and older\n if (sys.platform.startswith('linux') or\n sys.platform.startswith('gnukfreebsd')):\n so_ext = '.so'\n elif sys.platform.startswith('darwin'):\n so_ext = '.dylib'\n elif sys.platform.startswith('win'):\n so_ext = '.dll'\n else:\n # fall back to config vars for unknown platforms\n # fix long extension for Python >=3.2, see PEP 3149.\n if 'SOABI' in confvars:\n # Does nothing unless SOABI config var exists\n so_ext = so_ext.replace('.' 
+ confvars.get('SOABI'), '', 1)\n\n return so_ext\n\ndef get_data_files(data):\n if is_string(data):\n return [data]\n sources = data[1]\n filenames = []\n for s in sources:\n if hasattr(s, '__call__'):\n continue\n if is_local_src_dir(s):\n filenames.extend(list(general_source_files(s)))\n elif is_string(s):\n if os.path.isfile(s):\n filenames.append(s)\n else:\n print('Not existing data file:', s)\n else:\n raise TypeError(repr(s))\n return filenames\n\ndef dot_join(*args):\n return '.'.join([a for a in args if a])\n\ndef get_frame(level=0):\n \"\"\"Return frame object from call stack with given level.\n \"\"\"\n try:\n return sys._getframe(level+1)\n except AttributeError:\n frame = sys.exc_info()[2].tb_frame\n for _ in range(level+1):\n frame = frame.f_back\n return frame\n\n\n######################\n\nclass Configuration(object):\n\n _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',\n 'libraries', 'headers', 'scripts', 'py_modules',\n 'installed_libraries', 'define_macros']\n _dict_keys = ['package_dir', 'installed_pkg_config']\n _extra_keys = ['name', 'version']\n\n numpy_include_dirs = []\n\n def __init__(self,\n package_name=None,\n parent_name=None,\n top_path=None,\n package_path=None,\n caller_level=1,\n setup_name='setup.py',\n **attrs):\n \"\"\"Construct configuration instance of a package.\n\n package_name -- name of the package\n Ex.: 'distutils'\n parent_name -- name of the parent package\n Ex.: 'numpy'\n top_path -- directory of the toplevel package\n Ex.: the directory where the numpy package source sits\n package_path -- directory of package. Will be computed by magic from the\n directory of the caller module if not specified\n Ex.: the directory where numpy.distutils is\n caller_level -- frame level to caller namespace, internal parameter.\n \"\"\"\n self.name = dot_join(parent_name, package_name)\n self.version = None\n\n caller_frame = get_frame(caller_level)\n self.local_path = get_path_from_frame(caller_frame, top_path)\n # local_path -- directory of a file (usually setup.py) that\n # defines a configuration() function.\n # local_path -- directory of a file (usually setup.py) that\n # defines a configuration() function.\n if top_path is None:\n top_path = self.local_path\n self.local_path = ''\n if package_path is None:\n package_path = self.local_path\n elif os.path.isdir(njoin(self.local_path, package_path)):\n package_path = njoin(self.local_path, package_path)\n if not os.path.isdir(package_path or '.'):\n raise ValueError(\"%r is not a directory\" % (package_path,))\n self.top_path = top_path\n self.package_path = package_path\n # this is the relative path in the installed package\n self.path_in_package = os.path.join(*self.name.split('.'))\n\n self.list_keys = self._list_keys[:]\n self.dict_keys = self._dict_keys[:]\n\n for n in self.list_keys:\n v = copy.copy(attrs.get(n, []))\n setattr(self, n, as_list(v))\n\n for n in self.dict_keys:\n v = copy.copy(attrs.get(n, {}))\n setattr(self, n, v)\n\n known_keys = self.list_keys + self.dict_keys\n self.extra_keys = self._extra_keys[:]\n for n in attrs.keys():\n if n in known_keys:\n continue\n a = attrs[n]\n setattr(self, n, a)\n if isinstance(a, list):\n self.list_keys.append(n)\n elif isinstance(a, dict):\n self.dict_keys.append(n)\n else:\n self.extra_keys.append(n)\n\n if os.path.exists(njoin(package_path, '__init__.py')):\n self.packages.append(self.name)\n self.package_dir[self.name] = package_path\n\n self.options = dict(\n ignore_setup_xxx_py = False,\n assume_default_configuration = 
False,\n delegate_options_to_subpackages = False,\n quiet = False,\n )\n\n caller_instance = None\n for i in range(1, 3):\n try:\n f = get_frame(i)\n except ValueError:\n break\n try:\n caller_instance = eval('self', f.f_globals, f.f_locals)\n break\n except NameError:\n pass\n if isinstance(caller_instance, self.__class__):\n if caller_instance.options['delegate_options_to_subpackages']:\n self.set_options(**caller_instance.options)\n\n self.setup_name = setup_name\n\n def todict(self):\n \"\"\"\n Return a dictionary compatible with the keyword arguments of distutils\n setup function.\n\n Examples\n --------\n >>> setup(**config.todict()) #doctest: +SKIP\n \"\"\"\n\n self._optimize_data_files()\n d = {}\n known_keys = self.list_keys + self.dict_keys + self.extra_keys\n for n in known_keys:\n a = getattr(self, n)\n if a:\n d[n] = a\n return d\n\n def info(self, message):\n if not self.options['quiet']:\n print(message)\n\n def warn(self, message):\n sys.stderr.write('Warning: %s' % (message,))\n\n def set_options(self, **options):\n \"\"\"\n Configure Configuration instance.\n\n The following options are available:\n - ignore_setup_xxx_py\n - assume_default_configuration\n - delegate_options_to_subpackages\n - quiet\n\n \"\"\"\n for key, value in options.items():\n if key in self.options:\n self.options[key] = value\n else:\n raise ValueError('Unknown option: '+key)\n\n def get_distribution(self):\n \"\"\"Return the distutils distribution object for self.\"\"\"\n from numpy.distutils.core import get_distribution\n return get_distribution()\n\n def _wildcard_get_subpackage(self, subpackage_name,\n parent_name,\n caller_level = 1):\n l = subpackage_name.split('.')\n subpackage_path = njoin([self.local_path]+l)\n dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]\n config_list = []\n for d in dirs:\n if not os.path.isfile(njoin(d, '__init__.py')):\n continue\n if 'build' in d.split(os.sep):\n continue\n n = '.'.join(d.split(os.sep)[-len(l):])\n c = self.get_subpackage(n,\n parent_name = parent_name,\n caller_level = caller_level+1)\n config_list.extend(c)\n return config_list\n\n def _get_configuration_from_setup_py(self, setup_py,\n subpackage_name,\n subpackage_path,\n parent_name,\n caller_level = 1):\n # In case setup_py imports local modules:\n sys.path.insert(0, os.path.dirname(setup_py))\n try:\n fo_setup_py = open(setup_py, 'U')\n setup_name = os.path.splitext(os.path.basename(setup_py))[0]\n n = dot_join(self.name, subpackage_name, setup_name)\n setup_module = imp.load_module('_'.join(n.split('.')),\n fo_setup_py,\n setup_py,\n ('.py', 'U', 1))\n fo_setup_py.close()\n if not hasattr(setup_module, 'configuration'):\n if not self.options['assume_default_configuration']:\n self.warn('Assuming default configuration '\\\n '(%s does not define configuration())'\\\n % (setup_module))\n config = Configuration(subpackage_name, parent_name,\n self.top_path, subpackage_path,\n caller_level = caller_level + 1)\n else:\n pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))\n args = (pn,)\n def fix_args_py2(args):\n if setup_module.configuration.__code__.co_argcount > 1:\n args = args + (self.top_path,)\n return args\n def fix_args_py3(args):\n if setup_module.configuration.__code__.co_argcount > 1:\n args = args + (self.top_path,)\n return args\n if sys.version_info[0] < 3:\n args = fix_args_py2(args)\n else:\n args = fix_args_py3(args)\n config = setup_module.configuration(*args)\n if config.name!=dot_join(parent_name, subpackage_name):\n self.warn('Subpackage %r 
configuration returned as %r' % \\\n (dot_join(parent_name, subpackage_name), config.name))\n finally:\n del sys.path[0]\n return config\n\n def get_subpackage(self,subpackage_name,\n subpackage_path=None,\n parent_name=None,\n caller_level = 1):\n \"\"\"Return list of subpackage configurations.\n\n Parameters\n ----------\n subpackage_name : str or None\n Name of the subpackage to get the configuration. '*' in\n subpackage_name is handled as a wildcard.\n subpackage_path : str\n If None, then the path is assumed to be the local path plus the\n subpackage_name. If a setup.py file is not found in the\n subpackage_path, then a default configuration is used.\n parent_name : str\n Parent name.\n \"\"\"\n if subpackage_name is None:\n if subpackage_path is None:\n raise ValueError(\n \"either subpackage_name or subpackage_path must be specified\")\n subpackage_name = os.path.basename(subpackage_path)\n\n # handle wildcards\n l = subpackage_name.split('.')\n if subpackage_path is None and '*' in subpackage_name:\n return self._wildcard_get_subpackage(subpackage_name,\n parent_name,\n caller_level = caller_level+1)\n assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))\n if subpackage_path is None:\n subpackage_path = njoin([self.local_path] + l)\n else:\n subpackage_path = njoin([subpackage_path] + l[:-1])\n subpackage_path = self.paths([subpackage_path])[0]\n setup_py = njoin(subpackage_path, self.setup_name)\n if not self.options['ignore_setup_xxx_py']:\n if not os.path.isfile(setup_py):\n setup_py = njoin(subpackage_path,\n 'setup_%s.py' % (subpackage_name))\n if not os.path.isfile(setup_py):\n if not self.options['assume_default_configuration']:\n self.warn('Assuming default configuration '\\\n '(%s/{setup_%s,setup}.py was not found)' \\\n % (os.path.dirname(setup_py), subpackage_name))\n config = Configuration(subpackage_name, parent_name,\n self.top_path, subpackage_path,\n caller_level = caller_level+1)\n else:\n config = self._get_configuration_from_setup_py(\n setup_py,\n subpackage_name,\n subpackage_path,\n parent_name,\n caller_level = caller_level + 1)\n if config:\n return [config]\n else:\n return []\n\n def add_subpackage(self,subpackage_name,\n subpackage_path=None,\n standalone = False):\n \"\"\"Add a sub-package to the current Configuration instance.\n\n This is useful in a setup.py script for adding sub-packages to a\n package.\n\n Parameters\n ----------\n subpackage_name : str\n name of the subpackage\n subpackage_path : str\n if given, the subpackage path such as the subpackage is in\n subpackage_path / subpackage_name. 
If None,the subpackage is\n assumed to be located in the local path / subpackage_name.\n standalone : bool\n \"\"\"\n\n if standalone:\n parent_name = None\n else:\n parent_name = self.name\n config_list = self.get_subpackage(subpackage_name, subpackage_path,\n parent_name = parent_name,\n caller_level = 2)\n if not config_list:\n self.warn('No configuration returned, assuming unavailable.')\n for config in config_list:\n d = config\n if isinstance(config, Configuration):\n d = config.todict()\n assert isinstance(d, dict), repr(type(d))\n\n self.info('Appending %s configuration to %s' \\\n % (d.get('name'), self.name))\n self.dict_append(**d)\n\n dist = self.get_distribution()\n if dist is not None:\n self.warn('distutils distribution has been initialized,'\\\n ' it may be too late to add a subpackage '+ subpackage_name)\n\n def add_data_dir(self, data_path):\n \"\"\"Recursively add files under data_path to data_files list.\n\n Recursively add files under data_path to the list of data_files to be\n installed (and distributed). The data_path can be either a relative\n path-name, or an absolute path-name, or a 2-tuple where the first\n argument shows where in the install directory the data directory\n should be installed to.\n\n Parameters\n ----------\n data_path : seq or str\n Argument can be either\n\n * 2-sequence (<datadir suffix>, <path to data directory>)\n * path to data directory where python datadir suffix defaults\n to package dir.\n\n Notes\n -----\n Rules for installation paths:\n foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar\n (gun, foo/bar) -> parent/gun\n foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b\n (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun\n (gun/*, foo/*) -> parent/gun/a, parent/gun/b\n /foo/bar -> (bar, /foo/bar) -> parent/bar\n (gun, /foo/bar) -> parent/gun\n (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar\n\n Examples\n --------\n For example suppose the source directory contains fun/foo.dat and\n fun/bar/car.dat::\n\n >>> self.add_data_dir('fun') #doctest: +SKIP\n >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP\n >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP\n\n Will install data-files to the locations::\n\n <package install directory>/\n fun/\n foo.dat\n bar/\n car.dat\n sun/\n foo.dat\n bar/\n car.dat\n gun/\n foo.dat\n car.dat\n \"\"\"\n if is_sequence(data_path):\n d, data_path = data_path\n else:\n d = None\n if is_sequence(data_path):\n [self.add_data_dir((d, p)) for p in data_path]\n return\n if not is_string(data_path):\n raise TypeError(\"not a string: %r\" % (data_path,))\n if d is None:\n if os.path.isabs(data_path):\n return self.add_data_dir((os.path.basename(data_path), data_path))\n return self.add_data_dir((data_path, data_path))\n paths = self.paths(data_path, include_non_existing=False)\n if is_glob_pattern(data_path):\n if is_glob_pattern(d):\n pattern_list = allpath(d).split(os.sep)\n pattern_list.reverse()\n # /a/*//b/ -> /a/*/b\n rl = list(range(len(pattern_list)-1)); rl.reverse()\n for i in rl:\n if not pattern_list[i]:\n del pattern_list[i]\n #\n for path in paths:\n if not os.path.isdir(path):\n print('Not a directory, skipping', path)\n continue\n rpath = rel_path(path, self.local_path)\n path_list = rpath.split(os.sep)\n path_list.reverse()\n target_list = []\n i = 0\n for s in pattern_list:\n if is_glob_pattern(s):\n if i>=len(path_list):\n raise ValueError('cannot fill pattern %r with %r' \\\n % (d, path))\n target_list.append(path_list[i])\n else:\n assert s==path_list[i], 
repr((s, path_list[i], data_path, d, path, rpath))\n target_list.append(s)\n i += 1\n if path_list[i:]:\n self.warn('mismatch of pattern_list=%s and path_list=%s'\\\n % (pattern_list, path_list))\n target_list.reverse()\n self.add_data_dir((os.sep.join(target_list), path))\n else:\n for path in paths:\n self.add_data_dir((d, path))\n return\n assert not is_glob_pattern(d), repr(d)\n\n dist = self.get_distribution()\n if dist is not None and dist.data_files is not None:\n data_files = dist.data_files\n else:\n data_files = self.data_files\n\n for path in paths:\n for d1, f in list(general_source_directories_files(path)):\n target_path = os.path.join(self.path_in_package, d, d1)\n data_files.append((target_path, f))\n\n def _optimize_data_files(self):\n data_dict = {}\n for p, files in self.data_files:\n if p not in data_dict:\n data_dict[p] = set()\n for f in files:\n data_dict[p].add(f)\n self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]\n\n def add_data_files(self,*files):\n \"\"\"Add data files to configuration data_files.\n\n Parameters\n ----------\n files : sequence\n Argument(s) can be either\n\n * 2-sequence (<datadir prefix>,<path to data file(s)>)\n * paths to data files where python datadir prefix defaults\n to package dir.\n\n Notes\n -----\n The form of each element of the files sequence is very flexible\n allowing many combinations of where to get the files from the package\n and where they should ultimately be installed on the system. The most\n basic usage is for an element of the files argument sequence to be a\n simple filename. This will cause that file from the local path to be\n installed to the installation path of the self.name package (package\n path). The file argument can also be a relative path in which case the\n entire relative path will be installed into the package directory.\n Finally, the file can be an absolute path name in which case the file\n will be found at the absolute path name but installed to the package\n path.\n\n This basic behavior can be augmented by passing a 2-tuple in as the\n file argument. The first element of the tuple should specify the\n relative path (under the package install directory) where the\n remaining sequence of files should be installed to (it has nothing to\n do with the file-names in the source distribution). The second element\n of the tuple is the sequence of files that should be installed. The\n files in this sequence can be filenames, relative paths, or absolute\n paths. For absolute paths the file will be installed in the top-level\n package installation directory (regardless of the first argument).\n Filenames and relative path names will be installed in the package\n install directory under the path name given as the first element of\n the tuple.\n\n Rules for installation paths:\n\n #. file.txt -> (., file.txt)-> parent/file.txt\n #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt\n #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt\n #. *.txt -> parent/a.txt, parent/b.txt\n #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt\n #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt\n #. (sun, file.txt) -> parent/sun/file.txt\n #. (sun, bar/file.txt) -> parent/sun/file.txt\n #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt\n #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt\n #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt\n #. 
(sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt\n\n An additional feature is that the path to a data-file can actually be\n a function that takes no arguments and returns the actual path(s) to\n the data-files. This is useful when the data files are generated while\n building the package.\n\n Examples\n --------\n Add files to the list of data_files to be included with the package.\n\n >>> self.add_data_files('foo.dat',\n ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),\n ... 'bar/cat.dat',\n ... '/full/path/to/can.dat') #doctest: +SKIP\n\n will install these data files to::\n\n <package install directory>/\n foo.dat\n fun/\n gun.dat\n nun/\n pun.dat\n sun.dat\n bar/\n car.dat\n can.dat\n\n where <package install directory> is the package (or sub-package)\n directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:\n \\\\Python2.4 \\\\Lib \\\\site-packages \\\\mypackage') or\n '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:\n \\\\Python2.4 \\\\Lib \\\\site-packages \\\\mypackage \\\\mysubpackage').\n \"\"\"\n\n if len(files)>1:\n for f in files:\n self.add_data_files(f)\n return\n assert len(files)==1\n if is_sequence(files[0]):\n d, files = files[0]\n else:\n d = None\n if is_string(files):\n filepat = files\n elif is_sequence(files):\n if len(files)==1:\n filepat = files[0]\n else:\n for f in files:\n self.add_data_files((d, f))\n return\n else:\n raise TypeError(repr(type(files)))\n\n if d is None:\n if hasattr(filepat, '__call__'):\n d = ''\n elif os.path.isabs(filepat):\n d = ''\n else:\n d = os.path.dirname(filepat)\n self.add_data_files((d, files))\n return\n\n paths = self.paths(filepat, include_non_existing=False)\n if is_glob_pattern(filepat):\n if is_glob_pattern(d):\n pattern_list = d.split(os.sep)\n pattern_list.reverse()\n for path in paths:\n path_list = path.split(os.sep)\n path_list.reverse()\n path_list.pop() # filename\n target_list = []\n i = 0\n for s in pattern_list:\n if is_glob_pattern(s):\n target_list.append(path_list[i])\n i += 1\n else:\n target_list.append(s)\n target_list.reverse()\n self.add_data_files((os.sep.join(target_list), path))\n else:\n self.add_data_files((d, paths))\n return\n assert not is_glob_pattern(d), repr((d, filepat))\n\n dist = self.get_distribution()\n if dist is not None and dist.data_files is not None:\n data_files = dist.data_files\n else:\n data_files = self.data_files\n\n data_files.append((os.path.join(self.path_in_package, d), paths))\n\n ### XXX Implement add_py_modules\n\n def add_define_macros(self, macros):\n \"\"\"Add define macros to configuration\n\n Add the given sequence of macro name and value duples to the beginning\n of the define_macros list This list will be visible to all extension\n modules of the current package.\n \"\"\"\n dist = self.get_distribution()\n if dist is not None:\n if not hasattr(dist, 'define_macros'):\n dist.define_macros = []\n dist.define_macros.extend(macros)\n else:\n self.define_macros.extend(macros)\n\n\n def add_include_dirs(self,*paths):\n \"\"\"Add paths to configuration include directories.\n\n Add the given sequence of paths to the beginning of the include_dirs\n list. 
This list will be visible to all extension modules of the\n current package.\n \"\"\"\n include_dirs = self.paths(paths)\n dist = self.get_distribution()\n if dist is not None:\n if dist.include_dirs is None:\n dist.include_dirs = []\n dist.include_dirs.extend(include_dirs)\n else:\n self.include_dirs.extend(include_dirs)\n\n def add_numarray_include_dirs(self):\n import numpy.numarray.util as nnu\n self.add_include_dirs(*nnu.get_numarray_include_dirs())\n\n def add_headers(self,*files):\n \"\"\"Add installable headers to configuration.\n\n Add the given sequence of files to the beginning of the headers list.\n By default, headers will be installed under <python-\n include>/<self.name.replace('.','/')>/ directory. If an item of files\n is a tuple, then its first argument specifies the actual installation\n location relative to the <python-include> path.\n\n Parameters\n ----------\n files : str or seq\n Argument(s) can be either:\n\n * 2-sequence (<includedir suffix>,<path to header file(s)>)\n * path(s) to header file(s) where python includedir suffix will\n default to package name.\n \"\"\"\n headers = []\n for path in files:\n if is_string(path):\n [headers.append((self.name, p)) for p in self.paths(path)]\n else:\n if not isinstance(path, (tuple, list)) or len(path) != 2:\n raise TypeError(repr(path))\n [headers.append((path[0], p)) for p in self.paths(path[1])]\n dist = self.get_distribution()\n if dist is not None:\n if dist.headers is None:\n dist.headers = []\n dist.headers.extend(headers)\n else:\n self.headers.extend(headers)\n\n def paths(self,*paths,**kws):\n \"\"\"Apply glob to paths and prepend local_path if needed.\n\n Applies glob.glob(...) to each path in the sequence (if needed) and\n pre-pends the local_path if needed. Because this is called on all\n source lists, this allows wildcard characters to be specified in lists\n of sources for extension modules and libraries and scripts and allows\n path-names be relative to the source directory.\n\n \"\"\"\n include_non_existing = kws.get('include_non_existing', True)\n return gpaths(paths,\n local_path = self.local_path,\n include_non_existing=include_non_existing)\n\n def _fix_paths_dict(self, kw):\n for k in kw.keys():\n v = kw[k]\n if k in ['sources', 'depends', 'include_dirs', 'library_dirs',\n 'module_dirs', 'extra_objects']:\n new_v = self.paths(v)\n kw[k] = new_v\n\n def add_extension(self,name,sources,**kw):\n \"\"\"Add extension to configuration.\n\n Create and add an Extension instance to the ext_modules list. This\n method also takes the following optional keyword arguments that are\n passed on to the Extension constructor.\n\n Parameters\n ----------\n name : str\n name of the extension\n sources : seq\n list of the sources. The list of sources may contain functions\n (called source generators) which must take an extension instance\n and a build directory as inputs and return a source file or list of\n source files or None. If None is returned then no sources are\n generated. If the Extension instance has no sources after\n processing all source generators, then no extension module is\n built.\n include_dirs :\n define_macros :\n undef_macros :\n library_dirs :\n libraries :\n runtime_library_dirs :\n extra_objects :\n extra_compile_args :\n extra_link_args :\n extra_f77_compile_args :\n extra_f90_compile_args :\n export_symbols :\n swig_opts :\n depends :\n The depends list contains paths to files or directories that the\n sources of the extension module depend on. 
If any path in the\n depends list is newer than the extension module, then the module\n will be rebuilt.\n language :\n f2py_options :\n module_dirs :\n extra_info : dict or list\n dict or list of dict of keywords to be appended to keywords.\n\n Notes\n -----\n The self.paths(...) method is applied to all lists that may contain\n paths.\n \"\"\"\n ext_args = copy.copy(kw)\n ext_args['name'] = dot_join(self.name, name)\n ext_args['sources'] = sources\n\n if 'extra_info' in ext_args:\n extra_info = ext_args['extra_info']\n del ext_args['extra_info']\n if isinstance(extra_info, dict):\n extra_info = [extra_info]\n for info in extra_info:\n assert isinstance(info, dict), repr(info)\n dict_append(ext_args,**info)\n\n self._fix_paths_dict(ext_args)\n\n # Resolve out-of-tree dependencies\n libraries = ext_args.get('libraries', [])\n libnames = []\n ext_args['libraries'] = []\n for libname in libraries:\n if isinstance(libname, tuple):\n self._fix_paths_dict(libname[1])\n\n # Handle library names of the form libname@relative/path/to/library\n if '@' in libname:\n lname, lpath = libname.split('@', 1)\n lpath = os.path.abspath(njoin(self.local_path, lpath))\n if os.path.isdir(lpath):\n c = self.get_subpackage(None, lpath,\n caller_level = 2)\n if isinstance(c, Configuration):\n c = c.todict()\n for l in [l[0] for l in c.get('libraries', [])]:\n llname = l.split('__OF__', 1)[0]\n if llname == lname:\n c.pop('name', None)\n dict_append(ext_args,**c)\n break\n continue\n libnames.append(libname)\n\n ext_args['libraries'] = libnames + ext_args['libraries']\n ext_args['define_macros'] = \\\n self.define_macros + ext_args.get('define_macros', [])\n\n from numpy.distutils.core import Extension\n ext = Extension(**ext_args)\n self.ext_modules.append(ext)\n\n dist = self.get_distribution()\n if dist is not None:\n self.warn('distutils distribution has been initialized,'\\\n ' it may be too late to add an extension '+name)\n return ext\n\n def add_library(self,name,sources,**build_info):\n \"\"\"\n Add library to configuration.\n\n Parameters\n ----------\n name : str\n Name of the extension.\n sources : sequence\n List of the sources. The list of sources may contain functions\n (called source generators) which must take an extension instance\n and a build directory as inputs and return a source file or list of\n source files or None. If None is returned then no sources are\n generated. If the Extension instance has no sources after\n processing all source generators, then no extension module is\n built.\n build_info : dict, optional\n The following keys are allowed:\n\n * depends\n * macros\n * include_dirs\n * extra_compiler_args\n * extra_f77_compiler_args\n * extra_f90_compiler_args\n * f2py_options\n * language\n\n \"\"\"\n self._add_library(name, sources, None, build_info)\n\n dist = self.get_distribution()\n if dist is not None:\n self.warn('distutils distribution has been initialized,'\\\n ' it may be too late to add a library '+ name)\n\n def _add_library(self, name, sources, install_dir, build_info):\n \"\"\"Common implementation for add_library and add_installed_library. 
Do\n not use directly\"\"\"\n build_info = copy.copy(build_info)\n name = name #+ '__OF__' + self.name\n build_info['sources'] = sources\n\n # Sometimes, depends is not set up to an empty list by default, and if\n # depends is not given to add_library, distutils barfs (#1134)\n if not 'depends' in build_info:\n build_info['depends'] = []\n\n self._fix_paths_dict(build_info)\n\n # Add to libraries list so that it is build with build_clib\n self.libraries.append((name, build_info))\n\n def add_installed_library(self, name, sources, install_dir, build_info=None):\n \"\"\"\n Similar to add_library, but the specified library is installed.\n\n Most C libraries used with `distutils` are only used to build python\n extensions, but libraries built through this method will be installed\n so that they can be reused by third-party packages.\n\n Parameters\n ----------\n name : str\n Name of the installed library.\n sources : sequence\n List of the library's source files. See `add_library` for details.\n install_dir : str\n Path to install the library, relative to the current sub-package.\n build_info : dict, optional\n The following keys are allowed:\n\n * depends\n * macros\n * include_dirs\n * extra_compiler_args\n * extra_f77_compiler_args\n * extra_f90_compiler_args\n * f2py_options\n * language\n\n Returns\n -------\n None\n\n See Also\n --------\n add_library, add_npy_pkg_config, get_info\n\n Notes\n -----\n The best way to encode the options required to link against the specified\n C libraries is to use a \"libname.ini\" file, and use `get_info` to\n retrieve the required options (see `add_npy_pkg_config` for more\n information).\n\n \"\"\"\n if not build_info:\n build_info = {}\n\n install_dir = os.path.join(self.package_path, install_dir)\n self._add_library(name, sources, install_dir, build_info)\n self.installed_libraries.append(InstallableLib(name, build_info, install_dir))\n\n def add_npy_pkg_config(self, template, install_dir, subst_dict=None):\n \"\"\"\n Generate and install a npy-pkg config file from a template.\n\n The config file generated from `template` is installed in the\n given install directory, using `subst_dict` for variable substitution.\n\n Parameters\n ----------\n template : str\n The path of the template, relatively to the current package path.\n install_dir : str\n Where to install the npy-pkg config file, relatively to the current\n package path.\n subst_dict : dict, optional\n If given, any string of the form ``@key@`` will be replaced by\n ``subst_dict[key]`` in the template file when installed. The install\n prefix is always available through the variable ``@prefix@``, since the\n install prefix is not easy to get reliably from setup.py.\n\n See also\n --------\n add_installed_library, get_info\n\n Notes\n -----\n This works for both standard installs and in-place builds, i.e. 
the\n ``@prefix@`` refer to the source directory for in-place builds.\n\n Examples\n --------\n ::\n\n config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})\n\n Assuming the foo.ini.in file has the following content::\n\n [meta]\n Name=@foo@\n Version=1.0\n Description=dummy description\n\n [default]\n Cflags=-I@prefix@/include\n Libs=\n\n The generated file will have the following content::\n\n [meta]\n Name=bar\n Version=1.0\n Description=dummy description\n\n [default]\n Cflags=-Iprefix_dir/include\n Libs=\n\n and will be installed as foo.ini in the 'lib' subpath.\n\n \"\"\"\n if subst_dict is None:\n subst_dict = {}\n basename = os.path.splitext(template)[0]\n template = os.path.join(self.package_path, template)\n\n if self.name in self.installed_pkg_config:\n self.installed_pkg_config[self.name].append((template, install_dir,\n subst_dict))\n else:\n self.installed_pkg_config[self.name] = [(template, install_dir,\n subst_dict)]\n\n\n def add_scripts(self,*files):\n \"\"\"Add scripts to configuration.\n\n Add the sequence of files to the beginning of the scripts list.\n Scripts will be installed under the <prefix>/bin/ directory.\n\n \"\"\"\n scripts = self.paths(files)\n dist = self.get_distribution()\n if dist is not None:\n if dist.scripts is None:\n dist.scripts = []\n dist.scripts.extend(scripts)\n else:\n self.scripts.extend(scripts)\n\n def dict_append(self,**dict):\n for key in self.list_keys:\n a = getattr(self, key)\n a.extend(dict.get(key, []))\n for key in self.dict_keys:\n a = getattr(self, key)\n a.update(dict.get(key, {}))\n known_keys = self.list_keys + self.dict_keys + self.extra_keys\n for key in dict.keys():\n if key not in known_keys:\n a = getattr(self, key, None)\n if a and a==dict[key]: continue\n self.warn('Inheriting attribute %r=%r from %r' \\\n % (key, dict[key], dict.get('name', '?')))\n setattr(self, key, dict[key])\n self.extra_keys.append(key)\n elif key in self.extra_keys:\n self.info('Ignoring attempt to set %r (from %r to %r)' \\\n % (key, getattr(self, key), dict[key]))\n elif key in known_keys:\n # key is already processed above\n pass\n else:\n raise ValueError(\"Don't know about key=%r\" % (key))\n\n def __str__(self):\n from pprint import pformat\n known_keys = self.list_keys + self.dict_keys + self.extra_keys\n s = '<'+5*'-' + '\\n'\n s += 'Configuration of '+self.name+':\\n'\n known_keys.sort()\n for k in known_keys:\n a = getattr(self, k, None)\n if a:\n s += '%s = %s\\n' % (k, pformat(a))\n s += 5*'-' + '>'\n return s\n\n def get_config_cmd(self):\n \"\"\"\n Returns the numpy.distutils config command instance.\n \"\"\"\n cmd = get_cmd('config')\n cmd.ensure_finalized()\n cmd.dump_source = 0\n cmd.noisy = 0\n old_path = os.environ.get('PATH')\n if old_path:\n path = os.pathsep.join(['.', old_path])\n os.environ['PATH'] = path\n return cmd\n\n def get_build_temp_dir(self):\n \"\"\"\n Return a path to a temporary directory where temporary files should be\n placed.\n \"\"\"\n cmd = get_cmd('build')\n cmd.ensure_finalized()\n return cmd.build_temp\n\n def have_f77c(self):\n \"\"\"Check for availability of Fortran 77 compiler.\n\n Use it inside source generating function to ensure that\n setup distribution instance has been initialized.\n\n Notes\n -----\n True if a Fortran 77 compiler is available (because a simple Fortran 77\n code was able to be compiled successfully).\n \"\"\"\n simple_fortran_subroutine = '''\n subroutine simple\n end\n '''\n config_cmd = self.get_config_cmd()\n flag = config_cmd.try_compile(simple_fortran_subroutine, 
lang='f77')\n return flag\n\n def have_f90c(self):\n \"\"\"Check for availability of Fortran 90 compiler.\n\n Use it inside source generating function to ensure that\n setup distribution instance has been initialized.\n\n Notes\n -----\n True if a Fortran 90 compiler is available (because a simple Fortran\n 90 code was able to be compiled successfully)\n \"\"\"\n simple_fortran_subroutine = '''\n subroutine simple\n end\n '''\n config_cmd = self.get_config_cmd()\n flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')\n return flag\n\n def append_to(self, extlib):\n \"\"\"Append libraries, include_dirs to extension or library item.\n \"\"\"\n if is_sequence(extlib):\n lib_name, build_info = extlib\n dict_append(build_info,\n libraries=self.libraries,\n include_dirs=self.include_dirs)\n else:\n from numpy.distutils.core import Extension\n assert isinstance(extlib, Extension), repr(extlib)\n extlib.libraries.extend(self.libraries)\n extlib.include_dirs.extend(self.include_dirs)\n\n def _get_svn_revision(self, path):\n \"\"\"Return path's SVN revision number.\n \"\"\"\n revision = None\n m = None\n cwd = os.getcwd()\n try:\n os.chdir(path or '.')\n p = subprocess.Popen(['svnversion'], shell=True,\n stdout=subprocess.PIPE, stderr=None,\n close_fds=True)\n sout = p.stdout\n m = re.match(r'(?P<revision>\\d+)', sout.read())\n except:\n pass\n os.chdir(cwd)\n if m:\n revision = int(m.group('revision'))\n return revision\n if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):\n entries = njoin(path, '_svn', 'entries')\n else:\n entries = njoin(path, '.svn', 'entries')\n if os.path.isfile(entries):\n f = open(entries)\n fstr = f.read()\n f.close()\n if fstr[:5] == '<?xml': # pre 1.4\n m = re.search(r'revision=\"(?P<revision>\\d+)\"', fstr)\n if m:\n revision = int(m.group('revision'))\n else: # non-xml entries file --- check to be sure that\n m = re.search(r'dir[\\n\\r]+(?P<revision>\\d+)', fstr)\n if m:\n revision = int(m.group('revision'))\n return revision\n\n def _get_hg_revision(self, path):\n \"\"\"Return path's Mercurial revision number.\n \"\"\"\n revision = None\n m = None\n cwd = os.getcwd()\n try:\n os.chdir(path or '.')\n p = subprocess.Popen(['hg identify --num'], shell=True,\n stdout=subprocess.PIPE, stderr=None,\n close_fds=True)\n sout = p.stdout\n m = re.match(r'(?P<revision>\\d+)', sout.read())\n except:\n pass\n os.chdir(cwd)\n if m:\n revision = int(m.group('revision'))\n return revision\n branch_fn = njoin(path, '.hg', 'branch')\n branch_cache_fn = njoin(path, '.hg', 'branch.cache')\n\n if os.path.isfile(branch_fn):\n branch0 = None\n f = open(branch_fn)\n revision0 = f.read().strip()\n f.close()\n\n branch_map = {}\n for line in file(branch_cache_fn, 'r'):\n branch1, revision1 = line.split()[:2]\n if revision1==revision0:\n branch0 = branch1\n try:\n revision1 = int(revision1)\n except ValueError:\n continue\n branch_map[branch1] = revision1\n\n revision = branch_map.get(branch0)\n return revision\n\n\n def get_version(self, version_file=None, version_variable=None):\n \"\"\"Try to get version string of a package.\n\n Return a version string of the current package or None if the version\n information could not be detected.\n\n Notes\n -----\n This method scans files named\n __version__.py, <packagename>_version.py, version.py, and\n __svn_version__.py for string variables version, __version\\__, and\n <packagename>_version, until a version number is found.\n \"\"\"\n version = getattr(self, 'version', None)\n if version is not None:\n return 
version\n\n # Get version from version file.\n if version_file is None:\n files = ['__version__.py',\n self.name.split('.')[-1]+'_version.py',\n 'version.py',\n '__svn_version__.py',\n '__hg_version__.py']\n else:\n files = [version_file]\n if version_variable is None:\n version_vars = ['version',\n '__version__',\n self.name.split('.')[-1]+'_version']\n else:\n version_vars = [version_variable]\n for f in files:\n fn = njoin(self.local_path, f)\n if os.path.isfile(fn):\n info = (open(fn), fn, ('.py', 'U', 1))\n name = os.path.splitext(os.path.basename(fn))[0]\n n = dot_join(self.name, name)\n try:\n version_module = imp.load_module('_'.join(n.split('.')),*info)\n except ImportError:\n msg = get_exception()\n self.warn(str(msg))\n version_module = None\n if version_module is None:\n continue\n\n for a in version_vars:\n version = getattr(version_module, a, None)\n if version is not None:\n break\n if version is not None:\n break\n\n if version is not None:\n self.version = version\n return version\n\n # Get version as SVN or Mercurial revision number\n revision = self._get_svn_revision(self.local_path)\n if revision is None:\n revision = self._get_hg_revision(self.local_path)\n\n if revision is not None:\n version = str(revision)\n self.version = version\n\n return version\n\n def make_svn_version_py(self, delete=True):\n \"\"\"Appends a data function to the data_files list that will generate\n __svn_version__.py file to the current package directory.\n\n Generate package __svn_version__.py file from SVN revision number,\n it will be removed after python exits but will be available\n when sdist, etc commands are executed.\n\n Notes\n -----\n If __svn_version__.py existed before, nothing is done.\n\n This is\n intended for working with source directories that are in an SVN\n repository.\n \"\"\"\n target = njoin(self.local_path, '__svn_version__.py')\n revision = self._get_svn_revision(self.local_path)\n if os.path.isfile(target) or revision is None:\n return\n else:\n def generate_svn_version_py():\n if not os.path.isfile(target):\n version = str(revision)\n self.info('Creating %s (version=%r)' % (target, version))\n f = open(target, 'w')\n f.write('version = %r\\n' % (version))\n f.close()\n\n import atexit\n def rm_file(f=target,p=self.info):\n if delete:\n try: os.remove(f); p('removed '+f)\n except OSError: pass\n try: os.remove(f+'c'); p('removed '+f+'c')\n except OSError: pass\n\n atexit.register(rm_file)\n\n return target\n\n self.add_data_files(('', generate_svn_version_py()))\n\n def make_hg_version_py(self, delete=True):\n \"\"\"Appends a data function to the data_files list that will generate\n __hg_version__.py file to the current package directory.\n\n Generate package __hg_version__.py file from Mercurial revision,\n it will be removed after python exits but will be available\n when sdist, etc commands are executed.\n\n Notes\n -----\n If __hg_version__.py existed before, nothing is done.\n\n This is intended for working with source directories that are\n in an Mercurial repository.\n \"\"\"\n target = njoin(self.local_path, '__hg_version__.py')\n revision = self._get_hg_revision(self.local_path)\n if os.path.isfile(target) or revision is None:\n return\n else:\n def generate_hg_version_py():\n if not os.path.isfile(target):\n version = str(revision)\n self.info('Creating %s (version=%r)' % (target, version))\n f = open(target, 'w')\n f.write('version = %r\\n' % (version))\n f.close()\n\n import atexit\n def rm_file(f=target,p=self.info):\n if delete:\n try: os.remove(f); 
p('removed '+f)\n except OSError: pass\n try: os.remove(f+'c'); p('removed '+f+'c')\n except OSError: pass\n\n atexit.register(rm_file)\n\n return target\n\n self.add_data_files(('', generate_hg_version_py()))\n\n def make_config_py(self,name='__config__'):\n \"\"\"Generate package __config__.py file containing system_info\n information used during building the package.\n\n This file is installed to the\n package installation directory.\n\n \"\"\"\n self.py_modules.append((self.name, name, generate_config_py))\n\n\n def get_info(self,*names):\n \"\"\"Get resources information.\n\n Return information (from system_info.get_info) for all of the names in\n the argument list in a single dictionary.\n \"\"\"\n from .system_info import get_info, dict_append\n info_dict = {}\n for a in names:\n dict_append(info_dict,**get_info(a))\n return info_dict\n\n\ndef get_cmd(cmdname, _cache={}):\n if cmdname not in _cache:\n import distutils.core\n dist = distutils.core._setup_distribution\n if dist is None:\n from distutils.errors import DistutilsInternalError\n raise DistutilsInternalError(\n 'setup distribution instance not initialized')\n cmd = dist.get_command_obj(cmdname)\n _cache[cmdname] = cmd\n return _cache[cmdname]\n\ndef get_numpy_include_dirs():\n # numpy_include_dirs are set by numpy/core/setup.py, otherwise []\n include_dirs = Configuration.numpy_include_dirs[:]\n if not include_dirs:\n import numpy\n include_dirs = [ numpy.get_include() ]\n # else running numpy/core/setup.py\n return include_dirs\n\ndef get_npy_pkg_dir():\n \"\"\"Return the path where to find the npy-pkg-config directory.\"\"\"\n # XXX: import here for bootstrapping reasons\n import numpy\n d = os.path.join(os.path.dirname(numpy.__file__),\n 'core', 'lib', 'npy-pkg-config')\n return d\n\ndef get_pkg_info(pkgname, dirs=None):\n \"\"\"\n Return library info for the given package.\n\n Parameters\n ----------\n pkgname : str\n Name of the package (should match the name of the .ini file, without\n the extension, e.g. foo for the file foo.ini).\n dirs : sequence, optional\n If given, should be a sequence of additional directories where to look\n for npy-pkg-config files. Those directories are searched prior to the\n NumPy directory.\n\n Returns\n -------\n pkginfo : class instance\n The `LibraryInfo` instance containing the build information.\n\n Raises\n ------\n PkgNotFound\n If the package is not found.\n\n See Also\n --------\n Configuration.add_npy_pkg_config, Configuration.add_installed_library,\n get_info\n\n \"\"\"\n from numpy.distutils.npy_pkg_config import read_config\n\n if dirs:\n dirs.append(get_npy_pkg_dir())\n else:\n dirs = [get_npy_pkg_dir()]\n return read_config(pkgname, dirs)\n\ndef get_info(pkgname, dirs=None):\n \"\"\"\n Return an info dict for a given C library.\n\n The info dict contains the necessary options to use the C library.\n\n Parameters\n ----------\n pkgname : str\n Name of the package (should match the name of the .ini file, without\n the extension, e.g. foo for the file foo.ini).\n dirs : sequence, optional\n If given, should be a sequence of additional directories where to look\n for npy-pkg-config files. 
Those directories are searched prior to the\n NumPy directory.\n\n Returns\n -------\n info : dict\n The dictionary with build information.\n\n Raises\n ------\n PkgNotFound\n If the package is not found.\n\n See Also\n --------\n Configuration.add_npy_pkg_config, Configuration.add_installed_library,\n get_pkg_info\n\n Examples\n --------\n To get the necessary information for the npymath library from NumPy:\n\n >>> npymath_info = np.distutils.misc_util.get_info('npymath')\n >>> npymath_info #doctest: +SKIP\n {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':\n ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}\n\n This info dict can then be used as input to a `Configuration` instance::\n\n config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)\n\n \"\"\"\n from numpy.distutils.npy_pkg_config import parse_flags\n pkg_info = get_pkg_info(pkgname, dirs)\n\n # Translate LibraryInfo instance into a build_info dict\n info = parse_flags(pkg_info.cflags())\n for k, v in parse_flags(pkg_info.libs()).items():\n info[k].extend(v)\n\n # add_extension extra_info argument is ANAL\n info['define_macros'] = info['macros']\n del info['macros']\n del info['ignored']\n\n return info\n\ndef is_bootstrapping():\n if sys.version_info[0] >= 3:\n import builtins\n else:\n import __builtin__ as builtins\n\n try:\n builtins.__NUMPY_SETUP__\n return True\n except AttributeError:\n return False\n __NUMPY_SETUP__ = False\n\n\n#########################\n\ndef default_config_dict(name = None, parent_name = None, local_path=None):\n \"\"\"Return a configuration dictionary for usage in\n configuration() function defined in file setup_<name>.py.\n \"\"\"\n import warnings\n warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\\\n 'deprecated default_config_dict(%r,%r,%r)'\n % (name, parent_name, local_path,\n name, parent_name, local_path,\n ))\n c = Configuration(name, parent_name, local_path)\n return c.todict()\n\n\ndef dict_append(d, **kws):\n for k, v in kws.items():\n if k in d:\n ov = d[k]\n if isinstance(ov, str):\n d[k] = v\n else:\n d[k].extend(v)\n else:\n d[k] = v\n\ndef appendpath(prefix, path):\n if os.path.sep != '/':\n prefix = prefix.replace('/', os.path.sep)\n path = path.replace('/', os.path.sep)\n drive = ''\n if os.path.isabs(path):\n drive = os.path.splitdrive(prefix)[0]\n absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]\n pathdrive, path = os.path.splitdrive(path)\n d = os.path.commonprefix([absprefix, path])\n if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \\\n or os.path.join(path[:len(d)], path[len(d):]) != path:\n # Handle invalid paths\n d = os.path.dirname(d)\n subpath = path[len(d):]\n if os.path.isabs(subpath):\n subpath = subpath[1:]\n else:\n subpath = path\n return os.path.normpath(njoin(drive + prefix, subpath))\n\ndef generate_config_py(target):\n \"\"\"Generate config.py file containing system_info information\n used during building the package.\n\n Usage:\n config['py_modules'].append((packagename, '__config__',generate_config_py))\n \"\"\"\n from numpy.distutils.system_info import system_info\n from distutils.dir_util import mkpath\n mkpath(os.path.dirname(target))\n f = open(target, 'w')\n f.write('# This file is generated by %s\\n' % (os.path.abspath(sys.argv[0])))\n f.write('# It contains system_info results at the time of building this package.\\n')\n f.write('__all__ = [\"get_info\",\"show\"]\\n\\n')\n for k, i in system_info.saved_results.items():\n f.write('%s=%r\\n' % (k, i))\n 
f.write(r'''\ndef get_info(name):\n g = globals()\n return g.get(name, g.get(name + \"_info\", {}))\n\ndef show():\n for name,info_dict in globals().items():\n if name[0] == \"_\" or type(info_dict) is not type({}): continue\n print(name + \":\")\n if not info_dict:\n print(\" NOT AVAILABLE\")\n for k,v in info_dict.items():\n v = str(v)\n if k == \"sources\" and len(v) > 200:\n v = v[:60] + \" ...\\n... \" + v[-60:]\n print(\" %s = %s\" % (k,v))\n ''')\n\n f.close()\n return target\n\ndef msvc_version(compiler):\n \"\"\"Return version major and minor of compiler instance if it is\n MSVC, raise an exception otherwise.\"\"\"\n if not compiler.compiler_type == \"msvc\":\n raise ValueError(\"Compiler instance is not msvc (%s)\"\\\n % compiler.compiler_type)\n return compiler._MSVCCompiler__version\n\nif sys.version[:3] >= '2.5':\n def get_build_architecture():\n from distutils.msvccompiler import get_build_architecture\n return get_build_architecture()\nelse:\n #copied from python 2.5.1 distutils/msvccompiler.py\n def get_build_architecture():\n \"\"\"Return the processor architecture.\n\n Possible results are \"Intel\", \"Itanium\", or \"AMD64\".\n \"\"\"\n prefix = \" bit (\"\n i = sys.version.find(prefix)\n if i == -1:\n return \"Intel\"\n j = sys.version.find(\")\", i)\n return sys.version[i+len(prefix):j]\n" ]
[ [ "numpy.distutils.system_info.system_info.saved_results.items", "numpy.numarray.util.get_numarray_include_dirs", "numpy.distutils.compat.get_exception", "numpy.get_include", "numpy.distutils.core.get_distribution", "numpy.distutils.core.Extension", "numpy.distutils.npy_pkg_config.read_config" ] ]
VEDANTGHODKE/Swayatta-Autonomous-Driver-Assistance-System-ADAS-For-Indian-Environments
[ "7f0361c0f52e4e7623d975725497648cf582f36f" ]
[ "Swayatta - Autonomous Car Follower System/src/synchronous_mode.py" ]
[ "#!/usr/bin/env python\n\n# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\nimport glob\nimport os\nimport sys\nfrom CarDetector import CarDetector\nfrom DrivingControl import DrivingControl\nfrom VizualizeDrivingPath import VizualizeDrivingPath\nfrom PurePursuitAlgorithm import PurePursuitAlgorithm\nfrom SemanticSegmentation import SemanticSegmentation\nfrom DrivingControlAdvanced import DrivingControlAdvanced\nimport math\nimport pickle\n\ntry:\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n\nimport carla\n\nimport random\n\n# try:\nimport pygame\n# except ImportError:\n# raise RuntimeError('cannot import pygame, make sure pygame package is installed')\n\n# try:\nimport numpy as np\n# except ImportError:\n# raise RuntimeError('cannot import numpy, make sure numpy package is installed')\n\n# try:\nimport queue\n# except ImportError:\n# import Queue as queue\n\n\nclass CarlaSyncMode(object):\n \"\"\"\n Context manager to synchronize output from different sensors. Synchronous\n mode is enabled as long as we are inside this context\n\n with CarlaSyncMode(world, sensors) as sync_mode:\n while True:\n data = sync_mode.tick(timeout=1.0)\n\n \"\"\"\n\n def __init__(self, world, *sensors, **kwargs):\n self.world = world\n self.sensors = sensors\n self.frame = None\n self.delta_seconds = 1.0 / kwargs.get('fps', 20)\n self._queues = []\n self._settings = None\n\n def __enter__(self):\n self._settings = self.world.get_settings()\n self.frame = self.world.apply_settings(carla.WorldSettings(\n no_rendering_mode=False,\n synchronous_mode=True,\n fixed_delta_seconds=self.delta_seconds))\n\n def make_queue(register_event):\n q = queue.Queue()\n register_event(q.put)\n self._queues.append(q)\n\n make_queue(self.world.on_tick)\n for sensor in self.sensors:\n make_queue(sensor.listen)\n return self\n\n def tick(self, timeout):\n self.frame = self.world.tick()\n data = [self._retrieve_data(q, timeout) for q in self._queues]\n assert all(x.frame == self.frame for x in data)\n return data\n\n def __exit__(self, *args, **kwargs):\n self.world.apply_settings(self._settings)\n\n def _retrieve_data(self, sensor_queue, timeout):\n while True:\n data = sensor_queue.get(timeout=timeout)\n if data.frame == self.frame:\n return data\n\n\ndef BresenhamLine(x0,y0, x1,y1):\n if x0 > x1:\n tmpX = x1\n tmpY = y1\n x1 = x0\n x0 = tmpX\n y1 = y0\n y0 = tmpY\n\n coords = []\n dx = x1 - x0\n dy = abs(y1 - y0)\n D = 2*dy - dx\n y = y0\n\n for x in range(x0,x1+1):\n coords.append([x,y])\n if D > 0:\n y = y + (1 if y1 >= y0 else -1)\n D = D - 2*dx\n D = D + 2*dy\n return coords\n\nimport os\ndef myPrint(angle,predicted_angle, possibleAngle,real_dist, predicted_distance, chaseMode=True):\n return\n os.system('clear')\n if chaseMode == True:\n print('----- Chase mode -----')\n else:\n print('----- Follow mode -----')\n if chaseMode == False:\n print('The predicted angle is between the chasing car and some point in the trajectory.')\n print('Real angle:',angle)\n print('Predicted angle:',predicted_angle)\n print('Possible angle:',possibleAngle)\n print('Real distance:',real_dist)\n print('Predicted distance:',predicted_distance)\n\nimport imageio\nfrom copy import deepcopy\ndef draw_image(surface, 
image, image2,location1, location2, blend=False, record=False,driveName='',smazat=[]):\n if False:#image2.frame%5 == 0:\n # coords1 = BresenhamLine(0,image2.height-1,image2.width//2,image2.height//2)\n # coords2 = BresenhamLine(image2.width - 1, image2.height - 1, image2.width // 2, image2.height // 2)\n # print(coords2)\n # print(len(coords1),len(coords2))\n\n array = np.frombuffer(image2.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image2.height, image2.width, 4))\n # array = np.reshape(array, (image2.width, image2.height, 4))\n array = array[:, :, :3]\n # array = array[:, :, ::-1]\n arr = deepcopy(array)\n arr = np.array(arr,dtype=int)\n for i in range(len(array)):\n for j in range(len(array[i])):\n if array[i][j][2] == 7 or array[i][j][2] == 6:\n arr[i][j][0] = 0\n arr[i][j][1] = 255\n arr[i][j][2] = 43\n # for i in range(len(coords1)):\n # arr[coords1[i][1]][coords1[i][0]][0] = 255\n # arr[coords1[i][1]][coords1[i][0]][1] = 0\n # arr[coords1[i][1]][coords1[i][0]][2] = 0\n #\n # for i in range(len(coords2)):\n # arr[coords2[i][1]][coords2[i][0]][0] = 255\n # arr[coords2[i][1]][coords2[i][0]][1] = 0\n # arr[coords2[i][1]][coords2[i][0]][2] = 0\n\n for i in range(len(smazat)):\n arr[smazat[i][1]][smazat[i][0]][0] = 255\n arr[smazat[i][1]][smazat[i][0]][1] = 0\n arr[smazat[i][1]][smazat[i][0]][2] = 0\n\n dirName = os.path.join('test')\n if not os.path.exists(dirName):\n os.mkdir(dirName)\n filename = dirName + '/' + str(image2.frame) + '.png'\n imageio.imwrite(filename, arr)\n # image2.save_to_disk(dirName + '/%07d' % image2.frame)\n if image.frame % 10 == 0:#record:#image.frame % 10 == 0:\n driveName = driveName.split('/')[1]\n dirName = os.path.join('output',driveName)\n if not os.path.exists(dirName):\n os.mkdir(dirName)\n image.save_to_disk(dirName+'/%07d' % image.frame)#_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f.png' % (image.frame,location1.location.x,location1.location.y,location1.location.z, location1.rotation.pitch,location1.rotation.yaw, location1.rotation.roll,location2.location.x,location2.location.y,location2.location.z, location2.rotation.pitch,location2.rotation.yaw, location2.rotation.roll ))\n #image2.save_to_disk('output2/%07d_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f.png' % (image2.frame,location1.location.x,location1.location.y,location1.location.z, location1.rotation.pitch,location1.rotation.yaw, location1.rotation.roll\n # ,location2.location.x,location2.location.y,location2.location.z, location2.rotation.pitch,location2.rotation.yaw, location2.rotation.roll ))\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if blend:\n image_surface.set_alpha(100)\n surface.blit(image_surface, (0, 0))\n\n\ndef get_font():\n fonts = [x for x in pygame.font.get_fonts()]\n default_font = 'ubuntumono'\n font = default_font if default_font in fonts else fonts[0]\n font = pygame.font.match_font(font)\n return pygame.font.Font(font, 14)\n\n\ndef should_quit():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_ESCAPE:\n return True\n return False\n\n# try:\nimport pygame\nfrom pygame.locals import KMOD_CTRL\nfrom pygame.locals import KMOD_SHIFT\nfrom pygame.locals import K_0\nfrom pygame.locals import K_9\nfrom pygame.locals import K_BACKQUOTE\nfrom pygame.locals import K_BACKSPACE\nfrom 
pygame.locals import K_COMMA\nfrom pygame.locals import K_DOWN\nfrom pygame.locals import K_ESCAPE\nfrom pygame.locals import K_F1\nfrom pygame.locals import K_LEFT\nfrom pygame.locals import K_PERIOD\nfrom pygame.locals import K_RIGHT\nfrom pygame.locals import K_SLASH\nfrom pygame.locals import K_SPACE\nfrom pygame.locals import K_TAB\nfrom pygame.locals import K_UP\nfrom pygame.locals import K_a\nfrom pygame.locals import K_c\nfrom pygame.locals import K_d\nfrom pygame.locals import K_h\nfrom pygame.locals import K_m\nfrom pygame.locals import K_p\nfrom pygame.locals import K_q\nfrom pygame.locals import K_r\nfrom pygame.locals import K_s\nfrom pygame.locals import K_w\nfrom pygame.locals import K_MINUS\nfrom pygame.locals import K_EQUALS\n\nclass ManualControl(object):\n def __init__(self,filename,name=''):\n self.history = []\n self._control = carla.VehicleControl()\n self._steer_cache = 0.0\n self.outputDir = 'chaseOutput'+name\n self.fileName = filename.split('/')[-1]\n if self.fileName == '':\n self.fileName = 'test.p'\n self.startRecording = False\n\n def _parse_vehicle_keys(self, keys, milliseconds):\n self._control.throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0\n self._control.reverse=False\n if keys[K_DOWN] or keys[K_s]:\n self._control.throttle = 1\n self._control.reverse=True\n steer_increment = 8e-4 * milliseconds\n if keys[K_LEFT] or keys[K_a]:\n self._steer_cache -= steer_increment\n elif keys[K_RIGHT] or keys[K_d]:\n self._steer_cache += steer_increment\n else:\n self._steer_cache = 0.0\n self._steer_cache = min(0.5, max(-0.5, self._steer_cache))\n self._control.steer = round(self._steer_cache, 1)\n self._control.hand_brake = keys[K_SPACE]\n\n if keys[K_r]:\n self.startRecording = True\n \n def Run(self):\n clock = pygame.time.Clock()\n while True:\n clock.tick_busy_loop(30)\n #pygame.event.get()\n self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())\n self.vehicle.apply_control(self._control)\n pygame.display.flip()\n\n def SaveCarPosition(self,location):\n if self.startRecording:\n self.history.append([location.location.x, location.location.y, location.location.z, location.rotation.pitch, location.rotation.yaw, location.rotation.roll])\n\n def SaveHistoryToFile(self):\n if not os.path.exists(self.outputDir):\n os.mkdir(self.outputDir)\n if len(self.history) > 0:\n pickle.dump(self.history, open(os.path.join(self.outputDir,self.fileName), \"wb\"))\n\n\nclass Evaluation():\n def __init__(self):\n self.sumMAE = 0\n self.sumRMSE = 0\n self.n_of_frames = 0\n self.n_of_collisions = 0\n self.history = []\n\n def AddError(self, distance, goalDistance):\n self.n_of_frames += 1\n self.sumMAE += abs(goalDistance-distance)\n self.sumRMSE += abs(goalDistance-distance)*abs(goalDistance-distance)\n\n def WriteIntoFileFinal(self, filename, driveName):\n if self.n_of_frames > 0:\n self.sumMAE = self.sumMAE / float(self.n_of_frames)\n self.sumRMSE = self.sumRMSE / float(self.n_of_frames)\n\n with open(filename,'a') as f:\n f.write(str(driveName)+', '+str(self.sumMAE)+', '+str(self.sumRMSE)+', '+str(self.n_of_collisions)+'\\n')\n\n def LoadHistoryFromFile(self, fileName):\n self.history = pickle.load( open(fileName, \"rb\"))\n\n def CollisionHandler(self,event):\n self.n_of_collisions += 1\n\ndef DrawDrivable(indexes, w, h, display):\n if len(indexes) != 0:\n BB_COLOR = (11, 102, 35)\n for i in range(10):\n for j in range(10):\n if indexes[i*10+j] == 1:\n pygame.draw.line(display, BB_COLOR, (j*w,i*h) , (j*w+w,i*h))\n pygame.draw.line(display, BB_COLOR, (j*w,i*h), 
(j*w,i*h+h))\n pygame.draw.line(display, BB_COLOR, (j*w+w,i*h), (j*w+w,i*h+h))\n pygame.draw.line(display, BB_COLOR, (j*w,i*h+h), (j*w+w,i*h+h))\n\nimport copy\ndef main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar, driveName='',record=False, followMode=False,\n resultsName='results',P=None,I=None,D=None,nOfFramesToSkip=0):\n counter = 1\n\n actor_list = []\n pygame.init()\n\n carDetector = CarDetector()\n drivingControl = DrivingControl(optimalDistance=optimalDistance)\n if P!=None:\n drivingControlAdvanced = DrivingControlAdvanced(optimalDistance=optimalDistance,P=P,I=I,D=D)\n else:\n drivingControlAdvanced = DrivingControlAdvanced(optimalDistance=optimalDistance)\n visualisation = VizualizeDrivingPath()\n myControl = ManualControl(driveName,name=str(nOfFramesToSkip))\n myControl.startRecording = True\n advanced = False\n extrapolate = True\n\n evaluation = Evaluation()\n semantic = SemanticSegmentation()\n\n lookAheadDistance = 5\n purePursuit = PurePursuitAlgorithm(lookAheadDistance=lookAheadDistance)\n\n display = pygame.display.set_mode(\n (800, 600),\n pygame.HWSURFACE | pygame.DOUBLEBUF)\n font = get_font()\n clock = pygame.time.Clock()\n\n client = carla.Client('localhost', 2000)\n client.set_timeout(2.0)\n\n world = client.get_world()\n\n vehicleToFollowSpawned = False\n\n\n\n try:\n # if True:\n m = world.get_map()\n # if not followDrivenPath:\n start_pose = random.choice(m.get_spawn_points())\n # else:\n # first = evaluation.history[0]\n # print(first)\n # start_pose = random.choice(m.get_spawn_points())\n # start_pose = carla.Transform(carla.Location(first[0],first[1],first[2]),carla.Rotation(first[3],first[4],first[5]))\n # print('Start pose:',start_pose)\n\n blueprint_library = world.get_blueprint_library()\n \n vehicle = world.spawn_actor(\n random.choice(blueprint_library.filter('jeep')),\n start_pose)\n actor_list.append(vehicle)\n vehicle.set_simulate_physics(True)\n if followDrivenPath:\n evaluation.LoadHistoryFromFile(driveName)\n first = evaluation.history[0]\n start_pose = carla.Transform(carla.Location(first[0], first[1], first[2]),\n carla.Rotation(first[3], first[4], first[5]))\n vehicle.set_transform(start_pose)\n\n collision_sensor = world.spawn_actor(blueprint_library.find('sensor.other.collision'),\n carla.Transform(), attach_to=vehicle)\n\n collision_sensor.listen(lambda event: evaluation.CollisionHandler(event))\n actor_list.append(collision_sensor)\n\n # Find the blueprint of the sensor.\n blueprint = world.get_blueprint_library().find('sensor.camera.rgb')\n # Modify the attributes of the blueprint to set image resolution and field of view.\n blueprint.set_attribute('image_size_x', '800')\n blueprint.set_attribute('image_size_y', '600')\n blueprint.set_attribute('fov', '90')\n\n camera_rgb = world.spawn_actor(\n blueprint_library.find('sensor.camera.rgb'),\n carla.Transform(carla.Location(x=1.5, z=1.4,y=0.3), carla.Rotation(pitch=0)), #5,3,0 # -0.3\n attach_to=vehicle)\n actor_list.append(camera_rgb)\n\n camera_rgb2 = world.spawn_actor(\n blueprint_library.find('sensor.camera.rgb'),\n carla.Transform(carla.Location(x=1.5, z=1.4,y=-0.3), carla.Rotation(pitch=0))) #x=-5.5, z=4.4,y=0\n #attach_to=vehicle)\n actor_list.append(camera_rgb2)\n\n camera_segmentation = world.spawn_actor(\n blueprint_library.find('sensor.camera.semantic_segmentation'),\n carla.Transform(carla.Location(x=1.5, z=1.4,y=0), carla.Rotation(pitch=0)), #5,3,0 # -0.3\n attach_to=vehicle)\n actor_list.append(camera_segmentation)\n \n\n # Create a synchronous mode 
context.\n with CarlaSyncMode(world,camera_rgb, camera_rgb2, camera_segmentation, fps=30) as sync_mode:\n\n while True:\n if should_quit():\n return\n clock.tick(30)\n\n # Advance the simulation and wait for the data.\n snapshot, img_rgb, image_rgb2, image_segmentation = sync_mode.tick(timeout=2.0)\n\n line = []\n \n if not vehicleToFollowSpawned and not followDrivenPath:\n vehicleToFollowSpawned = True\n start_pose2 = carla.Transform()\n start_pose2.rotation = start_pose.rotation\n\n start_pose2.location.x = start_pose.location.x\n start_pose2.location.y = start_pose.location.y\n start_pose2.location.z = start_pose.location.z\n\n location1 = vehicle.get_transform()\n rotation1 = location1.rotation\n print(rotation1.yaw,abs(rotation1.yaw))\n if abs(rotation1.yaw - 180.0) < 45.0 or abs(rotation1.yaw + 180.0) < 45.0:\n print('1')\n start_pose2.location.x = start_pose.location.x - 5\n elif abs(rotation1.yaw) < 45.0:\n print('2')\n start_pose2.location.x = start_pose.location.x + 5\n elif abs(rotation1.yaw + 90.0) < 45.0:\n print('3')\n start_pose2.location.y = start_pose.location.y - 5\n elif abs(rotation1.yaw - 90.0) < 45.0:\n print('4')\n start_pose2.location.y = start_pose.location.y + 5\n\n bp = blueprint_library.filter('model3')[0]\n\n bp.set_attribute('color', '0,101,189')\n vehicleToFollow = world.spawn_actor(\n bp,\n start_pose2)\n\n actor_list.append(vehicleToFollow)\n vehicleToFollow.set_simulate_physics(True)\n vehicleToFollow.set_autopilot(True)\n elif not vehicleToFollowSpawned and followDrivenPath:\n vehicleToFollowSpawned = True\n location1 = vehicle.get_transform()\n newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,\n location1.rotation.yaw)\n diffX = newX - location1.location.x\n diffY = newY - location1.location.y\n newX = location1.location.x - (diffX*5)\n newY = location1.location.y - (diffY*5)\n\n start_pose.location.x = newX\n start_pose.location.y = newY\n\n vehicle.set_transform(start_pose)\n\n start_pose2 = random.choice(m.get_spawn_points())\n\n bp = blueprint_library.filter('model3')[0]\n bp.set_attribute('color', '0,101,189')\n vehicleToFollow = world.spawn_actor(\n bp,\n start_pose2)\n\n start_pose2 = carla.Transform()\n start_pose2.rotation = start_pose.rotation\n\n start_pose2.location.x = start_pose.location.x\n start_pose2.location.y = start_pose.location.y\n start_pose2.location.z = start_pose.location.z\n\n vehicleToFollow.set_transform(start_pose2)\n\n actor_list.append(vehicleToFollow)\n vehicleToFollow.set_simulate_physics(True)\n vehicleToFollow.set_autopilot(False)\n\n if followDrivenPath:\n if counter >= len(evaluation.history):\n break\n tmp = evaluation.history[counter]\n currentPos = carla.Transform(carla.Location(tmp[0],tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))\n vehicleToFollow.set_transform(currentPos)\n counter += 1\n\n fps = round(1.0 / snapshot.timestamp.delta_seconds)\n\n # manual control\n if not followDrivenPath:\n myControl._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())\n vehicle.apply_control(myControl._control)\n\n\n location1 = vehicle.get_transform()\n location2 = vehicleToFollow.get_transform()\n\n myControl.SaveCarPosition(location1)\n newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,location1.rotation.yaw)\n angle = carDetector.getAngle([location1.location.x, location1.location.y], [newX, newY],\n [location2.location.x, location2.location.y])\n\n possibleAngle = 0\n drivableIndexes = []\n bbox = []\n if chaseMode:\n 
carInTheImage = semantic.IsThereACarInThePicture(image_segmentation)\n bbox, predicted_distance,predicted_angle = carDetector.getDistance(vehicleToFollow, camera_rgb,carInTheImage,extrapolation=extrapolate,nOfFramesToSkip=nOfFramesToSkip)\n\n if advanced:\n possibleAngle, drivableIndexes = semantic.FindPossibleAngle(image_segmentation,bbox,predicted_angle)\n\n steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(predicted_distance, possibleAngle,None)\n else:\n steer, throttle = drivingControl.PredictSteerAndThrottle(predicted_distance,predicted_angle,None)\n\n # if followDrivenPath:\n vehicle.apply_control(carla.VehicleControl(throttle=throttle,steer=steer))\n\n if evaluateChasingCar:\n evaluation.AddError(location1.location.distance(location2.location),optimalDistance)\n elif followMode:\n angle = 0\n carInTheImage = semantic.IsThereACarInThePicture(image_segmentation)\n bbox, predicted_distance, predicted_angle = carDetector.getDistance(vehicleToFollow, camera_rgb,carInTheImage)\n purePursuit.AddPathPoint(location2.location.x,location2.location.y)\n newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,\n location1.rotation.yaw)\n targetX, targetY = purePursuit.GetNextPoint(location1.location.x,location1.location.y)\n predicted_angle = carDetector.getAngle([location1.location.x,location1.location.y],[newX,newY],[targetX,targetY])\n possibleAngle = predicted_angle\n steer, throttle = drivingControl.PredictSteerAndThrottle(predicted_distance,predicted_angle,None)\n\n # if followDrivenPath:\n vehicle.apply_control(carla.VehicleControl(throttle=throttle,steer=steer))\n if evaluateChasingCar:\n evaluation.AddError(location1.location.distance(location2.location),optimalDistance)\n\n velocity1 = vehicle.get_velocity()\n velocity2 = vehicleToFollow.get_velocity()\n\n visualisation.Add(velocity1,velocity2,location1.location.distance(location2.location), angle)\n\n\n draw_image(display, image_rgb2, image_segmentation,location1, location2,record=record,driveName=driveName,smazat=line)\n display.blit(\n font.render('% 5d FPS (real)' % clock.get_fps(), True, (255, 255, 255)),\n (8, 10))\n display.blit(\n font.render('% 5d FPS (simulated)' % fps, True, (255, 255, 255)),\n (8, 28))\n\n if len(bbox) != 0:\n points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]\n BB_COLOR = (248, 64, 24)\n # draw lines\n # base\n pygame.draw.line(display, BB_COLOR, points[0], points[1])\n pygame.draw.line(display, BB_COLOR, points[1], points[2])\n pygame.draw.line(display, BB_COLOR, points[2], points[3])\n pygame.draw.line(display, BB_COLOR, points[3], points[0])\n # top\n pygame.draw.line(display, BB_COLOR, points[4], points[5])\n pygame.draw.line(display, BB_COLOR, points[5], points[6])\n pygame.draw.line(display, BB_COLOR, points[6], points[7])\n pygame.draw.line(display, BB_COLOR, points[7], points[4])\n # base-top\n pygame.draw.line(display, BB_COLOR, points[0], points[4])\n pygame.draw.line(display, BB_COLOR, points[1], points[5])\n pygame.draw.line(display, BB_COLOR, points[2], points[6])\n pygame.draw.line(display, BB_COLOR, points[3], points[7])\n DrawDrivable(drivableIndexes, image_segmentation.width // 10, image_segmentation.height // 10, display)\n\n real_dist = location1.location.distance(location2.location)\n if chaseMode or followMode:\n myPrint(angle,predicted_angle, possibleAngle,real_dist, predicted_distance,chaseMode)\n pygame.display.flip()\n except Exception as ex:\n print(ex)\n finally:\n print('Ending')\n if evaluateChasingCar:\n 
evaluation.WriteIntoFileFinal(os.path.join('res',resultsName+'.txt'),driveName=driveName)\n myControl.SaveHistoryToFile()\n print('destroying actors.')\n for actor in actor_list:\n actor.destroy()\n\n pygame.quit()\n print('done.')\n\nimport os\nif __name__ == '__main__':\n nOfFramesToSkip = 0\n try:\n # if True:\n optimalDistance = 8\n followDrivenPath = True\n evaluateChasingCar = True\n record = False\n chaseMode = True\n followMode = False\n\n drivesDir = 'drives'\n drivesFileNames = os.listdir(drivesDir)\n drivesFileNames.sort()\n\n # drivesFileNames = ['ride7.p']\n # drivesFileNames = ['ride1.p','ride2.p','ride3.p','ride4.p','ride5.p','ride6.p','ride7.p','ride8.p','ride9.p','ride10.p']\n # drivesFileNames = ['ride11.p', 'ride12.p', 'ride13.p', 'ride14.p', 'ride15.p', 'ride16.p', 'ride17.p', 'ride18.p','ride19.p', 'ride20.p']\n drivesFileNames = ['ride1.p','ride2.p','ride3.p','ride4.p','ride5.p','ride6.p','ride7.p','ride8.p','ride9.p','ride10.p',\n 'ride11.p', 'ride12.p', 'ride13.p', 'ride14.p', 'ride15.p', 'ride16.p', 'ride17.p', 'ride18.p','ride19.p', 'ride20.p']\n\n if evaluateChasingCar:\n for i in range(0, 101, 5):\n nOfFramesToSkip = i\n for fileName in drivesFileNames:\n main(optimalDistance=optimalDistance,followDrivenPath=followDrivenPath,chaseMode=chaseMode, evaluateChasingCar=evaluateChasingCar,driveName=os.path.join(drivesDir,fileName),record=record,followMode=followMode,nOfFramesToSkip=nOfFramesToSkip)\n os.rename('res/results.txt','chaseOutput'+str(nOfFramesToSkip)+'/results.txt')\n\n else:\n main(optimalDistance=optimalDistance, followDrivenPath=followDrivenPath, chaseMode=chaseMode, evaluateChasingCar=evaluateChasingCar,followMode=followMode)\n\n except Exception as ex:\n with open('problem.txt','a') as f:\n f.write(str(ex)+'\\n')\n # print('\\nCancelled by user. Bye!')\n" ]
[ [ "numpy.array", "numpy.dtype", "numpy.reshape" ] ]
lauromoraes/CapsNet-promoter
[ "9b08912648ff5d58a11ebb42225d9ad9851c61ac" ]
[ "teste_plot.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 1 19:35:08 2018\n\n@author: fnord\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n################################################################################# MCC\n\n#k = [0.8593737651, 0.8553389745, 0.7784318972, 0.9113220823, 0.8003214083, 0.8503145005, 0.8212621392, 0.8153599144, 0.8392105873, 0.7344867993]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.8914618592 , 0.8569780601 , 0.8909567193 , 0.9027168258 , 0.8788289546]),\n# 'Arabidopsis_tata' : np.array([0.945435053 , 0.965500981 , 0.950218117 , 0.959896776 , 0.955468017]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.83631664, 0.81214853, 0.84394381, 0.79153428, 0.79061714]),\n# 'Human_non_tata' : np.array([0.6829905720948718, 0.6806508241752528, 0.6771045795907203, 0.7131432179128229, 0.6889706432976804]),\n# 'Mouse_tata' : np.array([0.9262202846, 0.921279092, 0.9362495065, 0.9030511015, 0.8914067925]),\n# 'Mouse_non_tata' : np.array([0.8219135468, 0.8070214523, 0.8126171665, 0.8446262532, 0.8433015854]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Coeficiente de Matthews\")\n#plt.ylabel(\"MCC\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#plt.savefig('mcc.eps', format='eps', dpi=1000)\n\n####################################################################### F1\n\n#k = [0.89743590, 0.89473684, 0.83783784, 0.93506494, 0.85365854, 0.88888889, 0.86746988, 0.86486486, 0.88311688, 0.80000000]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.92863140, 0.90632506, 0.92845528, 0.93620547, 0.92058347]),\n# 'Arabidopsis_tata' : np.array([0.96416938, 0.97719870, 0.96732026, 0.97368421, 0.97068404]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.86956522, 0.85380117, 0.87861272, 0.83798883, 0.83720930]),\n# 'Human_non_tata' : np.array([0.8113842173350582, 0.8190294047334449, 0.816718712466971, 0.8360535931790499, 0.8244705882352941]),\n# 'Mouse_tata' : np.array([0.94573643, 0.94208494, 0.95312500, 0.92830189, 0.91935484]),\n# 'Mouse_non_tata' : np.array([0.89419994, 0.88541973, 0.88648982, 0.90741840, 0.90660321]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"F-score\")\n#plt.ylabel(\"F1\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#plt.savefig('f1.eps', format='eps', dpi=1000)\n\n####################################################################### Sn\n\n#k = [0.94594595, 0.91891892, 0.83783784, 0.97297297, 0.94594595, 0.86486486, 0.97297297, 0.86486486, 0.91891892, 0.75675676]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.93570220, 0.95769882, 0.96615905, 0.95600677, 0.96108291]),\n# 'Arabidopsis_tata' : np.array([0.98666667, 1.00000000, 0.98666667, 0.98666667, 0.99333333]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.83333333, 0.86904762, 0.90476190, 0.89285714, 0.85714286]),\n# 'Human_non_tata' : np.array([0.79151943, 0.86471479, 0.85815245, 0.86622918, 0.88440182]),\n# 'Mouse_tata' : np.array([0.96825397, 0.96825397, 0.96825397, 0.97619048, 0.90476190]),\n# 'Mouse_non_tata' : np.array([0.94226044, 0.92321867, 0.88267813, 0.93918919, 0.93611794]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Sensibilidade\")\n#plt.ylabel(\"Sn\")\n##plt.xlabel(\"Bases 
de dados\")\n#plt.tight_layout()\n#\n#plt.savefig('sn.eps', format='eps', dpi=1000)\n\n####################################################################### Sp\n\n#k = [0.94059406, 0.95049505, 0.94059406, 0.96039604, 0.90099010, 0.97029703, 0.90099010, 0.95049505, 0.94059406, 0.95049505]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.95898778, 0.91972077, 0.94066318, 0.95549738, 0.93455497]),\n# 'Arabidopsis_tata' : np.array([0.96875000, 0.97569444, 0.97222222, 0.97916667, 0.97222222]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.97666667, 0.95333333, 0.95666667, 0.93333333, 0.94666667]),\n# 'Human_non_tata' : np.array([0.88608508, 0.82372026, 0.82624369, 0.85291997, 0.81362653]),\n# 'Mouse_tata' : np.array([0.97167139, 0.96883853, 0.97733711, 0.95467422, 0.97733711]),\n# 'Mouse_non_tata' : np.array([0.89166331, 0.89367700, 0.92871526, 0.91421667, 0.91542489]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Especificidade\")\n#plt.ylabel(\"Sp\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#\n#plt.savefig('sp.eps', format='eps', dpi=1000)\n\n####################################################################### Acc\n\n#k = [0.94202899, 0.94202899, 0.91304348, 0.96376812, 0.91304348, 0.94202899, 0.92028986, 0.92753623, 0.93478261, 0.89855072]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.95106505, 0.93264249, 0.94933794, 0.95567070, 0.94358089]),\n# 'Arabidopsis_tata' : np.array([0.97488584, 0.98401826, 0.97716895, 0.98173516, 0.97945205]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.94531250, 0.93489583, 0.94531250, 0.92447917, 0.92708333]),\n# 'Human_non_tata' : np.array([0.84668770, 0.84079916, 0.83953733, 0.85846477, 0.84311251]),\n# 'Mouse_tata' : np.array([0.97077244, 0.96868476, 0.97494781, 0.96033403, 0.95824635]),\n# 'Mouse_non_tata' : np.array([0.91170032, 0.90537582, 0.91048407, 0.92410606, 0.92361956]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Acurácia\")\n#plt.ylabel(\"Acc\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#\n#plt.savefig('acc.eps', format='eps', dpi=1000)\n\n####################################################################### Prec\n\n#k = [0.85365854, 0.87179487, 0.83783784, 0.90000000, 0.77777778, 0.91428571, 0.78260870, 0.86486486, 0.85000000, 0.84848485]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.92166667, 0.86018237, 0.89358372, 0.91720779, 0.88335925]),\n# 'Arabidopsis_tata' : np.array([0.94267516, 0.95541401, 0.94871795, 0.96103896, 0.94904459]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.90909091, 0.83908046, 0.85393258, 0.78947368, 0.81818182]),\n# 'Human_non_tata' : np.array([0.83227176, 0.77792916, 0.77910174, 0.80790960, 0.77214632]),\n# 'Mouse_tata' : np.array([0.92424242, 0.91729323, 0.93846154, 0.88489209, 0.93442623]),\n# 'Mouse_non_tata' : np.array([0.85080422, 0.85059423, 0.89033457, 0.87772675, 0.87889273]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Precisão\")\n#plt.ylabel(\"Prec\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#\n#plt.savefig('prec.eps', format='eps', dpi=1000)\n\n\n####################################################################### Comp Mcc\n#\n#cnn = np.array([0.86, 0.91, 0.86, 0.84, 0.90, 0.83, 
0.93])\n#caps = np.array([0.88, 0.96, 0.83, 0.81, 0.69, 0.83, 0.92])\n#std = np.array([0.02, 0.01, 0.05, 0.02, 0.01, 0.02, 0.02])\n#\n#ind = np.arange(len(cnn))\n#width = 0.2\n#\n#ax = plt.subplot(111)\n#ax.bar(ind, cnn, width, color='#EDC951', label='CNN')\n#ax.bar(ind+width, caps, width, yerr=std, color='#EB6841', label='CapsNet')\n#\n#ax.set_ylabel('MCC')\n#plt.xticks(ind, ('Arabidopsis_non_tata', 'Arabidopsis_tata', 'Bacillus', 'Ecoli', 'Human_non_tata', 'Mouse_non_tata', 'Mouse_tata'), rotation='vertical')\n##plt.subplots_adjust(top=10.0)\n#\n##ax.legend(loc='upper right', shadow=True)\n#plt.title(\"Comparação do Coeficiente de Matthews\")\n##plt.legend(loc='upper left', prop={'size':10}, bbox_to_anchor=(1,1))\n#plt.legend(loc='bottom right')\n#\n#\n#\n#\n#plt.tight_layout()\n#plt.savefig('comp_mcc.eps', format='eps', dpi=1000)\n#plt.show()\n\n####################################################################### Comp Sn\n\n#cnn = np.array([0.94, 0.95, 0.91, 0.90, 0.90, 0.88, 0.97])\n#caps = np.array([0.96, 0.99, 0.90, 0.87, 0.85, 0.92, 0.96])\n#std = np.array([0.01, 0.01, 0.07, 0.03, 0.04, 0.02, 0.03])\n#\n#ind = np.arange(len(cnn))\n#width = 0.2\n#\n#ax = plt.subplot(111)\n#ax.bar(ind, cnn, width, color='#EDC951', label='CNN')\n#ax.bar(ind+width, caps, width, yerr=std, color='#EB6841', label='CapsNet')\n#\n#ax.set_ylabel('Sn')\n#plt.xticks(ind, ('Arabidopsis_non_tata', 'Arabidopsis_tata', 'Bacillus', 'Ecoli', 'Human_non_tata', 'Mouse_non_tata', 'Mouse_tata'), rotation='vertical')\n##plt.subplots_adjust(top=10.0)\n#\n##ax.legend(loc='upper right', shadow=True)\n#plt.title(\"Comparação da Sensibilidade\")\n##plt.legend(loc='upper left', prop={'size':10}, bbox_to_anchor=(1,1))\n#plt.legend(loc='bottom right')\n#\n#\n#\n#\n#plt.tight_layout()\n#plt.savefig('comp_sn.eps', format='eps', dpi=1000)\n#plt.show()\n\n####################################################################### Comp Sp\n\ncnn = np.array([0.94, 0.97, 0.95, 0.96, 0.98, 0.94, 0.97])\ncaps = np.array([0.94, 0.97, 0.94, 0.95, 0.84, 0.91, 0.97])\nstd = np.array([0.02, 0.00, 0.02, 0.02, 0.03, 0.02, 0.01])\n\nind = np.arange(len(cnn))\nwidth = 0.2\n\nax = plt.subplot(111)\nax.bar(ind, cnn, width, color='#EDC951', label='CNN')\nax.bar(ind+width, caps, width, yerr=std, color='#EB6841', label='CapsNet')\n\nax.set_ylabel('Sp')\nplt.xticks(ind, ('Arabidopsis_non_tata', 'Arabidopsis_tata', 'Bacillus', 'Ecoli', 'Human_non_tata', 'Mouse_non_tata', 'Mouse_tata'), rotation='vertical')\n#plt.subplots_adjust(top=10.0)\n\n#ax.legend(loc='upper right', shadow=True)\nplt.title(\"Comparação da Especificidade\")\n#plt.legend(loc='upper left', prop={'size':10}, bbox_to_anchor=(1,1))\nplt.legend(loc='bottom right')\n\n\n\n\nplt.tight_layout()\nplt.savefig('comp_sp.eps', format='eps', dpi=1000)\nplt.show()\n\n\nprint('END')" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.xticks", "matplotlib.pyplot.savefig", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.array" ] ]
unseenme/mindspore
[ "4ba052f0cd9146ac0ccc4880a778706f1b2d0af8" ]
[ "tests/ut/python/dataset/test_pyfunc.py" ]
[ "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport numpy as np\nimport pytest\n\nimport mindspore.dataset as ds\nfrom mindspore import log as logger\n\nDATA_DIR = [\"../data/dataset/testPyfuncMap/data.data\"]\nSCHEMA_DIR = \"../data/dataset/testPyfuncMap/schema.json\"\nCOLUMNS = [\"col0\", \"col1\", \"col2\"]\nGENERATE_GOLDEN = False\n\n\ndef test_case_0():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test 1-1 PyFunc : lambda x : x + x\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations=(lambda x: x + x))\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_case_1():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test 1-n PyFunc : lambda x : (x , x + x) \")\n\n col = \"col0\"\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n data1 = data1.map(input_columns=col, output_columns=[\"out0\", \"out1\"], operations=(lambda x: (x, x + x)),\n columns_order=[\"out0\", \"out1\"])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n assert np.array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out1\"], golden)\n i = i + 4\n\n\ndef test_case_2():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test n-1 PyFunc : lambda x, y : x + y \")\n\n col = [\"col0\", \"col1\"]\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=col, output_columns=\"out\", operations=(lambda x, y: x + y),\n columns_order=[\"out\"])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_case_3():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test n-m PyFunc : lambda x, y : (x , x + 1, x + y)\")\n\n col = [\"col0\", \"col1\"]\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=col, output_columns=[\"out0\", \"out1\", \"out2\"],\n operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=[\"out0\", \"out1\", \"out2\"])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i, i + 1], 
[i + 2, i + 3]])\n assert np.array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out1\"], golden)\n golden = np.array([[i * 2 + 1, (i + 1) * 2 + 1], [(i + 2) * 2 + 1, (i + 3) * 2 + 1]])\n assert np.array_equal(item[\"out2\"], golden)\n i = i + 4\n\n\ndef test_case_4():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test Parallel n-m PyFunc : lambda x, y : (x , x + 1, x + y)\")\n\n col = [\"col0\", \"col1\"]\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=col, output_columns=[\"out0\", \"out1\", \"out2\"], num_parallel_workers=4,\n operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=[\"out0\", \"out1\", \"out2\"])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n assert np.array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out1\"], golden)\n golden = np.array([[i * 2 + 1, (i + 1) * 2 + 1], [(i + 2) * 2 + 1, (i + 3) * 2 + 1]])\n assert np.array_equal(item[\"out2\"], golden)\n i = i + 4\n\n\n# The execution of this function will acquire GIL\ndef func_5(x):\n return np.ones(x.shape, dtype=x.dtype)\n\n\ndef test_case_5():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test 1-1 PyFunc : lambda x: np.ones(x.shape)\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations=func_5)\n\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[1, 1], [1, 1]])\n assert np.array_equal(item[\"out\"], golden)\n\n\ndef test_case_6():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test PyFunc ComposeOp : (lambda x : x + x), (lambda x : x + x)\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\",\n operations=[(lambda x: x + x), (lambda x: x + x)])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 4, (i + 1) * 4], [(i + 2) * 4, (i + 3) * 4]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_case_7():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test 1-1 PyFunc Multiprocess: lambda x : x + x\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations=(lambda x: x + x),\n num_parallel_workers=4, python_multiprocessing = True)\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_case_8():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test Multiprocess n-m PyFunc : lambda x, y : (x , x + 1, x + y)\")\n\n col = [\"col0\", \"col1\"]\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=col, output_columns=[\"out0\", 
\"out1\", \"out2\"], num_parallel_workers=4,\n operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=[\"out0\", \"out1\", \"out2\"],\n python_multiprocessing=True)\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n assert np.array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out1\"], golden)\n golden = np.array([[i * 2 + 1, (i + 1) * 2 + 1], [(i + 2) * 2 + 1, (i + 3) * 2 + 1]])\n assert np.array_equal(item[\"out2\"], golden)\n i = i + 4\n\n\ndef test_case_9():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test multiple 1-1 PyFunc Multiprocess: lambda x : x + x\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations=[(lambda x: x + x), (lambda x: x + 1),\n (lambda x: x + 2)],\n num_parallel_workers=4, python_multiprocessing=True)\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 2 + 3, (i + 1) * 2 + 3], [(i + 2) * 2 + 3, (i + 3) * 2 + 3]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_pyfunc_execption():\n logger.info(\"Test PyFunc Execption Throw: lambda x : raise Execption()\")\n\n def pyfunc(x):\n raise Exception(\"Pyfunc Throw\")\n\n with pytest.raises(RuntimeError) as info:\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations= pyfunc,\n num_parallel_workers=4)\n for _ in data1:\n pass\n assert \"Pyfunc Throw\" in str(info.value)\n\n\ndef skip_test_pyfunc_execption_multiprocess():\n logger.info(\"Test Multiprocess PyFunc Execption Throw: lambda x : raise Execption()\")\n\n def pyfunc(x):\n raise Exception(\"MP Pyfunc Throw\")\n\n with pytest.raises(RuntimeError) as info:\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations= pyfunc,\n num_parallel_workers=4, python_multiprocessing = True)\n for _ in data1:\n pass\n assert \"MP Pyfunc Throw\" in str(info.value)\n\n\nif __name__ == \"__main__\":\n test_case_0()\n test_case_1()\n test_case_2()\n test_case_3()\n test_case_4()\n test_case_5()\n test_case_6()\n test_case_7()\n test_case_8()\n test_case_9()\n test_pyfunc_execption()\n skip_test_pyfunc_execption_multiprocess()\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.array_equal" ] ]
cimat/data-visualization-patterns
[ "7ca363ffd50d3d2d9da48b650588cd5503449cb3" ]
[ "display-patterns/Discrete Quantities/Pruebas/A36Span_Chart_Seaborn.py" ]
[ "import seaborn as sns\nimport matplotlib.pyplot as plt\nfrom datos import data\nimport pandas as pd\n\nsns.set(style=\"white\")\nf, ax = plt.subplots(figsize=(6, 15))\nd=data('mtcars')\nsubset1, subset2, subset3= d[d.cyl==4], d[d.cyl==6], d[d.cyl==8]\ndatos=pd.DataFrame ({'Max': [max(subset1.mpg), max(subset2.mpg), max(subset3.mpg)],\n\t\t\t \t 'Min': [min(subset1.mpg), min(subset2.mpg), min(subset3.mpg)],\n\t\t\t \t 'Span': [max(subset1.mpg)-min(subset1.mpg), max(subset2.mpg)-min(subset2.mpg), max(subset3.mpg)-min(subset3.mpg)]})\ndatos.index=[4,6,8]\nsns.barplot(x=datos.index, y=datos.Max, color=\"#2ecc71\", linewidth=0)\nsns.barplot(x=datos.index, y=datos.Min, color=\"white\", linewidth=0)\nsns.axlabel('Cylindres','Milles Per Gall')\nplt.title('Range of Milles per Gallon (mpg) by Cylindres (cyl)', family='Serif', size=16)\nplt.show()" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
rahulk29/sram22
[ "9539f4bebd8577163fbab2181c1aef8f33e0ded4" ]
[ "sramgen/testbenches/column_mux_4/column_mux_4.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\nsaved = [\n \"din0\",\n \"din1\",\n \"din2\",\n \"din3\",\n \"sel0\",\n \"sel1\",\n \"sel_b0\",\n \"sel_b1\",\n \"dout\",\n]\n\n\ndef read_data(f):\n data = defaultdict(lambda: [])\n for line in f.readlines():\n values = line.split()\n ctr = 0\n for key in saved:\n if ctr == 0:\n data[\"time\"].append(float(values[ctr]))\n ctr += 1\n data[key].append(float(values[ctr]))\n ctr += 1\n return {k: np.array(v) for k, v in data.items()}\n\n\ndef read_test_data():\n with open(\"./column_mux_4.dat\") as f:\n return read_data(f)\n\n\ndef plot_data(data):\n plt.figure()\n plt.plot(data[\"time\"], data[\"sel0\"])\n plt.plot(data[\"time\"], data[\"sel1\"])\n plt.plot(data[\"time\"], data[\"dout\"])\n plt.legend([\"sel0\", \"sel1\", \"dout\"])\n plt.savefig(\"column_mux_4.png\")\n\n\nif __name__ == \"__main__\":\n data = read_test_data()\n plot_data(data)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.plot" ] ]
comword/TCD20CS4CS7-MLFinal
[ "bb1b1cba25ce4c5cf0338b7b75af3b6f12931c96" ]
[ "src/bert/train_early_access.py" ]
[ "import numpy as np\nfrom keras_bert import load_trained_model_from_checkpoint\nimport os\n\nfrom dataloader import Tokeniser, load_data\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.layers import *\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\npretrained_path = 'pretrained/uncased_L-12_H-768_A-12'\nconfig_path = os.path.join(pretrained_path, 'bert_config.json')\ncheckpoint_path = os.path.join(pretrained_path, 'bert_model.ckpt')\nvocab_path = os.path.join(pretrained_path, 'vocab.txt')\n\nSEQ_LEN = 128\nBATCH_SIZE = 25\nEPOCHS = 5\nLR = 1e-5\nMODEL_NAME = \"bert_early_access\"\n\nbert_model = load_trained_model_from_checkpoint(\n config_path,\n checkpoint_path,\n training=True,\n trainable=True,\n seq_len=SEQ_LEN,\n)\n\ntokeniser = Tokeniser(vocab_path)\nX, y = load_data(tokeniser, 'data/reviews_112_trans-en.jl', target_label='early_access', max_len=SEQ_LEN, batch_size=BATCH_SIZE)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n\ninputs = bert_model.inputs[:2]\ndense = bert_model.get_layer('NSP-Dense').output\noutputs = Dense(units=2, activation='softmax')(dense)\n\nmodel = Model(inputs, outputs)\nmodel.compile(\n optimizer=Adam(LR),\n loss='sparse_categorical_crossentropy',\n metrics=['sparse_categorical_accuracy'],\n)\nmodel.summary()\n\nmcp_save = ModelCheckpoint(\"result/\"+MODEL_NAME+'.best.h5', save_best_only=True, monitor='val_sparse_categorical_accuracy', mode='max')\n\nmodel.fit(\n [X_train, np.zeros_like(X_train)],\n y_train,\n epochs=EPOCHS,\n validation_split=0.1,\n batch_size=BATCH_SIZE,\n callbacks=[EarlyStopping(monitor='val_loss', patience=4), mcp_save]\n)\n\nmodel.save_weights(\"result/\"+MODEL_NAME+\".h5\")\n\npredicts = model.predict([X_test, np.zeros_like(X_test)], verbose=True).argmax(axis=-1)\n\ntp, fp, fn, tn = 0, 0, 0, 0\nfor i in range(len(predicts)):\n if predicts[i] == 1:\n if y_test[i] == 1:\n tp += 1\n else:\n fp += 1\n else:\n if y_test[i] == 1:\n fn += 1\n else:\n tn += 1\n\nprint('Confusion matrix:')\nprint('[{}, {}]'.format(tp, fp))\nprint('[{}, {}]'.format(fn, tn))\n\nprint('Accuracy: %.2f' % (100.0 * (tp + tn) / len(results)))" ]
[ [ "numpy.zeros_like", "sklearn.model_selection.train_test_split" ] ]
qing42102/deep_learning_examples
[ "d7695673e0c4bfe211f303ea5444765e8d4fe5f4" ]
[ "Logistic_Regression.py" ]
[ "# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport numpy as np\n# import tensorflow as tf\nfrom PIL import Image\nimport os\nimport matplotlib.pyplot as plt\nimport pickle\n\n# %%\ndef load_images(path: str) -> list:\n '''\n Load images from a directory. Normalize the image to [0, 1]. Return a list of images array\n '''\n imgs = []\n for f in os.listdir(path):\n ext = os.path.splitext(f)[1]\n if ext.lower() == \".jpg\":\n file_name = os.path.join(path,f)\n \n # Conver the image to an array of floats normalized to [0, 1]\n image = Image.open(file_name).convert(\"F\")\n image_array = np.array(image.getdata())/255.\n\n imgs.append(image_array)\n\n return imgs\n\ntrain_imgs = np.array(load_images(\"train_data/\"))\ntest_imgs = np.array(load_images(\"test_data/\"))\n\nos.chdir(\"labels/\")\ntrain_label = np.loadtxt(\"train_label.txt\")\ntest_label = np.loadtxt(\"test_label.txt\")\nos.chdir(\"..\")\n\nprint(train_imgs.shape, train_label.shape)\nprint(test_imgs.shape, test_label.shape)\n\n# %%\ndef softmax_func(X: np.ndarray, W: np.ndarray) -> np.ndarray:\n '''\n Softmax function for calculating the posterior probability\n X should be Mx(N+1)\n Return a MxK matrix\n '''\n\n exp = np.exp(X @ W)\n sum = np.sum(np.exp(X @ W), axis=1)\n return exp/sum[:, None]\n\ndef logistic_loss(X: np.ndarray, W: np.ndarray, y: np.ndarray) -> float:\n '''\n Logistic regression cross-entropy loss\n '''\n\n log_likelihood = np.log(softmax_func(X, W))\n\n # Create the Kx1 binary vector with 1‐of‐K encoding\n t = np.zeros((y.shape[0], W.shape[1]))\n t[np.arange(y.size), y.astype(int)-1] = 1\n\n total_loss = np.tensordot(t, log_likelihood, axes=2)\n \n return -total_loss/X.shape[0]\n\ndef logistic_loss_grad(X: np.ndarray, W: np.ndarray, y: np.ndarray) -> np.ndarray:\n '''\n Calculate the gradient for each class\n Return a (N+1)xK matrix\n '''\n\n # Create the Kx1 binary vector with 1‐of‐K encoding\n t = np.zeros((y.shape[0], W.shape[1]))\n t[np.arange(y.size), y.astype(int)-1] = 1\n\n y_diff = t-softmax_func(X, W)\n total_grad = X.T @ y_diff\n \n return -total_grad/X.shape[0]\n\ndef classification_accuracy(X: np.ndarray, W: np.ndarray, y: np.ndarray) -> float:\n '''\n Classification accuracy for the predicted and true labels\n '''\n\n # Select the largest probability\n y_pred = np.argmax(softmax_func(X, W), axis=1)+1\n \n accuracy = np.sum(y_pred == y)/X.shape[0]\n\n return accuracy*100\n\ndef digit_accuracy(X: np.ndarray, W: np.ndarray, y: np.ndarray):\n '''\n Classification accuracy for each of the digits\n '''\n\n # Select the largest probability\n y_pred = np.argmax(softmax_func(X, W), axis=1)+1\n \n for i in range(W.shape[1]):\n y_i = y[y==i+1]\n y_pred_i = y_pred[y==i+1]\n accuracy = np.sum(y_pred_i == y_i)/y_i.shape[0]\n print(\"Digit\", i+1, \"accuracy:\", accuracy)\n \n print(\"\\n\")\n\n\ndef gradient_descent(train_X: np.ndarray, train_y: np.ndarray, test_X: np.ndarray, test_y: np.ndarray,\\\n W: np.ndarray, tolerance: float):\n '''\n Steepest gradient descent with a stepsize of inverse square root of iteration number\n The stopping condition is the residual of the gradient and a maximum iteration number of 200\n\n X should be Mx(N+1)\n W should be (N+1)xK\n y should be Mx1\n '''\n\n # Add the bias coefficient to the data\n train_X = np.hstack((train_X, np.ones((train_X.shape[0], 1))))\n test_X = np.hstack((test_X, np.ones((test_X.shape[0], 1))))\n\n training_accuracy_list = []\n testing_accuracy_list = []\n training_loss_list = []\n testing_loss_list = 
[]\n\n grad = logistic_loss_grad(train_X, W, train_y)\n\n # Calculate the residual of the gradient\n res = np.linalg.norm(grad)\n\n iteration = 1\n while res > tolerance and iteration != 200:\n alpha = 1/np.sqrt(iteration)\n W = W - alpha*grad\n\n grad = logistic_loss_grad(train_X, W, train_y)\n res = np.linalg.norm(grad)\n\n training_accuracy = classification_accuracy(train_X, W, train_y)\n training_loss = logistic_loss(train_X, W, train_y)\n\n testing_accuracy = classification_accuracy(test_X, W, test_y)\n testing_loss = logistic_loss(test_X, W, test_y)\n\n training_accuracy_list.append(training_accuracy)\n testing_accuracy_list.append(testing_accuracy)\n training_loss_list.append(training_loss)\n testing_loss_list.append(testing_loss)\n\n print(iteration)\n print(\"Norm of gradient:\", res)\n print(\"Training Accuracy:\", training_accuracy, \"Training Loss:\", training_loss)\n print(\"Testing Accuracy:\", testing_accuracy, \"Testing Loss:\", testing_loss)\n print(\"\\n\")\n\n iteration += 1\n\n print(\"Training digits\")\n digit_accuracy(train_X, W, train_y)\n print(\"Testing digits\")\n digit_accuracy(test_X, W, test_y)\n\n return training_accuracy_list, testing_accuracy_list, training_loss_list, testing_loss_list, W\n\n\n# %%\n\nnum_features = test_imgs.shape[1]\nnum_classes = len(np.unique(test_label))\n\n# Initialize the weight vectors including the bias \nW = np.zeros(shape=(num_features+1, num_classes))\n\nresults = gradient_descent(train_X=train_imgs, train_y=train_label, test_X=test_imgs, \\\n test_y=test_label, W=W, tolerance=10**-2)\n\ntraining_accuracy, testing_accuracy, training_loss, testing_loss, W_optimal = results\niteration = np.arange(len(training_accuracy))\n\nplt.figure(figsize=(8, 5))\nplt.plot(iteration, training_accuracy, label=\"Training accuracy\")\nplt.plot(iteration, testing_accuracy, label=\"Testing accuracy\")\nplt.xlabel(\"Iteration\", fontsize=14)\nplt.ylabel(\"Percentage\", fontsize=14)\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(8, 5))\nplt.plot(iteration, training_loss, label=\"Training loss\")\nplt.plot(iteration, testing_loss, label=\"Testing loss\")\nplt.xlabel(\"Iteration\", fontsize=14)\nplt.ylabel(\"Loss\", fontsize=14)\nplt.legend()\nplt.show()\n\nfor i in range(num_classes):\n plt.imshow(W_optimal[:num_features, i].reshape(28,28))\n plt.colorbar()\n plt.show()\n\nfilehandler = open(\"multiclass_parameters.txt\",\"wb\")\npickle.dump(W_optimal, filehandler)\nfilehandler.close()\n\n# %%\n" ]
[ [ "numpy.sum", "numpy.ones", "matplotlib.pyplot.legend", "numpy.unique", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.exp", "numpy.tensordot", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.colorbar", "numpy.sqrt", "matplotlib.pyplot.plot", "numpy.linalg.norm", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ] ]
jzpang/forte
[ "489fb9cafba6faf5739bda935836b61b5e3d02b6" ]
[ "examples/data_augmentation/reinforcement/main.py" ]
[ "# Copyright 2020 The Forte Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nExample of building a reinforcement learning based,\ndata augmentation enhanced sentence classifier\nbased on pre-trained BERT model.\n\"\"\"\nimport argparse\nimport functools\nimport logging\nimport os\n\nimport torch\nimport torch.nn.functional as F\nimport texar.torch as tx\nfrom transformers import BertForMaskedLM\n\nfrom config import config_data, config_classifier\nfrom utils import model_utils\nfrom forte.models.da_rl import MetaAugmentationWrapper, TexarBertMetaModule\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--pretrained-model-name\",\n type=str,\n default=\"bert-base-uncased\",\n choices=tx.modules.BERTEncoder.available_checkpoints(),\n help=\"Name of the pre-trained downstream checkpoint to load.\",\n)\nparser.add_argument(\n \"--output-dir\",\n default=\"output/\",\n help=\"The output directory where the model checkpoints will be written.\",\n)\nparser.add_argument(\n \"--do-train\", action=\"store_true\", help=\"Whether to run training.\"\n)\nparser.add_argument(\n \"--do-eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\"\n)\nparser.add_argument(\n \"--do-test\",\n action=\"store_true\",\n help=\"Whether to run test on the test set.\",\n)\nparser.add_argument(\n \"--augmentation-model-name\",\n type=str,\n default=\"bert-base-uncased\",\n choices=tx.modules.BERTEncoder.available_checkpoints(),\n help=\"Name of the pre-trained augmentation model checkpoint to load.\",\n)\nparser.add_argument(\n \"--num-aug\",\n type=int,\n default=4,\n help=\"number of augmentation samples when fine-tuning aug model\",\n)\nparser.add_argument(\n \"--classifier-pretrain-epoch\",\n type=int,\n default=10,\n help=\"number of epochs to pretrain the classifier\",\n)\nargs = parser.parse_args()\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nlogging.root.setLevel(logging.INFO)\n\n\nclass RLAugmentClassifierTrainer:\n def __init__(self):\n self._prepare_data_iterator()\n self._init_aug_model()\n self._init_classifier()\n\n def _prepare_data_iterator(self):\n tx.utils.maybe_create_dir(args.output_dir)\n\n # Loads data\n num_train_data = config_data.num_train_data\n self.num_train_steps = int(\n num_train_data\n / config_data.train_batch_size\n * config_data.max_train_epoch\n )\n\n train_dataset = tx.data.RecordData(\n hparams=config_data.train_hparam, device=device\n )\n val_dataset = tx.data.RecordData(\n hparams=config_data.eval_hparam, device=device\n )\n test_dataset = tx.data.RecordData(\n hparams=config_data.test_hparam, device=device\n )\n self.iterator = tx.data.DataIterator(\n {\"train\": train_dataset, \"dev\": val_dataset, \"test\": test_dataset}\n )\n\n self.val_data_iterator = tx.data.DataIterator({\"dev\": val_dataset})\n self.val_data_iterator.switch_to_dataset(\"dev\")\n\n def _init_aug_model(self):\n # pylint: disable=protected-access\n # Builds data augmentation BERT\n aug_model = 
BertForMaskedLM.from_pretrained(\n args.augmentation_model_name\n )\n aug_model.to(device)\n aug_tokenizer = tx.data.BERTTokenizer(\n pretrained_model_name=args.augmentation_model_name\n )\n input_mask_ids = aug_tokenizer._map_token_to_id(\"[MASK]\")\n # Builds augmentation optimizer\n aug_lr = 4e-5\n param_optimizer = list(aug_model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in param_optimizer\n if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.01,\n },\n {\n \"params\": [\n p\n for n, p in param_optimizer\n if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n aug_optim = tx.core.BertAdam(\n optimizer_grouped_parameters,\n betas=(0.9, 0.999),\n eps=1e-6,\n lr=aug_lr,\n )\n # Builds data augmentation wrapper\n self.aug_wrapper = MetaAugmentationWrapper(\n aug_model, aug_optim, input_mask_ids, device, args.num_aug\n )\n\n def _init_classifier(self):\n # Builds BERT for classification task.\n config_downstream = {\n k: v\n for k, v in config_classifier.__dict__.items()\n if not k.startswith(\"__\") and k != \"hyperparams\"\n }\n\n self.classifier = tx.modules.BERTClassifier(\n pretrained_model_name=args.pretrained_model_name,\n hparams=config_downstream,\n )\n self.classifier.to(device)\n\n # Builds learning rate decay scheduler\n classifier_lr = 4e-5\n vars_with_decay = []\n vars_without_decay = []\n for name, param in self.classifier.named_parameters():\n if \"layer_norm\" in name or name.endswith(\"bias\"):\n vars_without_decay.append(param)\n else:\n vars_with_decay.append(param)\n opt_params = [\n {\n \"params\": vars_with_decay,\n \"weight_decay\": 0.01,\n },\n {\n \"params\": vars_without_decay,\n \"weight_decay\": 0.0,\n },\n ]\n self.optim = tx.core.BertAdam(\n opt_params, betas=(0.9, 0.999), eps=1e-6, lr=classifier_lr\n )\n num_warmup_steps = int(\n self.num_train_steps * config_data.warmup_proportion\n )\n self.scheduler = torch.optim.lr_scheduler.LambdaLR(\n self.optim,\n functools.partial(\n model_utils.get_lr_multiplier,\n total_steps=self.num_train_steps,\n warmup_steps=num_warmup_steps,\n ),\n )\n\n def pre_train_classifier_epoch(self):\n r\"\"\"Pre-trains model on the training set\n for better weight initialization.\n \"\"\"\n self.iterator.switch_to_dataset(\"train\")\n self.classifier.train()\n\n for _ in range(args.classifier_pretrain_epoch):\n for batch in self.iterator:\n self.optim.zero_grad()\n\n input_ids = batch[\"input_ids\"]\n segment_ids = batch[\"segment_ids\"]\n labels = batch[\"label_ids\"]\n input_length = (1 - (input_ids == 0).int()).sum(dim=1)\n\n logits, _ = self.classifier(\n input_ids, input_length, segment_ids\n )\n loss = self._compute_loss(logits, labels)\n\n loss.backward()\n self.optim.step()\n self.scheduler.step()\n\n def train_epoch(self):\n r\"\"\"Trains on the training set, and evaluates on the validation set\n periodically.\n \"\"\"\n self.iterator.switch_to_dataset(\"train\")\n self.classifier.train()\n self.optim.zero_grad()\n\n for batch in self.iterator:\n input_ids = batch[\"input_ids\"]\n input_mask = batch[\"input_mask\"]\n segment_ids = batch[\"segment_ids\"]\n labels = batch[\"label_ids\"]\n\n # Train augmentation model params phi.\n self.aug_wrapper.reset_model()\n # Iterate over training instances.\n num_instances = len(input_ids)\n for i in range(num_instances):\n features = (\n input_ids[i],\n input_mask[i],\n segment_ids[i],\n labels[i],\n )\n\n # Augmented instance with params 
phi exposed\n (\n aug_probs,\n input_mask_aug,\n segment_ids_aug,\n label_ids_aug,\n ) = self.aug_wrapper.augment_instance(features)\n\n # Compute classifier loss.\n self.classifier.zero_grad()\n input_length_aug = ((input_mask_aug == 1).int()).sum(dim=1)\n logits, _ = self.classifier(\n aug_probs, input_length_aug, segment_ids_aug\n )\n loss = self._compute_loss(logits, label_ids_aug)\n # Update classifier params on meta_model.\n meta_model = TexarBertMetaModule(self.classifier)\n meta_model = self.aug_wrapper.update_meta_model(\n meta_model, loss, self.classifier, self.optim\n )\n\n # Compute grads of aug_model on validation data.\n for val_batch in self.val_data_iterator: # one batch\n val_input_ids = val_batch[\"input_ids\"]\n val_segment_ids = val_batch[\"segment_ids\"]\n val_labels = val_batch[\"label_ids\"]\n val_input_length = (1 - (val_input_ids == 0).int()).sum(\n dim=1\n )\n val_logits, _ = meta_model(\n val_input_ids, val_input_length, val_segment_ids\n )\n val_loss = self._compute_loss(val_logits, val_labels)\n val_loss = (\n val_loss\n / num_instances\n / args.num_aug\n / len(self.val_data_iterator)\n )\n val_loss.backward()\n\n # Update aug_model param phi.\n self.aug_wrapper.update_phi()\n\n # Train classifier with augmented batch\n (\n input_probs,\n input_masks,\n segment_ids,\n label_ids,\n ) = self.aug_wrapper.augment_batch(\n (input_ids, input_mask, segment_ids, labels)\n )\n\n input_length = ((input_masks == 1).int()).sum(dim=1)\n self.optim.zero_grad()\n logits, _ = self.classifier(input_probs, input_length, segment_ids)\n loss = self._compute_loss(logits, label_ids)\n loss.backward()\n self.optim.step()\n self.scheduler.step()\n self._display_logging(loss)\n\n @torch.no_grad()\n def eval_epoch(self):\n \"\"\"Evaluates on the dev set.\"\"\"\n self.iterator.switch_to_dataset(\"dev\")\n self.classifier.eval()\n\n nsamples = 0\n avg_rec = tx.utils.AverageRecorder()\n for batch in self.iterator:\n input_ids = batch[\"input_ids\"]\n segment_ids = batch[\"segment_ids\"]\n labels = batch[\"label_ids\"]\n input_length = (1 - (input_ids == 0).int()).sum(dim=1)\n\n logits, preds = self.classifier(\n input_ids, input_length, segment_ids\n )\n loss = self._compute_loss(logits, labels)\n accu = tx.evals.accuracy(labels, preds)\n\n batch_size = input_ids.size()[0]\n avg_rec.add([accu, loss], batch_size)\n nsamples += batch_size\n logging.info(\n \"eval accu: %.4f; loss: %.4f; nsamples: %d\",\n avg_rec.avg(0),\n avg_rec.avg(1),\n nsamples,\n )\n\n @torch.no_grad()\n def test_epoch(self, test_file):\n \"\"\"Does predictions on the test set.\"\"\"\n self.iterator.switch_to_dataset(\"test\")\n self.classifier.eval()\n\n _all_preds = []\n nsamples = 0\n avg_rec = tx.utils.AverageRecorder()\n for batch in self.iterator:\n input_ids = batch[\"input_ids\"]\n segment_ids = batch[\"segment_ids\"]\n labels = batch[\"label_ids\"]\n input_length = (1 - (input_ids == 0).int()).sum(dim=1)\n\n logits, preds = self.classifier(\n input_ids, input_length, segment_ids\n )\n loss = self._compute_loss(logits, labels)\n accu = tx.evals.accuracy(labels, preds)\n\n batch_size = input_ids.size()[0]\n avg_rec.add([accu, loss], batch_size)\n nsamples += batch_size\n\n _all_preds.extend(preds.tolist())\n\n logging.info(\n \"test accu: %.4f; loss: %.4f; nsamples: %d\",\n avg_rec.avg(0),\n avg_rec.avg(1),\n nsamples,\n )\n\n output_file = os.path.join(args.output_dir, test_file)\n with open(output_file, \"w+\") as writer:\n writer.write(\"\\n\".join(str(p) for p in _all_preds))\n logging.info(\"test output 
written to %s\", output_file)\n\n def _compute_loss(self, logits, labels):\n r\"\"\"Compute loss.\"\"\"\n if self.classifier.is_binary:\n loss = F.binary_cross_entropy(\n logits.view(-1), labels.view(-1), reduction=\"mean\"\n )\n else:\n loss = F.cross_entropy(\n logits.view(-1, self.classifier.num_classes),\n labels.view(-1),\n reduction=\"mean\",\n )\n return loss\n\n def _display_logging(self, loss):\n step = self.scheduler.last_epoch\n dis_steps = config_data.display_steps\n if dis_steps > 0 and step % dis_steps == 0:\n logging.info(\"step: %d; loss: %f\", step, loss)\n\n eval_steps = config_data.eval_steps\n if eval_steps > 0 and step % eval_steps == 0:\n self._eval_epoch()\n self.classifier.train()\n\n\ndef main():\n trainer = RLAugmentClassifierTrainer()\n trainer.pre_train_classifier_epoch()\n if args.do_train:\n for k in range(config_data.max_train_epoch):\n logging.info(\"training epoch %d\", k)\n trainer.train_epoch()\n if args.do_eval:\n trainer.eval_epoch()\n if args.do_test:\n trainer.test_epoch(\"test_results.tsv\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available" ] ]
ChristianFeldmann/PresentationMaterial
[ "a5182a5d50ed944fa7738f3919267ea056b72e63" ]
[ "VideoCodingBasics/Figures/Tnsformation/quantization.py" ]
[ "import numpy\n\ndata = numpy.array(\n [[968., 205.69, 120.6, -38.29, -81., -28.4, 33.12, 41.77],\n [89.13, 224.38, 132.1, -56.46, -102.6, -36.72, 39.05, 47.77],\n [-4.85, 58., 47.38, -62.13, -67.61, -21.83, 22.13, 23.34],\n [-111.6, -23.74, -2.54, -36.53, -19.46, -1.74, 0.23, -3.56],\n [-77., -18.35, -12.42, -7.35, 17., 9.14, -9.74, -13.01],\n [-19.02, 18.74, -9.08, 6.43, 19.95, 4.73, -8.09, -10.21],\n [20.95, 27.95, -11.87, -8.28, -1.22, -4.14, -3.88, -4.11],\n [16.52, 13.44, -14.14, -14.81, -12.66, -6.63, -0.68, 1.91]])\n\ndataQuant = numpy.round(data / 10).astype(int)\nprint(dataQuant)\n\ndataQuant2 = numpy.round(data / 20).astype(int)\nprint(dataQuant2)\n\nreconstructions2 = dataQuant2 * 20\nprint(reconstructions2)\n" ]
[ [ "numpy.array", "numpy.round" ] ]
cics-nd/rans-uncertainty
[ "1ee554d64550377dfa4295bb05e61bab98e43ee4" ]
[ "training-data/periodic-hills/preProcess.py" ]
[ "\"\"\"\nA simple pre-processing file for converting raw OpenFOAM data to \nPyTorch tensors. This makes reading the data by the neural network\nsignifcantly faster. Additionally, depending on the flow, spacial\naverages can be taken to increase smoothness of R-S fields.\n===\nDistributed by: Notre Dame CICS (MIT Liscense)\n- Associated publication:\nurl: https://www.sciencedirect.com/science/article/pii/S0021999119300464\ndoi: https://doi.org/10.1016/j.jcp.2019.01.021\ngithub: https://github.com/cics-nd/rans-uncertainty\n===\n\"\"\"\nimport sys, random, re, os\nimport numpy as np\nimport torch as th\nimport scipy as sc\n\ndef readFieldData(fileName):\n \"\"\"\n Reads in openFoam field (vector, or tensor)\n Args:\n fileName(string): File name\n Returns:\n data (FloatTensor): tensor of data read from file\n \"\"\"\n #Attempt to read text file and extact data into a list\n try:\n print('Attempting to read file: '+str(fileName))\n rgx = re.compile('[%s]' % '(){}<>')\n rgx2 = re.compile('\\((.*?)\\)') #regex to get stuff in parenthesis\n file_object = open(str(fileName), \"r\").read().splitlines()\n \n #Find line where the internal field starts\n print('Parsing file...')\n fStart = [file_object.index(i) for i in file_object if 'internalField' in i][-1] + 1\n fEnd = [file_object.index(i) for i in file_object[fStart:] if ';' in i][0]\n \n data_list = [[float(rgx.sub('',elem)) for elem in vector.split()] for vector in file_object[fStart+1:fEnd] if not rgx2.search(vector) is None]\n #For scalar fields\n if(len(data_list) == 0):\n data_list = [float(rgx.sub('',elem)) for elem in file_object[fStart+1:fEnd] if not len(rgx.sub('',elem)) is 0]\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n return\n except IOError as err:\n print(\"File read error: {0}\".format(err))\n return\n except:\n print(\"Unexpected error:{0}\".format(sys.exc_info()[0]))\n return\n\n print('Data field file successfully read.')\n data = th.DoubleTensor(data_list)\n return data\n\ndef readScalarData(timeStep, fileName, dir = ''):\n return readFieldData(str(dir)+'/'+str(timeStep)+'/'+fileName)\n\ndef readVectorData(timeStep, fileName, dir = ''):\n return readFieldData(str(dir)+'/'+str(timeStep)+'/'+fileName)\n\ndef readTensorData(timeStep, fileName, dir = ''):\n data0 = readFieldData(str(dir)+'/'+str(timeStep)+'/'+fileName)\n #Reshape into [nCells,3,3] Tensor\n return data0.view(data0.size()[0],3,-1)\n\ndef readSymTensorData(timeStep, fileName, dir = ''):\n data0 = readFieldData(str(dir)+'/'+str(timeStep)+'/'+fileName)\n # Reshape into [nCells,3,3] Tensor\n # Following symmTensor.H indexes since this is RAW openFOAM output\n data = th.DoubleTensor(data0.size()[0], 3, 3)\n data[:,0,:] = data0[:,0:3] #First Row is consistent\n data[:,1,0] = data0[:,1] #YX = XY\n data[:,1,1] = data0[:,3] #YY\n data[:,1,2] = data0[:,4] #YZ\n data[:,2,0] = data0[:,2] #ZX = XZ\n data[:,2,1] = data0[:,4] #ZY = YZ\n data[:,2,2] = data0[:,5]\n\n return data.view(-1,9)\n\ndef readCellCenters(timeStep, dir=''):\n \"\"\"\n Reads in openFoam cellCenters field which contains a list of\n coordinates associated with each finite volume cell center.\n Generated using the following utility:\n https://bitbucket.org/peterjvonk/cellcenters\n Args:\n timeStep (float): Time value to read in at\n fileName(string): File name\n Returns:\n data (FloatTensor): array of data read from file\n \"\"\"\n #Attempt to read text file and extact data into a list\n try:\n file_path = dir+\"/\"+str(timeStep)+\"/cellCenters\"\n print('Reading mesh cell centers 
'+file_path)\n\n rgx = re.compile('\\((.*?)\\)') #regex to get stuff in parenthesis\n file_object = open(file_path, \"r\").read().splitlines()\n #Find line where the internal field starts\n commentLines = [file_object.index(line) for line in file_object if \"//*****\" in line.replace(\" \", \"\")]\n fStart = [file_object.index(i) for i in file_object if 'internalField' in i][-1] + 1\n fEnd = [file_object.index(i) for i in file_object[fStart:] if ';' in i][0]\n \n cell_list0 = [rgx.search(center).group(1) for center in file_object[fStart+1:fEnd] if not rgx.search(center) is None]\n cell_list = [[float(elem) for elem in c0.split()] for c0 in cell_list0]\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n return\n except IOError as err:\n print(\"File read error: {0}\".format(err))\n return\n except:\n print(\"Unexpected error:{0}\".format(sys.exc_info()[0]))\n return\n\n return th.FloatTensor(cell_list)\n\ndef saveTensor(tensor, fieldName, timeStep, dir=''):\n \"\"\"\n Save PyTorch field tensor\n \"\"\"\n print('Saving tensor field: {}-torch.th'.format(fieldName))\n th.save(tensor, '{}/{}/{}-torch.th'.format(dir, timeStep, fieldName))\n\ndef fieldAverage(field, index_list):\n f0 = []\n for z_i in index_list:\n f0.append(th.sum(field[z_i],0)/len(z_i))\n\n return th.stack(f0)\n\nif __name__ == '__main__':\n\n # LES\n les_dir = 'LES' # Directory\n les_time = 1000 # Time step\n\n # RANS\n rans_dir = 'RANS'\n rans_time = 90\n\n # Cell Centers\n cell_dir = 'RANS'\n cell_time = 90\n\n # First read cell centers\n # Cell centers field is generated using the following utility:\n # https://bitbucket.org/peterjvonk/cellcenters\n cell_centers = readCellCenters(cell_time, cell_dir)\n\n # Get unique x & y coords\n cell_n = cell_centers.numpy()\n cell_coord = np.array([cell_n[:,0], cell_n[:,1]])\n cell_xy = np.unique(cell_n[:,0:2], axis=0)\n saveTensor(cell_xy, 'cellCenters', rans_time, rans_dir)\n \n # Now get averaging indexes (where x & y are the same)\n avg_index = []\n for i in range(cell_xy.shape[0]):\n if(i%100 == 0):\n print('Finding average indexes {}/{}'.format(i, len(cell_xy)))\n avg_index.append(np.where(np.all(cell_n[:,0:2] == cell_xy[i], axis=1))[0])\n\n # Read in fields\n k = readScalarData(rans_time, 'k', dir=rans_dir)\n s = readTensorData(rans_time, 'S', dir=rans_dir)\n r = readTensorData(rans_time, 'R', dir=rans_dir)\n\n les_UPrime = readSymTensorData(les_time, 'UPrime2Mean', dir=les_dir)\n \n # Now average fields in Z direction\n k0 = fieldAverage(k, avg_index)\n r0 = fieldAverage(r, avg_index)\n s0 = fieldAverage(s, avg_index)\n\n les_UPrime0 = fieldAverage(les_UPrime, avg_index)\n \n #Save averaged fields\n saveTensor(k0, 'k', rans_time, rans_dir)\n saveTensor(s0, 'S', rans_time, rans_dir)\n saveTensor(r0, 'R', rans_time, rans_dir)\n\n saveTensor(les_UPrime0, 'UPrime2Mean', les_time, les_dir)\n" ]
[ [ "torch.sum", "torch.FloatTensor", "torch.stack", "torch.DoubleTensor", "numpy.all", "numpy.array", "numpy.unique" ] ]
pyri-project/pyri-robotics
[ "c957b00bfef664519f49140d9dd65736cdc8b053" ]
[ "src/pyri/robotics/util/invkin.py" ]
[ "import numpy as np\nimport general_robotics_toolbox as rox\nfrom scipy.optimize import lsq_linear\n\ndef update_ik_info3(robot_rox, T_desired, q_current): # inverse kinematics that uses Least Square solver\n \n # R_d, p_d: Desired orientation and position\n R_d = T_desired.R\n p_d = T_desired.p\n d_q = q_current\n\n num_joints = len(robot_rox.joint_type)\n \n q_cur = d_q # initial guess on the current joint angles\n q_cur = q_cur.reshape((num_joints,1)) \n \n max_steps = 200 # number of steps to for convergence\n \n # print_div( \"<br> q_cur \" + str(q_cur) ) # DEBUG\n\n hist_b = []\n \n itr = 0 # Iterations\n converged = False\n while itr < max_steps and not converged:\n \n pose = rox.fwdkin(robot_rox,q_cur.flatten())\n R_cur = pose.R\n p_cur = pose.p\n \n #calculate current Jacobian\n J0T = rox.robotjacobian(robot_rox,q_cur.flatten())\n \n # Transform Jacobian to End effector frame from the base frame\n Tr = np.zeros((6,6))\n Tr[:3,:3] = R_cur.T \n Tr[3:,3:] = R_cur.T\n J0T = Tr @ J0T\n \n # Jp=J0T[3:,:]\n # JR=J0T[:3,:] #decompose to position and orientation Jacobian\n \n # Error in position and orientation\n # ER = np.matmul(R_cur, np.transpose(R_d))\n ER = np.matmul(np.transpose(R_d),R_cur)\n #print_div( \"<br> ER \" + str(ER) ) # DEBUG\n\n # EP = p_cur - p_d \n EP = R_cur.T @ (p_cur - p_d) \n #print_div( \"<br> EP \" + str(EP) ) # DEBUG\n\n #decompose ER to (k,theta) pair\n k, theta = rox.R2rot(ER) \n # print_div( \"<br> k \" + str(k) ) # DEBUG\n # print_div( \"<br> theta \" + str(theta) ) # DEBUG\n \n ## set up s for different norm for ER\n # s=2*np.dot(k,np.sin(theta)) #eR1\n # s = np.dot(k,np.sin(theta/2)) #eR2\n s = np.sin(theta/2) * np.array(k) #eR2\n # s=2*theta*k #eR3\n # s=np.dot(J_phi,phi) #eR4\n # print_div( \"<br> s \" + str(s) ) # DEBUG \n\n Kp = np.eye(3)\n KR = np.eye(3) #gains for position and orientation error\n \n vd = - Kp @ EP\n wd = - KR @ s\n \n b = np.concatenate([wd,vd])\n np.nan_to_num(b, copy=False, nan=0.0, posinf=None, neginf=None)\n # print(b)\n # print(J0T)\n \n # DEBUG --------------\n hist_b.append(b)\n if itr > 0:\n error_cur = np.linalg.norm(hist_b[itr-1]) - np.linalg.norm(hist_b[itr])\n #print(\"Error= \" + str(error_cur))\n # DEBUG --------------\n\n res = lsq_linear(J0T,b)\n\n if res.success: \n qdot_star = res.x \n else:\n print(\"Any solution could not found\")\n qdot_star = np.finfo(float).eps * np.ones(num_joints)\n\n # find best step size to take\n # alpha=fminbound(min_alpha,0,1,args=(q_cur,qdot_star,Sawyer_def,Rd,pd,w,Kp))\n alpha = 0.3 # Step size # 1.0 \n delta = alpha * qdot_star \n # print_div( \"<br> delta \" + str(delta) ) # DEBUG\n \n # Convergence Check\n converged = (np.abs(np.hstack((s,EP))) < 0.0001).all()\n\n if not converged:\n # Update for next iteration\n q_cur = q_cur + delta.reshape((num_joints,1))\n\n # Normalize angles betweeen -pi to pi\n q_cur = normalizeAngles(q_cur)\n \n # print_div( \"<br> converged? \" + str(converged) ) # DEBUG\n # print( \"converged? 
\" + str(converged) ) # DEBUG\n \n itr += 1 # Increase the iteration\n #print(itr)\n #print(converged)\n # print(delta)\n # print(q_cur)\n \n # joints_text=\"\"\n # for i in q_cur:\n # joints_text+= \"(%.3f, %.3f) \" % (np.rad2deg(i), i) \n # print_div_ik_info(str(rox.Transform(R_d,p_d)) +\"<br>\"+ joints_text +\"<br>\"+ str(converged) + \", itr = \" + str(itr))\n return np.squeeze(q_cur), converged\n\ndef normalizeAngle(angle):\n \"\"\"\n :param angle: (float)\n :return: (float) the angle in [-pi, pi]\n \"\"\"\n # while angle > np.pi:\n # angle -= 2 * np.pi\n # while angle < -np.pi:\n # angle += 2 * np.pi\n # return angle \n return angle\n\ndef normalizeAngles(angles):\n for idx, angle in np.ndenumerate(angles):\n angles[idx] = normalizeAngle(angle)\n\n return angles" ]
[ [ "numpy.eye", "numpy.ones", "numpy.transpose", "numpy.zeros", "numpy.squeeze", "numpy.finfo", "scipy.optimize.lsq_linear", "numpy.ndenumerate", "numpy.hstack", "numpy.array", "numpy.sin", "numpy.concatenate", "numpy.nan_to_num", "numpy.linalg.norm" ] ]
BogdanMarghescu/Deep-Learning-Coursera
[ "af2c71c024f0ea911f89ed476686bd09ce37e87c" ]
[ "Sequence Models/Emojify/emo_utils.py" ]
[ "import csv\nimport emoji\nimport numpy as np\nemoji_dictionary = {\"0\": \"\\u2764\\uFE0F\", \"1\": \":baseball:\", \"2\": \":smile:\", \"3\": \":disappointed:\", \"4\": \":fork_and_knife:\"}\n\n\ndef read_glove_vecs(glove_file):\n with open(glove_file, encoding=\"utf8\") as f:\n words = set()\n word_to_vec_map = {}\n for line in f:\n line = line.strip().split()\n curr_word = line[0]\n words.add(curr_word)\n word_to_vec_map[curr_word] = np.array(line[1:], dtype=np.float64)\n i = 1\n words_to_index = {}\n index_to_words = {}\n for w in sorted(words):\n words_to_index[w] = i\n index_to_words[i] = w\n i = i + 1\n return words_to_index, index_to_words, word_to_vec_map\n\n\ndef softmax(x):\n # Compute softmax values for each sets of scores in x\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()\n\n\ndef read_csv(filename='data/emojify_data.csv'):\n phrase = []\n emoji = []\n with open(filename) as csvDataFile:\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n phrase.append(row[0])\n emoji.append(row[1])\n X = np.asarray(phrase)\n Y = np.asarray(emoji, dtype=int)\n return X, Y\n\n\ndef convert_to_one_hot(Y, C):\n Y = np.eye(C)[Y.reshape(-1)]\n return Y\n\n\ndef label_to_emoji(label):\n # Converts a label (int or string) into the corresponding emoji code (string) ready to be printed\n return emoji.emojize(emoji_dictionary[str(label)], use_aliases=True)\n\n\ndef print_predictions(X, pred):\n for i in range(X.shape[0]):\n print(X[i], label_to_emoji(int(pred[i])))\n\n\ndef predict(X, Y, W, b, word_to_vec_map):\n \"\"\"\n Given X (sentences) and Y (emoji indices), predict emojis and compute the accuracy of your model over the given set.\n \n Arguments:\n X -- input data containing sentences, numpy array of shape (m, None)\n Y -- labels, containing index of the label emoji, numpy array of shape (m, 1)\n \n Returns:\n pred -- numpy array of shape (m, 1) with your predictions\n \"\"\"\n m = X.shape[0]\n pred = np.zeros((m, 1))\n for j in range(m): # Loop over training examples\n # Split jth test example (sentence) into list of lower case words\n words = X[j].lower().split()\n # Average words' vectors\n avg = np.zeros((50,))\n for w in words:\n avg += word_to_vec_map[w]\n avg /= len(words)\n # Forward propagation\n Z = W @ avg + b\n A = softmax(Z)\n pred[j] = np.argmax(A)\n print(\"Accuracy: \" + str(np.mean((pred[:] == Y.reshape(Y.shape[0], 1)[:]))))\n return pred\n" ]
[ [ "numpy.eye", "numpy.zeros", "numpy.asarray", "numpy.argmax", "numpy.max", "numpy.array" ] ]
Prithwijit-Chak/simpeg
[ "d93145d768b5512621cdd75566b4a8175fee9ed3" ]
[ "tutorials/13-joint_inversion/plot_inv_1_joint_pf_pgi_full_info_tutorial.py" ]
[ "\"\"\"\nJoint PGI of Gravity + Magnetic on an Octree mesh using full petrophysical information\n======================================================================================\n\n\nThis tutorial shows through a joint inversion of Gravity and Magnetic data on an\nOctree mesh how to use the PGI framework introduced in Astic & Oldenburg (2019)\nand Astic et al. (2021) to include petrophysical information into geophysical\ninversions for mutli-physics inversion.\n\nThibaut Astic, Douglas W. Oldenburg,\nA framework for petrophysically and geologically guided geophysical inversion\nusing a dynamic Gaussian mixture model prior, Geophysical Journal International,\nVolume 219, Issue 3, December 2019, Pages 1989–2012, DOI:\n`10.1093/gji/ggz389 <https://doi.org/10.1093/gji/ggz389>`_.\n\n\nThibaut Astic, Lindsey J. Heagy, Douglas W Oldenburg,\nPetrophysically and geologically guided multi-physics inversion using a dynamic\nGaussian mixture model, Geophysical Journal International,\nVolume 224, Issue 1, January 2021, Pages 40-68, DOI: `10.1093/gji/ggaa378\n<https://doi.org/10.1093/gji/ggaa378>`_.\n\n\"\"\"\n#########################################################################\n# Import modules\n# --------------\n#\n\nimport discretize as ds\nimport SimPEG.potential_fields as pf\nfrom SimPEG import (\n maps,\n utils,\n simulation,\n inverse_problem,\n inversion,\n optimization,\n regularization,\n data_misfit,\n directives,\n)\nfrom SimPEG.utils import io_utils\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\n# Reproducible science\nnp.random.seed(518936)\n\n#########################################################################\n# Setup\n# -----\n#\n\n# Load Mesh\nmesh_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/mesh_tutorial.ubc\"\n)\nmesh = ds.TreeMesh.read_UBC(mesh_file)\n\n# Load True geological model for comparison with inversion result\ntrue_geology_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/geology_true.mod\"\n)\ntrue_geology = mesh.read_model_UBC(true_geology_file)\n\n# Plot true geology model\nfig, ax = plt.subplots(1, 4, figsize=(20, 4))\nticksize, labelsize = 14, 16\nfor _, axx in enumerate(ax):\n axx.set_aspect(1)\n axx.tick_params(labelsize=ticksize)\nmesh.plotSlice(\n true_geology,\n normal=\"X\",\n ax=ax[0],\n ind=-17,\n clim=[0, 2],\n pcolorOpts={\"cmap\": \"inferno_r\"},\n grid=True,\n)\nmesh.plotSlice(\n true_geology,\n normal=\"Y\",\n ax=ax[1],\n clim=[0, 2],\n pcolorOpts={\"cmap\": \"inferno_r\"},\n grid=True,\n)\ngeoplot = mesh.plotSlice(\n true_geology,\n normal=\"Z\",\n ax=ax[2],\n clim=[0, 2],\n ind=-10,\n pcolorOpts={\"cmap\": \"inferno_r\"},\n grid=True,\n)\ngeocb = plt.colorbar(geoplot[0], cax=ax[3], ticks=[0, 1, 2])\ngeocb.set_label(\n \"True geology model\\n(classification/density/mag. 
susc.)\", fontsize=labelsize\n)\ngeocb.set_ticklabels(\n [\"BCKGRD (0 g/cc; 0 SI)\", \"PK (-0.8 g/cc; 5e-3 SI)\", \"VK (-0.2 g/cc; 2e-2 SI)\"]\n)\ngeocb.ax.tick_params(labelsize=ticksize)\nax[3].set_aspect(10)\nplt.show()\n\n# Load geophysical data\ndata_grav_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/gravity_data.obs\"\n)\ndata_grav = io_utils.read_grav3d_ubc(data_grav_file)\ndata_mag_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/magnetic_data.obs\"\n)\ndata_mag = io_utils.read_mag3d_ubc(data_mag_file)\n\n# plot data and mesh\nfig, ax = plt.subplots(2, 2, figsize=(15, 10))\nax = ax.reshape(-1)\nplt.gca().set_aspect(\"equal\")\nplt.gca().set_xlim(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n)\nplt.gca().set_ylim(\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n)\nmesh.plotSlice(\n np.ones(mesh.nC),\n normal=\"Z\",\n ind=int(-10),\n grid=True,\n pcolorOpts={\"cmap\": \"Greys\"},\n ax=ax[0],\n)\nmm = utils.plot2Ddata(\n data_grav.survey.receiver_locations,\n -data_grav.dobs,\n ax=ax[0],\n level=True,\n nx=20,\n ny=20,\n dataloc=True,\n ncontour=12,\n shade=True,\n contourOpts={\"cmap\": \"Blues_r\", \"alpha\": 0.8},\n levelOpts={\"colors\": \"k\", \"linewidths\": 0.5, \"linestyles\": \"dashed\"},\n)\nax[0].set_aspect(1)\nax[0].set_title(\n \"Gravity data values and locations,\\nwith mesh and geology overlays\", fontsize=16\n)\nplt.colorbar(mm[0], cax=ax[2], orientation=\"horizontal\")\nax[2].set_aspect(0.05)\nax[2].set_title(\"mGal\", fontsize=16)\nmesh.plotSlice(\n np.ones(mesh.nC),\n normal=\"Z\",\n ind=int(-10),\n grid=True,\n pcolorOpts={\"cmap\": \"Greys\"},\n ax=ax[1],\n)\nmm = utils.plot2Ddata(\n data_mag.survey.receiver_locations,\n data_mag.dobs,\n ax=ax[1],\n level=True,\n nx=20,\n ny=20,\n dataloc=True,\n ncontour=11,\n shade=True,\n contourOpts={\"cmap\": \"Reds\", \"alpha\": 0.8},\n levelOpts={\"colors\": \"k\", \"linewidths\": 0.5, \"linestyles\": \"dashed\"},\n)\nax[1].set_aspect(1)\nax[1].set_title(\n \"Magnetic data values and locations,\\nwith mesh and geology overlays\", fontsize=16\n)\nplt.colorbar(mm[0], cax=ax[3], orientation=\"horizontal\")\nax[3].set_aspect(0.05)\nax[3].set_title(\"nT\", fontsize=16)\n# overlay true geology model for comparison\nindz = -9\nindslicezplot = mesh.gridCC[:, 2] == mesh.vectorCCz[indz]\nfor i in range(2):\n utils.plot2Ddata(\n mesh.gridCC[indslicezplot][:, [0, 1]],\n true_geology[indslicezplot],\n nx=200,\n ny=200,\n contourOpts={\"alpha\": 0},\n clim=[0, 2],\n ax=ax[i],\n level=True,\n ncontour=2,\n levelOpts={\"colors\": \"k\", \"linewidths\": 2, \"linestyles\": \"--\"},\n method=\"nearest\",\n )\nplt.subplots_adjust(hspace=-0.25, wspace=0.1)\nplt.show()\n\n# Load Topo\ntopo_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/CDED_Lake_warp.xyz\"\n)\ntopo = np.genfromtxt(topo_file, skip_header=1)\n# find the active cells\nactv = utils.surface2ind_topo(mesh, topo, gridLoc=\"CC\")\n# Create active map to go from reduce set to full\nndv = np.nan\nactvMap = maps.InjectActiveCells(mesh, actv, ndv)\nnactv = int(actv.sum())\n\n# Create simulations and data misfits\n# Wires mapping\nwires = maps.Wires((\"den\", actvMap.nP), (\"sus\", actvMap.nP))\ngravmap = actvMap * wires.den\nmagmap = actvMap * wires.sus\nidenMap = maps.IdentityMap(nP=nactv)\n# Grav problem\nsimulation_grav = 
pf.gravity.simulation.Simulation3DIntegral(\n survey=data_grav.survey, mesh=mesh, rhoMap=wires.den, actInd=actv,\n)\ndmis_grav = data_misfit.L2DataMisfit(data=data_grav, simulation=simulation_grav)\n# Mag problem\nsimulation_mag = pf.magnetics.simulation.Simulation3DIntegral(\n survey=data_mag.survey, mesh=mesh, chiMap=wires.sus, actInd=actv,\n)\ndmis_mag = data_misfit.L2DataMisfit(data=data_mag, simulation=simulation_mag)\n\n#########################################################################\n# Create a joint Data Misfit\n#\n\n# Joint data misfit\ndmis = 0.5 * dmis_grav + 0.5 * dmis_mag\n\n# initial model\nm0 = np.r_[-1e-4 * np.ones(actvMap.nP), 1e-4 * np.ones(actvMap.nP)]\n\n#########################################################################\n# Inversion with full petrophysical information\n# ---------------------------------------------\n#\n\n#########################################################################\n# Create and plot a petrophysical GMM with full information\n# ---------------------------------------------------------\n#\n# The GMM is our representation of the petrophysical and geological information.\n# Here, we focus on the petrophysical aspect, with the means and covariances of\n# the physical properties of each rock unit.\n# To generate the data above, the PK unit was populated with a density contrast\n# of -0.8 g/cc and a magnetic susceptibility of 0.005 SI. The properties of the\n# HK unit were set at -0.2 g/cc and 0.02 SI. The covariances matrices are set\n# so that we assume petrophysical noise levels of around 0.05 g/cc and 0.001 SI\n# for both unit. Finally the background unit is set at null contrasts (0 g/cc\n# 0 SI) with a petrophysical noise level of half of the above.\n#\n\ngmmref = utils.WeightedGaussianMixture(\n n_components=3, # number of rock units: bckgrd, PK, HK\n mesh=mesh, # inversion mesh\n actv=actv, # actv cells\n covariance_type=\"diag\", # diagonal covariances\n)\n# required: initialization with fit\n# fake random samples, size of the mesh, number of physical properties: 2 (density and mag.susc)\ngmmref.fit(np.random.randn(nactv, 2))\n# set parameters manually\n# set phys. prop means for each unit\ngmmref.means_ = np.c_[\n [0.0, 0.0], # BCKGRD density contrast and mag. susc\n [-0.8, 0.005], # PK\n [-0.2, 0.02], # HK\n].T\n# set phys. prop covariances for each unit\ngmmref.covariances_ = np.array(\n [[6e-04, 3.175e-07], [2.4e-03, 1.5e-06], [2.4e-03, 1.5e-06]]\n)\n# important after setting cov. 
manually: compute precision matrices and cholesky\ngmmref.compute_clusters_precisions()\n# set global proportions; low-impact as long as not 0 or 1 (total=1)\ngmmref.weights_ = np.r_[0.9, 0.075, 0.025]\n\n# Plot the 2D GMM\nax = gmmref.plot_pdf(flag2d=True)\nax[0].set_xlabel(\"Density contrast [g/cc]\")\nax[0].set_ylim([0, 5])\nax[2].set_ylabel(\"magnetic Susceptibility [SI]\")\nax[2].set_xlim([0, 100])\nplt.show()\n\n#########################################################################\n# Create PGI regularization\n# -------------------------\n#\n\n# Sensitivity weighting\nwr_grav = np.sum(simulation_grav.G ** 2.0, axis=0) ** 0.5 / (mesh.cell_volumes[actv])\nwr_grav = wr_grav / np.max(wr_grav)\n\nwr_mag = np.sum(simulation_mag.G ** 2.0, axis=0) ** 0.5 / (mesh.cell_volumes[actv])\nwr_mag = wr_mag / np.max(wr_mag)\n\n# create joint PGI regularization with smoothness\nreg = utils.make_PGI_regularization(\n gmmref=gmmref,\n mesh=mesh,\n wiresmap=wires,\n maplist=[idenMap, idenMap],\n indActive=actv,\n alpha_s=1.0,\n alpha_x=1.0,\n alpha_y=1.0,\n alpha_z=1.0,\n alpha_xx=0.0,\n alpha_yy=0.0,\n alpha_zz=0.0,\n cell_weights_list=[wr_grav, wr_mag], # weights each phys. prop. by correct sensW\n)\n\n#########################################################################\n# Inverse problem with full petrophysical information\n# ---------------------------------------------------\n#\n\n# Directives\n# Add directives to the inversion\n# ratio to use for each phys prop. smoothness in each direction;\n# roughly the ratio of the order of magnitude of each phys. prop.\nalpha0_ratio = np.r_[\n np.zeros(len(reg.objfcts[0].objfcts)),\n 1e-4 * np.ones(len(reg.objfcts[1].objfcts)),\n 100.0 * 1e-4 * np.ones(len(reg.objfcts[2].objfcts)),\n]\nAlphas = directives.AlphasSmoothEstimate_ByEig(alpha0_ratio=alpha0_ratio, verbose=True)\n# initialize beta and beta/alpha_s schedule\nbeta = directives.BetaEstimate_ByEig(beta0_ratio=1e-2)\nbetaIt = directives.PGI_BetaAlphaSchedule(\n verbose=True, coolingFactor=2.0, tolerance=0.2, progress=0.2,\n)\n# geophy. and petro. 
target misfits\ntargets = directives.MultiTargetMisfits(verbose=True,)\n# add learned mref in smooth once stable\nMrefInSmooth = directives.PGI_AddMrefInSmooth(wait_till_stable=True, verbose=True,)\n# update the parameters in smallness (L2-approx of PGI)\nupdate_smallness = directives.PGI_UpdateParameters(\n update_gmm=False # keep GMM model fixed\n)\n# pre-conditioner\nupdate_Jacobi = directives.UpdatePreconditioner()\n# iteratively balance the scaling of the data misfits\nscaling_init = directives.ScalingMultipleDataMisfits_ByEig(chi0_ratio=[1.0, 100.0])\nscale_schedule = directives.JointScalingSchedule(verbose=True)\n\n# Create inverse problem\n# Optimization\n# set lower and upper bounds\nlowerbound = np.r_[-2.0 * np.ones(actvMap.nP), 0.0 * np.ones(actvMap.nP)]\nupperbound = np.r_[0.0 * np.ones(actvMap.nP), 1e-1 * np.ones(actvMap.nP)]\nopt = optimization.ProjectedGNCG(\n maxIter=30,\n lower=lowerbound,\n upper=upperbound,\n maxIterLS=20,\n maxIterCG=100,\n tolCG=1e-4,\n)\n# create inverse problem\ninvProb = inverse_problem.BaseInvProblem(dmis, reg, opt)\ninv = inversion.BaseInversion(\n invProb,\n # directives: evaluate alphas (and data misfits scales) before beta\n directiveList=[\n Alphas,\n scaling_init,\n beta,\n update_smallness,\n targets,\n scale_schedule,\n betaIt,\n MrefInSmooth,\n update_Jacobi,\n ],\n)\n\n# invert\npgi_model = inv.run(m0)\n\n# Extract the results\ndensity_model = gravmap * pgi_model\nmagsus_model = magmap * pgi_model\nquasi_geology_model = actvMap * reg.objfcts[0].compute_quasi_geology_model()\n\n# Plot the result with full petrophysical information\nfig, ax = plt.subplots(3, 4, figsize=(15, 10))\nfor _, axx in enumerate(ax):\n for _, axxx in enumerate(axx):\n axxx.set_aspect(1)\n axxx.tick_params(labelsize=ticksize)\n\nindx = 15\nindy = 17\nindz = -9\n# geology model\nmesh.plotSlice(\n quasi_geology_model,\n normal=\"X\",\n ax=ax[0, 0],\n clim=[0, 2],\n ind=indx,\n pcolorOpts={\"cmap\": \"inferno_r\"},\n)\nmesh.plotSlice(\n quasi_geology_model,\n normal=\"Y\",\n ax=ax[0, 1],\n clim=[0, 2],\n ind=indy,\n pcolorOpts={\"cmap\": \"inferno_r\"},\n)\ngeoplot = mesh.plotSlice(\n quasi_geology_model,\n normal=\"Z\",\n ax=ax[0, 2],\n clim=[0, 2],\n ind=indz,\n pcolorOpts={\"cmap\": \"inferno_r\"},\n)\ngeocb = plt.colorbar(geoplot[0], cax=ax[0, 3], ticks=[0, 1, 2])\ngeocb.set_ticklabels([\"BCK\", \"PK\", \"VK\"])\ngeocb.set_label(\"Quasi-Geology model\\n(Rock units classification)\", fontsize=16)\nax[0, 3].set_aspect(10)\n\n# gravity model\nmesh.plotSlice(\n density_model,\n normal=\"X\",\n ax=ax[1, 0],\n clim=[-1, 0],\n ind=indx,\n pcolorOpts={\"cmap\": \"Blues_r\"},\n)\nmesh.plotSlice(\n density_model,\n normal=\"Y\",\n ax=ax[1, 1],\n clim=[-1, 0],\n ind=indy,\n pcolorOpts={\"cmap\": \"Blues_r\"},\n)\ndenplot = mesh.plotSlice(\n density_model,\n normal=\"Z\",\n ax=ax[1, 2],\n clim=[-1, 0],\n ind=indz,\n pcolorOpts={\"cmap\": \"Blues_r\"},\n)\ndencb = plt.colorbar(denplot[0], cax=ax[1, 3])\ndencb.set_label(\"Density contrast\\nmodel (g/cc)\", fontsize=16)\nax[1, 3].set_aspect(10)\n\n# magnetic model\nmesh.plotSlice(\n magsus_model,\n normal=\"X\",\n ax=ax[2, 0],\n clim=[0, 0.025],\n ind=indx,\n pcolorOpts={\"cmap\": \"Reds\"},\n)\nmesh.plotSlice(\n magsus_model,\n normal=\"Y\",\n ax=ax[2, 1],\n clim=[0, 0.025],\n ind=indy,\n pcolorOpts={\"cmap\": \"Reds\"},\n)\nsusplot = mesh.plotSlice(\n magsus_model,\n normal=\"Z\",\n ax=ax[2, 2],\n clim=[0, 0.025],\n ind=indz,\n pcolorOpts={\"cmap\": \"Reds\"},\n)\nsuscb = plt.colorbar(susplot[0], cax=ax[2, 
3])\nsuscb.set_label(\"Magnetic susceptibility\\nmodel (SI)\", fontsize=16)\nax[2, 3].set_aspect(10)\n\n# overlay true geology model for comparison\nindslicexplot = mesh.gridCC[:, 0] == mesh.vectorCCx[indx]\nindsliceyplot = mesh.gridCC[:, 1] == mesh.vectorCCy[indy]\nindslicezplot = mesh.gridCC[:, 2] == mesh.vectorCCz[indz]\nfor i in range(3):\n for j, (plane, indd) in enumerate(\n zip([[1, 2], [0, 2], [0, 1]], [indslicexplot, indsliceyplot, indslicezplot])\n ):\n utils.plot2Ddata(\n mesh.gridCC[indd][:, plane],\n true_geology[indd],\n nx=100,\n ny=100,\n contourOpts={\"alpha\": 0},\n clim=[0, 2],\n ax=ax[i, j],\n level=True,\n ncontour=2,\n levelOpts={\"colors\": \"grey\", \"linewidths\": 2, \"linestyles\": \"--\"},\n method=\"nearest\",\n )\n\n# plot the locations of the cross-sections\nfor i in range(3):\n ax[i, 0].plot(\n mesh.vectorCCy[indy] * np.ones(2), [-300, 500], c=\"k\", linestyle=\"dotted\"\n )\n ax[i, 0].plot(\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n mesh.vectorCCz[indz] * np.ones(2),\n c=\"k\",\n linestyle=\"dotted\",\n )\n ax[i, 0].set_xlim(\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n )\n\n ax[i, 1].plot(\n mesh.vectorCCx[indx] * np.ones(2), [-300, 500], c=\"k\", linestyle=\"dotted\"\n )\n ax[i, 1].plot(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n mesh.vectorCCz[indz] * np.ones(2),\n c=\"k\",\n linestyle=\"dotted\",\n )\n ax[i, 1].set_xlim(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n )\n\n ax[i, 2].plot(\n mesh.vectorCCx[indx] * np.ones(2),\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n c=\"k\",\n linestyle=\"dotted\",\n )\n ax[i, 2].plot(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n mesh.vectorCCy[indy] * np.ones(2),\n c=\"k\",\n linestyle=\"dotted\",\n )\n ax[i, 2].set_xlim(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n )\n ax[i, 2].set_ylim(\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n )\n\nplt.tight_layout()\nplt.show()\n\n# Plot the 2D GMM\nfig = plt.figure(figsize=(10, 10))\nax0 = plt.subplot2grid((4, 4), (3, 1), colspan=3)\nax1 = plt.subplot2grid((4, 4), (0, 1), colspan=3, rowspan=3)\nax2 = plt.subplot2grid((4, 4), (0, 0), rowspan=3)\nax = [ax0, ax1, ax2]\nreg.objfcts[0].gmm.plot_pdf(flag2d=True, ax=ax, padding=0.5)\nax[0].set_xlabel(\"Density contrast [g/cc]\")\nax[0].set_ylim([0, 5])\nax[2].set_xlim([0, 50])\nax[2].set_ylabel(\"magnetic Susceptibility [SI]\")\nax[1].scatter(\n density_model[actv],\n magsus_model[actv],\n c=quasi_geology_model[actv],\n cmap=\"inferno_r\",\n edgecolors=\"k\",\n label=\"recovered PGI model\",\n alpha=0.5,\n)\nax[1].legend()\nax[0].hist(density_model[actv], density=True, bins=50)\nax[2].hist(magsus_model[actv], density=True, bins=50, orientation=\"horizontal\")\nplt.show()\n" ]
[ [ "numpy.ones", "numpy.sum", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.gca", "numpy.random.seed", "numpy.random.randn", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "numpy.max", "numpy.array", "matplotlib.pyplot.colorbar", "numpy.genfromtxt", "matplotlib.pyplot.subplot2grid" ] ]
nicococo/ClusterSvdd
[ "2f61c187a3197c807b239202b72d9c84cb46400c" ]
[ "ClusterSVDD/svdd_primal_sgd.py" ]
[ "__author__ = 'nicococo'\nimport numpy as np\n\nfrom numba import autojit\n\n\nclass SvddPrimalSGD(object):\n \"\"\" Primal subgradient descent solver for the support vector data description (SVDD).\n Author: Nico Goernitz, TU Berlin, 2015\n \"\"\"\n PRECISION = 10**-3 # important: effects the threshold, support vectors and speed!\n nu = 0.95\t # (scalar) the regularization constant > 0\n c = None # (vecor) center of the hypersphere\n radius2 = 0.0 # (scalar) the optimized threshold (rho)\n pobj = 0.0 # (scalar) primal objective after training\n\n def __init__(self, nu):\n self.nu = nu\n print('Creating new primal SVDD with nu={0}.'.format(nu))\n\n @autojit\n def fit(self, X, max_iter=20000, prec=1e-6, rate=0.01):\n if X.shape[1] < 1:\n print('Invalid training data.')\n return -1, -1\n self.c, self.radius2, self.pobj, iter = fit_extern(X, self.nu, max_iter, prec, rate)\n print('Iter={2}: obj={0} T={1}'.format(self.pobj, self.radius2, iter+1))\n return self.c, self.radius2\n\n def get_radius(self):\n return self.radius2\n\n def predict(self, X):\n # X : (dims x samples)\n dist = self.c.T.dot(self.c) - 2.*self.c.T.dot(X) + np.sum(X*X, axis=0)\n return dist - self.radius2\n\n\n@autojit(nopython=True)\ndef fit_extern(X, nu, max_iter, prec, rate):\n \"\"\" Subgradient descent solver for primal SVDD.\n Optimized for 'numba'\n \"\"\"\n (dims, samples) = X.shape\n\n # number of training examples\n reg = 1./(np.float64(samples)*nu)\n\n # center of mass\n c = np.zeros(dims, dtype=np.float64)\n # np.sum(X*X, axis=0)\n sum_XX = np.zeros(samples)\n for s in range(samples):\n foo = 0.0\n for d in range(dims):\n foo += X[d, s]*X[d, s]\n c[d] += X[d, s] / np.float64(samples)\n sum_XX[s] = foo\n # print np.sum(np.abs(c-np.mean(X, axis=1)))\n\n dot_2cX = np.zeros(samples, dtype=np.float64)\n for s in range(samples):\n dot_2cX[s] = 2.0 * np.sum(c*X[:, s])\n dist = np.sum(c*c) - dot_2cX + sum_XX\n\n T = 0.4 * np.max(dist) * (1.0-nu) # starting heuristic T\n # if nu exceeds 1.0, then T^* is always 0 and c can\n # be computed analytically (as center-of-mass, mean)\n if nu >= 1.0:\n return c, 0.0, 0.0, 0\n\n is_converged = False\n best_c = c\n best_radius2 = T\n obj_best = np.float64(1e20)\n\n obj_bak = -100.\n iter = 0\n\n # gradient step for center\n dc = np.zeros(dims, dtype=np.float64)\n inds = np.zeros(samples, dtype=np.int64)\n while not is_converged and iter < max_iter:\n # print iter\n for s in range(samples):\n dot_2cX[s] = 2.0 * np.sum(c*X[:, s])\n\n # calculate the distances of the center to each datapoint\n dist = np.sum(c*c) - dot_2cX + sum_XX\n inds_size = 0\n for s in range(samples):\n if dist[s]-T >= 1e-12:\n inds[inds_size] = s\n inds_size += 1\n # we need at least 1 entry, hence lower T to the maximum entry\n if inds_size == 0:\n inds_size = 1\n inds[0] = np.argmax(dist)\n T = dist[inds[0]]\n\n # real objective value given the current center c and threshold T\n ds = 0.0\n for s in range(inds_size):\n ds += dist[inds[s]] - T\n obj = T + reg*ds\n\n # this is subgradient, hence need to store the best solution so far\n if obj_best >= obj:\n best_c = c\n best_radius2 = T\n obj_best = obj\n\n # stop, if progress is too slow\n if obj > 0.:\n if np.abs((obj-obj_bak)/obj) < prec:\n is_converged = True\n continue\n obj_bak = obj\n\n # stepsize should be not more than 0.1 % of the maximum value encountered in dist\n max_change = rate * np.max(dist) / np.float(iter+1)*10.\n\n # gradient step for threshold\n dT = 1.0 - reg*np.float(inds_size)\n T -= np.sign(dT) * max_change\n\n # gradient step for 
center\n norm_dc = 0.0\n for d in range(dims):\n dc[d] = 0.0\n for s in range(inds_size):\n dc[d] += 2.*reg*(c[d] - X[d, inds[s]])\n norm_dc += dc[d]*dc[d]\n norm_dc = np.sqrt(norm_dc)\n\n if np.abs(norm_dc) < 1e-12:\n norm_dc = 1.0\n\n for d in range(dims):\n c[d] -= dc[d]/norm_dc * max_change\n iter += 1\n\n return best_c, best_radius2, obj_best, iter\n" ]
[ [ "numpy.sum", "numpy.sign", "numpy.zeros", "numpy.abs", "numpy.argmax", "numpy.float", "numpy.max", "numpy.sqrt", "numpy.float64" ] ]
jhhugo/DeepCTR
[ "12012b06097a4ad69d68e61989b16d2d6f02d741" ]
[ "deepctr/models/ccpm.py" ]
[ "# -*- coding:utf-8 -*-\n\"\"\"\n\nAuthor:\n Weichen Shen,[email protected]\n\nReference:\n [1] Liu Q, Yu F, Wu S, et al. A convolutional click prediction model[C]//Proceedings of the 24th ACM International on Conference on Information and Knowledge Management. ACM, 2015: 1743-1746.\n (http://ir.ia.ac.cn/bitstream/173211/12337/1/A%20Convolutional%20Click%20Prediction%20Model.pdf)\n\n\"\"\"\nimport tensorflow as tf\n\nfrom ..feature_column import build_input_features, get_linear_logit, input_from_feature_columns\nfrom ..layers.core import DNN, PredictionLayer\n\nfrom ..layers.sequence import KMaxPooling\nfrom ..layers.utils import concat_func, add_func\n\n\ndef CCPM(linear_feature_columns, dnn_feature_columns, conv_kernel_width=(6, 5), conv_filters=(4, 4),\n dnn_hidden_units=(256,), l2_reg_linear=1e-5, l2_reg_embedding=1e-5, l2_reg_dnn=0, dnn_dropout=0,\n seed=1024, task='binary'):\n \"\"\"Instantiates the Convolutional Click Prediction Model architecture.\n\n :param linear_feature_columns: An iterable containing all the features used by linear part of the model.\n :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.\n :param conv_kernel_width: list,list of positive integer or empty list,the width of filter in each conv layer.\n :param conv_filters: list,list of positive integer or empty list,the number of filters in each conv layer.\n :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN.\n :param l2_reg_linear: float. L2 regularizer strength applied to linear part\n :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector\n :param l2_reg_dnn: float. L2 regularizer strength applied to DNN\n :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.\n :param init_std: float,to use as the initialize std of embedding vector\n :param task: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n :return: A Keras model instance.\n \"\"\"\n\n if len(conv_kernel_width) != len(conv_filters):\n raise ValueError(\n \"conv_kernel_width must have same element with conv_filters\")\n\n features = build_input_features(\n linear_feature_columns + dnn_feature_columns)\n inputs_list = list(features.values())\n\n linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed,\n l2_reg=l2_reg_linear)\n\n sparse_embedding_list, _ = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding,\n seed, support_dense=False)\n\n n = len(sparse_embedding_list)\n l = len(conv_filters)\n\n conv_input = concat_func(sparse_embedding_list, axis=1)\n pooling_result = tf.keras.layers.Lambda(\n lambda x: tf.expand_dims(x, axis=3))(conv_input)\n\n for i in range(1, l + 1):\n filters = conv_filters[i - 1]\n width = conv_kernel_width[i - 1]\n k = max(1, int((1 - pow(i / l, l - i)) * n)) if i < l else 3\n\n conv_result = tf.keras.layers.Conv2D(filters=filters, kernel_size=(width, 1), strides=(1, 1), padding='same',\n activation='tanh', use_bias=True, )(pooling_result)\n pooling_result = KMaxPooling(\n k=min(k, int(conv_result.shape[1])), axis=1)(conv_result)\n\n flatten_result = tf.keras.layers.Flatten()(pooling_result)\n dnn_out = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout)(flatten_result)\n dnn_logit = tf.keras.layers.Dense(1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)\n\n final_logit = add_func([dnn_logit, linear_logit])\n\n output = 
PredictionLayer(task)(final_logit)\n model = tf.keras.models.Model(inputs=inputs_list, outputs=output)\n return model\n" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.expand_dims", "tensorflow.keras.initializers.glorot_normal", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Conv2D" ] ]
UoA-eResearch/dynamic_network_graph
[ "350a22a40dd7425eb08a688651df13af8826ea52" ]
[ "stress_test.py" ]
[ "#!/usr/bin/env python3\n\nimport asyncio\nimport websockets\nimport json\nimport random\nimport time\nimport numpy as np\n\nURI = \"wss://api-proxy.auckland-cer.cloud.edu.au/dynamic_network_graph\"\n#URI = \"ws://api-proxy.auckland-cer.cloud.edu.au:6789\"\n#URI = \"ws://localhost:6789\"\nSESSION_ID = \"STRESS_TEST\"\nconnections = []\n\nasync def read_all(websocket):\n try:\n while True:\n await asyncio.wait_for(websocket.recv(), 0)\n except:\n return\n\nasync def test():\n start = time.time()\n websocket = await websockets.connect(URI)\n connections.append(websocket)\n await websocket.send(json.dumps({\n \"action\": \"connect\",\n \"session_id\": SESSION_ID\n }))\n await websocket.send(json.dumps({\n \"session_id\": SESSION_ID,\n \"action\": \"upsert_entry\",\n \"entry\": {\n \"id\": random.randint(0, 100),\n \"donor\": random.randint(0, 100),\n \"resourceType\": \"$\",\n \"recipient\": random.randint(0, 100)\n }\n }))\n return time.time() - start\n\nasync def run_n_tests(n):\n results = await asyncio.gather(*[test() for i in range(n)])\n return results\n\nasync def main():\n print(\"n_clients,t,wall_time\")\n start = time.time()\n for i in range(100):\n result = await run_n_tests(15)\n result = np.mean(result)\n print(f\"{len(connections)},{result},{time.time() - start}\")\n for ws in connections:\n await read_all(ws)\n\nasyncio.get_event_loop().run_until_complete(main())" ]
[ [ "numpy.mean" ] ]
pfinashx/openvino
[ "1d417e888b508415510fb0a92e4a9264cf8bdef7" ]
[ "tests/layer_tests/onnx_tests/test_mean_variance_normalization.py" ]
[ "# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport pytest\n\nfrom common.onnx_layer_test_class import OnnxRuntimeLayerTest\n\n\nclass TestMeanVarianceNormalization(OnnxRuntimeLayerTest):\n def _prepare_input(self, inputs_dict):\n for input in inputs_dict.keys():\n inputs_dict[input] = np.random.randn(*inputs_dict[input]).astype(np.float32)\n return inputs_dict\n\n def create_net(self, shape, axes, ir_version):\n \"\"\"\n ONNX net IR net\n\n Input->MeanVarianceNormalization->Output => Input->MVN\n \"\"\"\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)\n\n node_def = onnx.helper.make_node(\n 'MeanVarianceNormalization',\n inputs=['input'],\n outputs=['output'],\n axes=axes\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_def],\n 'test_model',\n [input],\n [output]\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_model')\n\n #\n # Create reference IR net\n # Please, specify 'type': 'Input' for input node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net\n\n test_data = [\n dict(shape=[7, 2, 3, 5], axes=[2, 3]),\n dict(shape=[7, 2, 3, 5], axes=[1, 2, 3]),\n dict(shape=[7, 2, 3, 5, 11], axes=[2, 3, 4]),\n dict(shape=[7, 2, 3, 5, 11], axes=[1, 2, 3, 4])\n ]\n\n @pytest.mark.parametrize(\"params\", test_data)\n @pytest.mark.nightly\n def test_mvn(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n" ]
[ [ "numpy.random.randn" ] ]
renyixiang/xmind_to_testcase
[ "25f3a5377e67138fc6707c0a14dcf6ed8501c845" ]
[ "webtool/tow_csvfile_compare.py" ]
[ "# _*_ coding:utf-8 _*_\n\n'''\ncsv文件的合并和去重\n主要是针对测试用例增加使用此脚本\n'''\nimport pandas as pd\nimport glob\n#输出文件\noutputfile = '/Users/huaan720/Downloads/百度网盘/xmind2testcase-master/docs/case_csvfile/new.csv'\n#合并csv的文件夹\ncsv_list = glob.glob('/Users/huaan720/Downloads/百度网盘/xmind2testcase-master/docs/case_csvfile/*.csv')\nprint(u'共发现%s个CSV文件' % len(csv_list))\nprint(u'正在处理............')\n\ndef hebing():\n for inputfile in csv_list:\n f = open(inputfile,encoding='gbk')\n data = pd.read_csv(f)\n data.to_csv(outputfile, mode='a', index=False, header=None)\n print('完成合并')\n\ndef quchong(file):\n df = pd.read_csv(file, header=0)\n datalist = df.drop_duplicates()\n datalist.to_csv(file)\n print('完成去重')\n\nif __name__ == '__main__':\n hebing()\n quchong(outputfile)\n" ]
[ [ "pandas.read_csv" ] ]
mchapman87501/mars_perseverance_images
[ "9d138ffba25fcb039051cda724e15e994153d90c" ]
[ "tools/band_finder/src/band_finder/image_matcher.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nimage_matcher adjusts image contrast based on two image samples.\nCopyright 2021, Mitch Chapman All rights reserved\n\"\"\"\n\nimport numpy as np\n\n\nclass ChannelAdjuster:\n def __init__(self, src_sample, target_sample, channel, vmin, vmax):\n src = src_sample.astype(np.float64)\n targ = target_sample.astype(np.float64)\n\n src_values = src[:, :, channel].flatten()\n targ_values = targ[:, :, channel].flatten()\n\n samples = dict()\n for s, t in zip(src_values, targ_values):\n samples.setdefault(s, []).append(t)\n\n # Default the left and right edges to the channel extreme values.\n value_map = {\n vmin: vmin,\n vmax: vmax\n }\n for s, tvals in samples.items():\n value_map[s] = np.mean(tvals)\n\n ordered_src = sorted(value_map.keys())\n ordered_targ = [value_map[src] for src in ordered_src]\n\n self._osrc = ordered_src\n self._otarg = ordered_targ\n self._channel = channel\n\n def adjust(self, image_data):\n values = image_data[:, :, self._channel]\n new_values = np.interp(values, self._osrc, self._otarg)\n image_data[:, :, self._channel] = new_values\n\n\nclass ImageMatcher:\n \"\"\"\n ImageMatcher tries to make a source image match\n the appearance of a target image.\n\n It uses samples of the source and target image, that presumably depict the\n same scene, to characterize the mapping from source to target.\n It does this poorly, by considering image color components separately.\n \"\"\"\n\n def __init__(self, src_sample, target_sample):\n \"\"\"Create an instance.\n src_sample and target_sample are numpy image_data.\n Both show the same scene, but with potentially different colors -\n intensity, saturation, etc.\n\n Args:\n src_sample (array): A numpy image\n target_sample (array): A numpy image, depicting\n the same scene as src_sample but with\n possibly different color ranges\n \"\"\"\n src = src_sample.astype(np.float64)\n targ = target_sample.astype(np.float64)\n\n # Assume Lab channels.\n # TODO let caller specify this, perhaps via a class method.\n # This same information is encoded in find_pano.\n chan_info = [\n [0, 0.0, 100.0],\n [1, -127.0, 128.0],\n [2, -128.0, 127.0]\n ]\n self._adjusters = [\n ChannelAdjuster(src, targ, channel, vmin, vmax)\n for channel, vmin, vmax in chan_info\n ]\n\n def adjusted(self, src_image):\n \"\"\"Get a copy of a source image, adjusted to\n match self's target_sample.\n\n Note: the result's value data may not be bounded to, e.g., 0..255.0\n\n Args:\n image (array): numpy image array\n\n Returns:\n array: the adjusted image array\n \"\"\"\n result = src_image.copy()\n for adjuster in self._adjusters:\n adjuster.adjust(result)\n return result\n" ]
[ [ "numpy.mean", "numpy.interp" ] ]
NeversayEverLin/PyOCT
[ "d2c221142ebc3c13050ad26ea09ad9d031ddab31" ]
[ "PyOCT/misc.py" ]
[ "import os\nfrom h5py._hl.files import File \nimport numpy as np \nimport xml.etree.ElementTree as ET\nimport time\nfrom scipy.linalg import dft\nimport numpy.matlib \nimport matplotlib.pyplot as plt \nimport matplotlib \nfrom PyOCT import CAO \nimport re \nimport h5py\nfrom scipy.linalg.misc import norm \nfrom scipy.signal import fftconvolve\nimport matplotlib.patches as patches\nimport cv2 \nimport pickle\nfrom scipy import ndimage\nimport scipy.stats\nimport matplotlib.colors\nfrom matplotlib import cm \nimport math \ndef find_all_dataset(root_dir,saveFolder, saveOption='in'):\n \"\"\"\n Looking for all datasets under root_dir and create a saveFolder under root_dir for data save. \n : root_dir: root directory of all data files\n : saveFolder: name of folder where the data should be saved \n Return:\n : NumOfFile: total of raw data files \n : RawDataFileID: sorted raw data file ID\n : SettingsFileID: sorted settings file ID of corresponding raw data file \n : BkgndFileID: background data file \n : save_path: the path to save data \n : saveOption: 'in' or 'out', indicating save the processed files into current root directory with folder name as saveFolder ('in')\n : or save the processed files into an independent directory with saveFolder as a full directory path. \n \"\"\"\n if saveOption.lower() == 'in':\n save_path = os.path.join(root_dir,saveFolder) \n elif saveOption.lower() == 'out':\n save_path = saveFolder \n if not os.path.exists(save_path):\n os.mkdir(save_path)\n subfolders = os.listdir(root_dir)\n SettingsFileID = [] \n RawDataFileID = []\n BkgndFileID = []\n for item in subfolders:\n if item.endswith('_settings.xml'):\n SettingsFileID.append(item) \n if item.endswith('_raw.bin'):\n if 'bkgnd' not in item:\n RawDataFileID.append(item)\n else:\n BkgndFileID.append(item) \n # sort file name by numerical order\n pattern = re.compile(r'_\\d+_') \n pattern2 = re.compile(r'\\d+')\n RawDataFileID = sorted(RawDataFileID, key=lambda x:int(pattern2.findall(pattern.findall(x)[0])[0]))\n SettingsFileID = sorted(SettingsFileID , key=lambda x:int(pattern2.findall(pattern.findall(x)[0])[0]))\n NumOfFile = len(RawDataFileID) \n return NumOfFile, RawDataFileID, BkgndFileID, SettingsFileID, save_path\n\ndef ListAllDataFile(data_path,endsWith,startsWith=None,searchPattern=r\"_\\d+_\",searchPattern2=r\"\\d+\",returnNum=False):\n \"\"\"\n Serach for all data file under the condition of endsWith and return with a sorted results. \n The data file name can only have one number indicating the sequential order of file name, otherwise it might not right. \n searchPattern and searchPattern2 are intial and refined rearch target. 
\n \"\"\"\n if data_path.endswith(\"/\") or data_path.endswith(\"\\\\\"):\n data_path = data_path[:-1]\n dataID = []\n for dfile in os.listdir(data_path):\n if startsWith == None:\n if dfile.endswith(endsWith):\n dataID.append(dfile) \n else:\n if dfile.startswith(startsWith) and dfile.endswith(endsWith):\n dataID.append(dfile) \n searchPattern = re.compile(searchPattern)\n searchPattern2 = re.compile(searchPattern2)\n if searchPattern2 == None:\n dataID = sorted(dataID, key=lambda x:int(searchPattern.findall(x)[0])) \n else:\n dataID = sorted(dataID, key=lambda x:int(searchPattern2.findall(searchPattern.findall(x)[0])[0])) \n sortNum = []\n for x in dataID: \n sortNum.append(int(searchPattern2.findall(searchPattern.findall(x)[0])[0]))\n if returnNum:\n return dataID, np.asarray(sortNum)\n else:\n return dataID \n\ndef SaveData(save_path,FileName,inData,datatype='data',varName = 'OCTData'):\n \"\"\"\n Save data in the format of .hdf5\n : save_path: directory path where the data will be saved. \n : FileName: name of file name. Therefore, the file will be FileName.hdf5 \n : inData: input data. This should be an ndarray or Settings file. \n\n \"\"\"\n if save_path.endswith(\"/\") or save_path.endswith(\"\\\\\"):\n save_path = save_path[:-1]\n\n if datatype.lower() == 'data':\n if np.iscomplexobj(inData):\n DataFileSave = h5py.File(save_path+'/'+FileName+'.hdf5','w')\n DataFileSave.create_dataset(varName+'_real',shape=np.shape(inData),data=np.real(inData),compression=\"gzip\")\n DataFileSave.create_dataset(varName+'_imag',shape=np.shape(inData),data=np.imag(inData),compression=\"gzip\")\n DataFileSave.close()\n else:\n DataFileSave = h5py.File(save_path+'/'+FileName+'.hdf5','w')\n DataFileSave.create_dataset(varName,shape=np.shape(inData),data=inData,compression=\"gzip\")\n DataFileSave.close()\n elif datatype.lower() == 'settings':\n SettingsFile = h5py.File(save_path+'/'+FileName+'.hdf5','w')\n for k, v in inData.items():\n SettingsFile.create_dataset(k,data=v)\n SettingsFile.close() \n else:\n raise ValueError(\"Wrong data type!\") \ndef LoadSettings(path,FileName):\n \"\"\"\n Loading Settings file. \n path should NOT end with \"/\" or \"\\\\\".\n \"\"\"\n Settings = dict.fromkeys([], []) \n fid = h5py.File(path+'/'+FileName,'r')\n for key in fid.keys():\n Settings[key] = fid[key][()]\n return Settings \n\ndef mean2(x):\n y = np.sum(x) / np.size(x);\n return y\n\ndef corr2(a,b):\n \"\"\"Calculating correlation coefficient between two input 2D array\n with same definition to corr2() in MATLAB\n \"\"\"\n a = a - mean2(a)\n b = b - mean2(b)\n r = (a*b).sum() / np.sqrt((a*a).sum() * (b*b).sum())\n return np.abs(r)\n\n\ndef normxcorr2(template, image, mode=\"full\"):\n \"\"\"\n Input arrays should be floating point numbers.\n :param template: N-D array, of template or filter you are using for cross-correlation.\n Must be less or equal dimensions to image.\n Length of each dimension must be less than length of image.\n :param image: N-D array\n :param mode: Options, \"full\", \"valid\", \"same\"\n full (Default): The output of fftconvolve is the full discrete linear convolution of the inputs. \n Output size will be image size + 1/2 template size in each dimension.\n valid: The output consists only of those elements that do not rely on the zero-padding.\n same: The output is the same size as image, centered with respect to the ‘full’ output.\n :return: N-D array of same dimensions as image. 
Size depends on mode parameter.\n \"\"\"\n\n # If this happens, it is probably a mistake\n if np.ndim(template) > np.ndim(image) or \\\n len([i for i in range(np.ndim(template)) if template.shape[i] > image.shape[i]]) > 0:\n print(\"normxcorr2: TEMPLATE larger than IMG. Arguments may be swapped.\")\n\n template = template - np.mean(template)\n image = image - np.mean(image)\n\n a1 = np.ones(template.shape)\n # Faster to flip up down and left right then use fftconvolve instead of scipy's correlate\n ar = np.flipud(np.fliplr(template))\n out = fftconvolve(image, ar.conj(), mode=mode)\n \n image = fftconvolve(np.square(image), a1, mode=mode) - \\\n np.square(fftconvolve(image, a1, mode=mode)) / (np.prod(template.shape))\n\n # Remove small machine precision errors after subtraction\n image[np.where(image < 0)] = 0\n\n template = np.sum(np.square(template))\n out = out / np.sqrt(image * template)\n\n # Remove any divisions by 0 or very close to 0\n out[np.where(np.logical_not(np.isfinite(out)))] = 0\n \n return out\n\ndef Max2d(inData):\n return np.amax(inData), np.unravel_index(inData.argmax(),inData.shape) \n\ndef patternMatch(template,rootImage,cropIndex = None, showFit = False):\n \"\"\"Compare template image to rootImage and find the translation index required \n for makeing template image matched with rootImage. That's by moving (transX,transY) to ensure \n template image as much similar as to rootImage. Using normxcorr2() method which requires a small image region cropped from template.\n Therefore, cropIndex means the subimage of template used to deconvlve with rootImage. If cropIndex is None, then directly using template image as subimage.\n : template: to be compared, 2d numpy array as real. if cropIndex is None, both dimensions of template image must be smaller than rootImage. Using cropIndex must result in a smaller dimension of subimage compared to rootImage. \n : rootImage: basic image, 2d nump.array as real. It is best template and rootImage has the same \n : cropIndex: None as default, or (4,) list/array with [xmin,xmax,ymin,ymax]. 
\n : showFit: present fit results \n \"\"\"\n if cropIndex == None:\n CropImage = template \n centerofCropInTemplate = (0,0)\n else:\n cropIndex = np.asarray(cropIndex) \n CropImage = template[cropIndex[0]:cropIndex[1],cropIndex[2]:cropIndex[3]]\n centerofCropInTemplate = (int(np.ceil((cropIndex[1]+cropIndex[0])/2)), int(np.ceil((cropIndex[2]+cropIndex[3])/2)))\n cTmp = normxcorr2(CropImage,rootImage,mode='same') \n cMax, cPos = Max2d(cTmp) \n transX, transY = (cPos[0] - centerofCropInTemplate[0], cPos[1] - centerofCropInTemplate[1])\n if showFit:\n figC = plt.figure(figsize=(14,4))\n ax00 = plt.subplot2grid((1,3),(0,0),rowspan=1,colspan=1) \n ax00.set_title(\"Matching Corr Map\")\n ax01 = plt.subplot2grid((1,3),(0,1),rowspan=1,colspan=1) \n ax01.set_title(\"Root image\")\n ax02 = plt.subplot2grid((1,3),(0,2),rowspan=1,colspan=1) \n ax02.set_title(\"Template Image\") \n imax0 = ax00.imshow(cTmp,aspect='equal') \n ax01.imshow(rootImage,aspect='equal',cmap='gray')\n ax02.imshow(template,aspect='equal',cmap='gray') \n figC.colorbar(imax0,ax=ax00,orientation='vertical',fraction=0.05,aspect=50)\n\n figT = plt.figure(figsize=(5,5))\n axT = plt.subplot2grid((1,2),(0,0),rowspan=1,colspan=1) \n axT2 = plt.subplot2grid((1,2),(0,1),rowspan=1,colspan=1) \n axT.imshow(rootImage,cmap='gray',aspect='equal',interpolation = 'none')\n rect = patches.Rectangle((cPos[1]-np.shape(CropImage)[1]/2,cPos[0]-np.shape(CropImage)[0]/2), np.shape(CropImage)[1], np.shape(CropImage)[0], fill=False,linestyle='--',linewidth=2,edgecolor='tab:red')\n axT.add_patch(rect)\n axT2.imshow(CropImage,cmap='gray',aspect='equal',interpolation='none')\n\n return cTmp, cMax, transX, transY \n\n\ndef filter_bilateral( img_in, sigma_s, sigma_v, reg_constant=1e-8 ):\n \"\"\"Simple bilateral filtering of an input image\n\n Performs standard bilateral filtering of an input image. If padding is desired,\n img_in should be padded prior to calling\n\n Args:\n img_in (ndarray) monochrome input image\n sigma_s (float) spatial gaussian std. dev.\n sigma_v (float) value gaussian std. dev.\n reg_constant (float) optional regularization constant for pathalogical cases\n\n Returns:\n result (ndarray) output bilateral-filtered image\n\n Raises: \n ValueError whenever img_in is not a 2D float32 valued numpy.ndarray\n \"\"\"\n\n # check the input\n if not isinstance( img_in, numpy.ndarray ) or img_in.dtype != 'float32' or img_in.ndim != 2:\n raise ValueError('Expected a 2D numpy.ndarray with float32 elements')\n\n # make a simple Gaussian function taking the squared radius\n gaussian = lambda r2, sigma: (numpy.exp( -0.5*r2/sigma**2 )*3).astype(int)*1.0/3.0\n\n # define the window width to be the 3 time the spatial std. dev. to \n # be sure that most of the spatial kernel is actually captured\n win_width = int( 3*sigma_s+1 )\n\n # initialize the results and sum of weights to very small values for\n # numerical stability. not strictly necessary but helpful to avoid\n # wild values with pathological choices of parameters\n wgt_sum = numpy.ones( img_in.shape )*reg_constant\n result = img_in*reg_constant\n\n # accumulate the result by circularly shifting the image across the\n # window in the horizontal and vertical directions. 
within the inner\n # loop, calculate the two weights and accumulate the weight sum and \n # the unnormalized result image\n for shft_x in range(-win_width,win_width+1):\n for shft_y in range(-win_width,win_width+1):\n # compute the spatial weight\n w = gaussian( shft_x**2+shft_y**2, sigma_s )\n\n # shift by the offsets\n off = numpy.roll(img_in, [shft_y, shft_x], axis=[0,1] )\n\n # compute the value weight\n tw = w*gaussian( (off-img_in)**2, sigma_v )\n\n # accumulate the results\n result += off*tw\n wgt_sum += tw\n\n # normalize the result and return\n return result/wgt_sum\n\ndef FindVrange(enFace,VmaxBound=[0.999,1.0],VminBound=[0.01,0.05]):\n tmp = np.sort(enFace.flatten())\n sizeTmp = np.size(tmp) \n vmax = np.median(tmp[int(sizeTmp*VmaxBound[0]):int(sizeTmp*VmaxBound[1])]) \n vmin = np.median(tmp[int(sizeTmp*VminBound[0]):int(sizeTmp*VminBound[1])])\n OCTnorm = matplotlib.colors.Normalize(vmin=vmin,vmax=vmax) \n return [vmax, vmin,OCTnorm]\n\ndef truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):\n new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\n cmap(np.linspace(minval, maxval, n)))\n return new_cmap \n\ndef fillhole(input_image):\n '''\n input gray binary image get the filled image by floodfill method\n Note: only holes surrounded in the connected regions will be filled.\n :param input_image:\n :return:\n '''\n im_flood_fill = input_image.copy()\n h, w = input_image.shape[:2]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n im_flood_fill = im_flood_fill.astype(\"uint8\")\n cv2.floodFill(im_flood_fill, mask, (0, 0), 255)\n im_flood_fill_inv = cv2.bitwise_not(im_flood_fill)\n img_out = input_image | im_flood_fill_inv\n return img_out \n\ndef WriteIntoGif(path,fps,endsWith = '.png',saveFileName=None):\n import imageio \n from progress.bar import Bar\n if path.endswith(\"/\") or path.endswith(\"\\\\\"):\n path = path[:-1]\n pngFiles = []\n pngIndx = []\n for file in os.listdir(path):\n if file.endswith(endsWith):\n pngFiles.append(file) \n tmp = re.findall(r'\\d+',file)\n pngIndx.append(int(tmp[0]))\n pngFiles = np.asarray(pngFiles)\n pngFiles = pngFiles[np.argsort(pngIndx)]\n bar2 = Bar(' Writing into GIF', max=len(pngFiles))\n images = []\n\n for filename in pngFiles:\n bar2.next() \n images.append(imageio.imread(path+'/'+filename))\n if saveFileName:\n savename = saveFileName+\".gif\"\n else:\n savename = \"animation.gif\"\n imageio.mimsave(path+\"/\"+savename, images,fps=fps)\n\n\ndef patternMatch_fft(plane_xy_shift,plane_xy,showFit = False):\n \"\"\"\n pattern match in frequency domain by taking Fourier transform of input data\n here plane_xy_shift and plane_xy must has the same dimension. 
\n \"\"\" \n sx, sy = np.shape(plane_xy)\n fft_xy = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(plane_xy)))\n fft_xy_shift = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(plane_xy_shift)))\n cross_xy = np.abs(np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(fft_xy * np.conjugate(fft_xy_shift)))))\n max_corr_r, posxy = Max2d(cross_xy)\n shiftx, shifty = np.asarray(posxy)-[int(sx/2),int(sy/2)]\n\n if showFit:\n vmin = 2*np.amin(np.abs(plane_xy)**0.4)\n vmax = 0.7*np.amax(np.abs(plane_xy)**0.4)\n OCTnorm = matplotlib.colors.Normalize(vmin = vmin,vmax = vmax)\n fig = plt.figure(figsize=(5,3))\n ax00 = plt.subplot2grid((1,4),(0,0),rowspan=1,colspan=1) #xz\n ax01 = plt.subplot2grid((1,4),(0,1),rowspan=1,colspan=1)\n ax02 = plt.subplot2grid((1,4),(0,2),rowspan=1,colspan=1)\n ax03 = plt.subplot2grid((1,4),(0,3),rowspan=1,colspan=1)\n ax00.set_title(\"plane xy\")\n ax01.set_title(\"plane shift\")\n ax02.set_title(\"Fit res\")\n ax03.set_title(\"Re shift plane\")\n ax00.imshow(np.abs(plane_xy)**0.4,cmap='gray')\n ax01.imshow(np.abs(plane_xy_shift)**0.4,cmap='gray') \n ax02.imshow(np.abs(cross_xy))\n ax03.imshow(np.roll(np.abs(plane_xy_shift)**0.4,(shiftx,shifty),axis=(0,1)),cmap='gray')\n\n return [max_corr_r,shiftx,shifty]\n\ndef patternMatch_fft_scan(testVol,refPlane,cAxis,numAve=2,showFit=False):\n \"\"\"\n Do a pattern match by using fft method and also scan over a range along cAxis. \n refPlane: 2d dim array (alread median filtered)\n testVol: 3d dim array\n cAxis: along which axis of testVol to do plane-b-plane compare\n numAve: number of axis to averged over cAxis to do compare\n \"\"\"\n testVol = (testVol - np.mean(testVol))/np.std(testVol) \n refPlane = (refPlane - np.mean(refPlane))/np.std(refPlane) #normalize to reduce the effects of variant brightness on motion correction \n cAxisLen = np.shape(testVol)[cAxis] \n CorrR = 0 \n shiftx = 0\n shifty = 0\n for i in np.arange(numAve,cAxisLen-numAve,step=1,dtype=int):\n if cAxis == 0:\n tmpPlane = np.amax(testVol[i-numAve:i+numAve,:,:],axis=0)\n elif cAxis == 1:\n tmpPlane = np.amax(testVol[:,i-numAve:i+numAve,:],axis=1)\n elif cAxis == 2:\n tmpPlane = np.amax(testVol[:,:,i-numAve:i+numAve],axis=2)\n tmpPlane = ndimage.median_filter(tmpPlane,size=(3,3))\n tmpCorrR,tmpshiftx,tmpshifty = patternMatch_fft(tmpPlane,refPlane,showFit=showFit)\n if np.abs(tmpCorrR) > np.abs(CorrR):\n CorrR = tmpCorrR \n shiftx = tmpshiftx \n shifty = tmpshifty \n\n return [shiftx,shifty]\n\ndef patternMatch_fft_3d(testVol_raw,refVol_raw,testSurfPos,refSurfPos):\n \"\"\"\n Pattern match for motion correction in 3d. 
\n src_freq = np.fft.fftn(src_image_cpx)\n target_freq = np.fft.fftn(target_image_cpx)\n shape = src_freq.shape\n image_product = src_freq * target_freq.conj()\n cross_correlation = np.fft.ifftn(image_product)\n #cross_correlation = ifftn(image_product) # TODO CHECK why this line is different\n new_cross_corr = np.abs(cross_correlation)\n CCmax = cross_correlation.max()\n maxima = np.unravel_index(np.argmax(new_cross_corr), new_cross_corr.shape)\n midpoints = np.array([np.fix(axis_size//2) for axis_size in shape])\n shifts = np.array(maxima, dtype=np.float32)\n shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]\n \"\"\"\n zshift = int(refSurfPos-testSurfPos) \n testVol_raw = np.roll(testVol_raw,zshift,axis=0)\n testVol = testVol_raw[refSurfPos+20:refSurfPos+450,:,:]\n refVol = refVol_raw[refSurfPos+20:refSurfPos+450,:,:]\n testVol = (testVol - np.mean(testVol))/np.std(testVol) \n refVol = (refVol - np.mean(refVol))/np.std(refVol) \n test_freq = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(testVol))) \n ref_freq = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(refVol))) \n shape = test_freq.shape\n cross_correlation = np.abs(np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(ref_freq * test_freq.conj()))))\n CCmax = cross_correlation.max() \n maxima = np.unravel_index(np.argmax(cross_correlation), cross_correlation.shape)\n midpoints = np.array([np.fix(axis_size//2) for axis_size in shape]) \n shifts = np.array(maxima-midpoints, dtype=int)\n #shifts[0] = shifts[0]-int(testSurfPos-refSurfPos) \n testVol_raw = np.roll(testVol_raw,shifts,axis=(0,1,2)) \n if shifts[0] > 30:\n print(\"Warning: z axis shift larger than 30 pixels!\")\n if shifts[1] > 30:\n print(\"Warning: x axis shift larger than 30 pixels!\")\n if shifts[2] > 30:\n print(\"Warining: y axis shift larger than 30 pixels!\") \n return shifts, testVol_raw \n\ndef ListTXT(FileName,mode='w',data=None):\n if mode.lower() == 'w' or mode.lower() == \"write\":\n if data == None:\n raise ValueError(\"Please input DATA to be saved!\")\n else:\n with open(FileName,\"wb\") as fp2:\n pickle.dump(data,fp2)\n output = 1\n elif mode.lower() == \"r\" or mode.lower() == \"read\":\n with open(FileName, \"rb\") as fp: # Unpickling\n output = pickle.load(fp)\n else:\n raise ValueError(\"mode should only be either write or read !\")\n \n return output\n\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = data.flatten() #1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, h\n\n\nclass MplColorHelper:\n def __init__(self, cmap_name, start_val, stop_val):\n self.cmap_name = cmap_name\n self.cmap = plt.get_cmap(cmap_name)\n self.norm = matplotlib.colors.Normalize(vmin=start_val, vmax=stop_val)\n self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)\n print(\"color clim is {}\".format(self.scalarMap.get_clim()))\n def get_rgb(self, val,alpha=1.0):\n return self.scalarMap.to_rgba(val,alpha=alpha) #,bytes=True)\n\n\n\ndef get_prime_factors(number):\n prime_factors = []\n\n while number % 2 == 0:\n prime_factors.append(2)\n number = number / 2\n for i in range(3, int(math.sqrt(number)) + 1, 2):\n while number % i == 0:\n prime_factors.append(int(i))\n number = number / i\n if number > 2:\n prime_factors.append(int(number))\n return prime_factors\n" ]
[ [ "scipy.signal.fftconvolve", "numpy.ones", "numpy.sum", "numpy.argsort", "numpy.asarray", "numpy.size", "matplotlib.cm.ScalarMappable", "numpy.amax", "numpy.isfinite", "matplotlib.pyplot.subplot2grid", "matplotlib.colors.Normalize", "matplotlib.pyplot.figure", "numpy.fliplr", "numpy.abs", "matplotlib.pyplot.get_cmap", "numpy.where", "numpy.linspace", "numpy.mean", "numpy.ceil", "numpy.zeros", "numpy.argmax", "numpy.arange", "numpy.ndim", "numpy.prod", "numpy.fix", "numpy.iscomplexobj", "numpy.std", "numpy.square", "numpy.array", "numpy.roll", "scipy.ndimage.median_filter", "numpy.conjugate", "numpy.fft.ifftshift", "numpy.shape", "numpy.sqrt", "numpy.real", "numpy.imag" ] ]
stephane-eisen/pyleecan
[ "1faedde4b24acc6361fa1fdd4e980eaec4ca3a62" ]
[ "pyleecan/Methods/Machine/LamSlotMulti/plot.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom matplotlib.patches import Patch\nfrom matplotlib.pyplot import axis, legend\n\nfrom ....Functions.init_fig import init_fig\nfrom ....definitions import config_dict\n\nROTOR_COLOR = config_dict[\"PLOT\"][\"COLOR_DICT\"][\"ROTOR_COLOR\"]\nSTATOR_COLOR = config_dict[\"PLOT\"][\"COLOR_DICT\"][\"STATOR_COLOR\"]\n\n\ndef plot(\n self,\n fig=None,\n ax=None,\n is_lam_only=False,\n sym=1,\n alpha=0,\n delta=0,\n is_edge_only=False,\n is_display=True,\n is_show_fig=True,\n):\n \"\"\"Plot the Lamination with empty Slots in a matplotlib fig\n\n Parameters\n ----------\n self : LamSlotMulti\n A LamSlotMulti object\n fig : Matplotlib.figure.Figure\n existing figure to use if None create a new one\n ax : Matplotlib.axes.Axes object\n Axis on which to plot the data\n is_lam_only: bool\n True to plot only the lamination\n sym : int\n Symmetry factor (1= full machine, 2= half of the machine...)\n alpha : float\n Angle for rotation [rad]\n delta : complex\n Complex value for translation\n is_edge_only: bool\n To plot transparent Patches\n is_display : bool\n False to return the patches\n is_show_fig : bool\n To call show at the end of the method\n Returns\n -------\n patches : list\n List of Patches\n \"\"\"\n\n if self.is_stator:\n lam_color = STATOR_COLOR\n else:\n lam_color = ROTOR_COLOR\n\n (fig, axes, patch_leg, label_leg) = init_fig(fig=fig, ax=ax, shape=\"rectangle\")\n\n surf_list = self.build_geometry(sym=sym, alpha=alpha, delta=delta)\n patches = list()\n for surf in surf_list:\n if \"Lamination\" in surf.label:\n patches.extend(surf.get_patches(color=lam_color, is_edge_only=is_edge_only))\n else:\n patches.extend(surf.get_patches(is_edge_only=is_edge_only))\n # Display the result\n if is_display:\n (fig, axes, patch_leg, label_leg) = init_fig(fig)\n axes.set_xlabel(\"(m)\")\n axes.set_ylabel(\"(m)\")\n for patch in patches:\n axes.add_patch(patch)\n\n # Axis Setup\n axes.axis(\"equal\")\n\n # The Lamination is centered in the figure\n Lim = self.Rext * 1.5\n axes.set_xlim(-Lim, Lim)\n axes.set_ylim(-Lim, Lim)\n\n # Add the legend\n if not is_edge_only:\n if self.is_stator:\n patch_leg.append(Patch(color=STATOR_COLOR))\n label_leg.append(\"Stator\")\n axes.set_title(\"Stator with empty slot\")\n else:\n patch_leg.append(Patch(color=ROTOR_COLOR))\n label_leg.append(\"Rotor\")\n axes.set_title(\"Rotor with empty slot\")\n\n legend(patch_leg, label_leg)\n if is_show_fig:\n fig.show()\n else:\n return patches\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.patches.Patch" ] ]
charliememory/detectron2
[ "a2a6220068e73c616ee4c84cb52ea023c0203fa0" ]
[ "projects/DensePose_wrong/densepose/modeling/condinst/iuv_head.py" ]
[ "from typing import Dict\nimport math\n\nimport torch\nfrom torch import nn\n\nfrom fvcore.nn import sigmoid_focal_loss_jit\nfrom detectron2.layers import ShapeSpec\n\n# from adet.layers import conv_with_kaiming_uniform\n# from adet.utils.comm import aligned_bilinear\nfrom densepose.layers import conv_with_kaiming_uniform\nfrom densepose.utils.comm import compute_locations, aligned_bilinear\n# from .. import (\n# build_densepose_data_filter,\n# build_densepose_head,\n# build_densepose_losses,\n# build_densepose_predictor,\n# densepose_inference,\n# )\nimport pdb\n\nINF = 100000000\n\n\ndef build_iuv_head(cfg):\n # return GlobalIUVHeadAfterMaskBranch(cfg)\n return CoordGlobalIUVHeadAfterMaskBranch(cfg)\n\n\nclass CoordGlobalIUVHeadAfterMaskBranch(nn.Module):\n def __init__(self, cfg, disable_rel_coords=False):\n super().__init__()\n self.num_outputs = cfg.MODEL.CONDINST.IUVHead.OUT_CHANNELS\n norm = cfg.MODEL.CONDINST.IUVHead.NORM\n num_convs = cfg.MODEL.CONDINST.IUVHead.NUM_CONVS\n channels = cfg.MODEL.CONDINST.IUVHead.CHANNELS\n soi = cfg.MODEL.FCOS.SIZES_OF_INTEREST\n self.register_buffer(\"sizes_of_interest\", torch.tensor(soi + [soi[-1] * 2]))\n self.in_channels = channels + 2\n self.iuv_out_stride = cfg.MODEL.CONDINST.MASK_OUT_STRIDE\n self.disable_rel_coords = disable_rel_coords\n\n conv_block = conv_with_kaiming_uniform(norm, activation=True)\n\n tower = []\n tower.append(conv_block(\n self.in_channels, channels, 3, 1\n ))\n for i in range(1,num_convs):\n tower.append(conv_block(\n channels, channels, 3, 1\n ))\n tower.append(nn.Conv2d(\n channels, max(self.num_outputs, 1), 1\n ))\n self.add_module('tower', nn.Sequential(*tower))\n\n # self.densepose_losses = build_densepose_losses(cfg)\n\n def forward(self, s_logits, iuv_feats, iuv_feat_stride, instances):\n\n locations = compute_locations(\n iuv_feats.size(2), iuv_feats.size(3),\n stride=iuv_feat_stride, device=iuv_feats.device\n )\n # n_inst = len(instances)\n\n im_inds = instances.im_inds\n\n N, _, H, W = iuv_feats.size()\n rel_coord = torch.zeros([N,2,H,W], device=iuv_feats.device).to(dtype=iuv_feats.dtype)\n\n if not self.disable_rel_coords: \n instance_locations = instances.locations\n relative_coords = instance_locations.reshape(-1, 1, 2) - locations.reshape(1, -1, 2)\n relative_coords = relative_coords.permute(0, 2, 1).float()\n soi = self.sizes_of_interest.float()[instances.fpn_levels]\n relative_coords = relative_coords / soi.reshape(-1, 1, 1)\n relative_coords = relative_coords.to(dtype=iuv_feats.dtype)\n # rel_coord_list = []\n for idx in range(N):\n if idx in im_inds:\n cc = relative_coords[im_inds==idx,].reshape(-1, 2, H, W)\n # assert s_logits.shape[1]==1\n ss = s_logits[im_inds==idx,-1:]\n # coord = torch.sum(cc*ss, dim=0, keepdim=True) \\\n # / (torch.sum(ss, dim=0, keepdim=True)+1e-7)\n coord = torch.mean(cc*ss, dim=0, keepdim=True) \n rel_coord[idx:idx+1] = coord #.reshape(1, 2, H, W)\n # pdb.set_trace()\n # import imageio\n # imageio.imwrite(\"tmp/cc.png\",cc[0,0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/ss.png\",ss[0,0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/cc_ss.png\",(cc*ss)[0,0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/ss_sum.png\",torch.sum(ss, dim=0, keepdim=True)[0,0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/coord_mean.png\",coord[0,0].detach().cpu().numpy())\n # rel_coord_list.append(rel_coord)\n # pdb.set_trace()\n iuv_head_inputs = torch.cat([rel_coord, iuv_feats], dim=1) \n else:\n iuv_head_inputs = iuv_feats\n\n\n\n\n\n iuv_logit = 
self.tower(iuv_head_inputs)\n\n assert iuv_feat_stride >= self.iuv_out_stride\n assert iuv_feat_stride % self.iuv_out_stride == 0\n iuv_logit = aligned_bilinear(iuv_logit, int(iuv_feat_stride / self.iuv_out_stride))\n\n return iuv_logit\n\n\nclass GlobalIUVHeadAfterMaskBranch(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.num_outputs = cfg.MODEL.CONDINST.IUVHead.OUT_CHANNELS\n norm = cfg.MODEL.CONDINST.IUVHead.NORM\n num_convs = cfg.MODEL.CONDINST.IUVHead.NUM_CONVS\n channels = cfg.MODEL.CONDINST.IUVHead.CHANNELS\n self.iuv_out_stride = cfg.MODEL.CONDINST.MASK_OUT_STRIDE\n\n conv_block = conv_with_kaiming_uniform(norm, activation=True)\n\n tower = []\n for i in range(num_convs):\n tower.append(conv_block(\n channels, channels, 3, 1\n ))\n tower.append(nn.Conv2d(\n channels, max(self.num_outputs, 1), 1\n ))\n self.add_module('tower', nn.Sequential(*tower))\n\n # self.densepose_losses = build_densepose_losses(cfg)\n\n def forward(self, iuv_feats, iuv_feat_stride, instances=None):\n iuv_logit = self.tower(iuv_feats)\n\n assert iuv_feat_stride >= self.iuv_out_stride\n assert iuv_feat_stride % self.iuv_out_stride == 0\n iuv_logit = aligned_bilinear(iuv_logit, int(iuv_feat_stride / self.iuv_out_stride))\n\n return iuv_logit\n" ]
[ [ "torch.tensor", "torch.nn.Sequential", "torch.zeros", "torch.cat", "torch.mean" ] ]
tpawlowski/image_analytics
[ "60445177a45c81a2c9c389b2f85f0d49d561c211" ]
[ "neuroscience/scidb/stream_mask.py" ]
[ "#!/usr/bin/python\n\n#\n#DFZ 11/15/2016: it's hard to control the chunk size read from the\n# stream() interface, see run_mri_stream.output for a concrete idea.\n#\n\n#the following import block is for testing only\nimport dipy.core.gradients as dpg\nimport os.path as op\nfrom dipy.segment.mask import median_otsu\nimport nibabel as nib\nfrom dipy.denoise import nlmeans\nimport dipy.core.gradients as dpg\nfrom dipy.denoise.noise_estimate import estimate_sigma\nimport time\nimport sys\nimport numpy as np\nimport os\nfrom __builtin__ import float\n\n#SciDB handler\n#from scidbpy import connect\n#sdb = connect('http://localhost:8080')\n\n\ntm_start = time.time()\nsys.stderr.write(\"\\n\\n=====> DFZ DEBUG 3/2/2017: \" + time.ctime() + \" OMG I start again! \\n\")\n\nSUB_ID = 101107\n\nend_of_interaction = 0\nwhile (end_of_interaction != 1):\n header = sys.stdin.readline().rstrip()\n #declare the local denoised array\n if(header != \"0\"):\n sys.stderr.write(\"=====> DFZ 2/24/2017: header = \" + header + \"\\n\")\n #We receive a message from the SciDB instance:\n num_lines = int(header) #how many lines did we get?\n sys.stderr.write(\"=====> DFZ 1/25/2017: num_lines = \"+str(num_lines)+\"\\n\")\n #n_vol = num_lines / 145 / 174 / 145\n #sys.stderr.write(\"=====> DFZ 1/25/2017: n_vol = \"+str(n_vol)+\"(should equal to 288/4 )\\n\")\n\n #Collect all lines into a list:\n input_lines = []\n for i in range(0, num_lines):\n line = sys.stdin.readline().rstrip()\n try:\n f = float(line)\n except:\n f = 0.0\n input_lines.append(f)\n\n#################################################\n############## MRI Logic ########################\n#################################################\n\n #construct the values into a numpy array for MRI\n nparray = np.asarray(input_lines, dtype=np.float32)\n# sys.stderr.write(\"=====> DFZ DEBUG: convertion completed.\\n\")\n sys.stderr.write(\"=====> DFZ DEBUG 2/16/2017: nparray.shape = \" + str(nparray.size) + \"; len(input_lines) = \" + str(len(input_lines)) +\"\\n\")\n mean_b0 = np.reshape(nparray, (145, 174, 145)) #last param should reflect the chunk size\n sys.stderr.write(\"=====> DFZ DEBUG: data loading completed.\\n\")\n\n #masking\n DATA_LOC = \"/home/ubuntu/mri_data/101107/\"\n gtab = dpg.gradient_table(DATA_LOC + 'bvals', DATA_LOC + 'bvecs', b0_threshold=10)\n mask = median_otsu(mean_b0, 4, 2, False, vol_idx=np.where(gtab.b0s_mask), dilate=1)\n sys.stderr.write(\"mask: \\n\")\n sys.stderr.write(str(mask)) #TODO: write it back to SciDB\n\n\n # if you need interative results:\n print(2)\n print(\"Total lines: \" + str(num_lines))\n print(\"I'm tired ----> First line: \" + str(input_lines[0]))\n sys.stdout.flush()\n#This will appear in the scidb-sterr.log file:\n sys.stderr.write(time.ctime() + \"I finished a chunk with \"+ str(num_lines) + \" lines of text!\\n\")\n\n else:\n\n #If we receive \"0\", it means the SciDB instance has no more\n #Data to give us. Here we have the option of also responding with \"0\"\n #Or sending some other message (i.e. a global sum):\n end_of_interaction = 1\n print(\"1\")\n# print(\"KTHXBYE\")\n print(\"KTHXBYE: subject \" + str(SUB_ID) + \" done in \" + str(time.time() - tm_start) + \" seconds\")\n sys.stdout.flush()\n\n#ok = 0\n# So I cannot 'return' or 'print' even after 'return'; the following statements would cause errors\n#exit(0)\n# print \"Start at \" + str(time.ctime())\n" ]
[ [ "numpy.where", "numpy.reshape", "numpy.asarray" ] ]
CyrilGarneau/COVID19-Model
[ "e8b7e459d0cfca580ded33fda05ebd6858e19c86" ]
[ "src/coronaHelper2.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 18 11:01:22 2020\n\n@author: twallema\nCopyright (c) 2020 by T.W. Alleman, BIOMATH, Ghent University. All Rights Reserved.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom random import choices\nimport scipy\nfrom scipy.integrate import odeint\nimport math\nimport models\nimport networkx\nfrom scipy import interpolate as inter\nfrom gekko import GEKKO\n\ndef sampleFromDistribution(filename,k):\n df = pd.read_csv(filename)\n x = df.iloc[:,0]\n y = df.iloc[:,1]\n return(np.asarray(choices(x, y, k = k)))\n\ndef runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospitalvect,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE,\n initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs):\n tN = simtime + 1\n if monteCarlo == True: \n n_samples = dcfvect.size\n S = np.zeros([tN,n_samples])\n E = np.zeros([tN,n_samples])\n SM = np.zeros([tN,n_samples])\n M = np.zeros([tN,n_samples])\n H = np.zeros([tN,n_samples])\n C = np.zeros([tN,n_samples])\n HH = np.zeros([tN,n_samples])\n CH = np.zeros([tN,n_samples])\n R = np.zeros([tN,n_samples])\n F = np.zeros([tN,n_samples])\n SQ = np.zeros([tN,n_samples])\n EQ = np.zeros([tN,n_samples])\n SMQ = np.zeros([tN,n_samples])\n MQ = np.zeros([tN,n_samples])\n RQ = np.zeros([tN,n_samples])\n i=0\n t = np.linspace(0,simtime,tN)\n for sigma in sigmavect:\n dcf = dcfvect[i]\n dcr = dcrvect[i]\n dhospital = dhospitalvect[i]\n sm = smvect[i]\n m = (1-sm)*0.81 \n h = (1-sm)*0.14 \n c = (1-sm)*0.05 \n # perform simulation\n if modelType == 'deterministic':\n if method == 'findInfected' or method == 'findTime' or method == 'none':\n model = models.SEIRSAgeModel(initN=initN,beta=beta,sigma=sigma,Nc=Nc,zeta=zeta,sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,\n totalTests=totalTests,psi_FP=psi_FP,psi_PP=psi_PP,dq=dq,\n initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,initMQ=initMQ,\n initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n elif method == 'findGovernmentResponse':\n extraTime = stoArgs['extraTime']\n measureTime = stoArgs['measureTime']\n initE = 1\n Nc0 = 11.2\n checkpoints = {\n 't': [measureTime+extraTime],\n 'Nc': [Nc]\n }\n model = models.SEIRSAgeModel(initN=initN,beta=beta,sigma=sigma,Nc=Nc0,zeta=zeta,sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,\n totalTests=totalTests,psi_FP=psi_FP,psi_PP=psi_PP,dq=dq,\n initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,initMQ=initMQ,\n initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n else:\n raise Exception('Suitable methods to run model are: none, findTime, findInfected, findGovernmentResponse. 
The provided method was: {}'.format(method))\n elif modelType == 'stochastic':\n if method == 'findInfected' or method == 'findTime' or method == 'none':\n model = models.SEIRSNetworkModel(G=stoArgs['G'],beta=beta,sigma=sigma,zeta=zeta,p=stoArgs['p'],sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,theta_S=theta_S,\n theta_E=theta_E,theta_SM=theta_SM,theta_M=theta_M,theta_R=theta_R,phi_S=phi_S,phi_E=phi_E,phi_SM=phi_SM,phi_R=phi_R,psi_FP=psi_FP,psi_PP=psi_PP,\n dq=dq,initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,\n initMQ=initMQ,initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n # output is not returned every single day, so the results must be interpolated\n x = y.tseries\n if x[-1] < simtime:\n x=np.append(x,simtime+1)\n y.numS=np.append(y.numS,y.numS[-1])\n y.numE=np.append(y.numE,y.numE[-1])\n y.numSM=np.append(y.numSM,y.numSM[-1])\n y.numM=np.append(y.numM,y.numM[-1])\n y.numH=np.append(y.numH,y.numH[-1])\n y.numC=np.append(y.numC,y.numC[-1])\n y.numHH=np.append(y.numHH,y.numHH[-1])\n y.numCH=np.append(y.numCH,y.numCH[-1])\n y.numR=np.append(y.numR,y.numR[-1])\n y.numF=np.append(y.numF,y.numF[-1])\n y.numSQ=np.append(y.numSQ,y.numSQ[-1])\n y.numEQ=np.append(y.numEQ,y.numEQ[-1])\n y.numSMQ=np.append(y.numSMQ,y.numSMQ[-1])\n y.numMQ=np.append(y.numMQ,y.numMQ[-1])\n y.numRQ=np.append(y.numRQ,y.numRQ[-1])\n\n # first variable\n inte = inter.interp1d(x,y.numS)\n y.numS = inte(t)\n\n inte = inter.interp1d(x,y.numE)\n y.numE = inte(t)\n\n inte = inter.interp1d(x,y.numSM)\n y.numSM = inte(t)\n\n inte = inter.interp1d(x,y.numM)\n y.numM = inte(t)\n\n inte = inter.interp1d(x,y.numH)\n y.numH = inte(t)\n\n inte = inter.interp1d(x,y.numC)\n y.numC = inte(t)\n\n inte = inter.interp1d(x,y.numHH)\n y.numHH = inte(t)\n\n inte = inter.interp1d(x,y.numCH)\n y.numCH = inte(t)\n\n inte = inter.interp1d(x,y.numR)\n y.numR = inte(t)\n\n inte = inter.interp1d(x,y.numF)\n y.numF = inte(t)\n\n inte = inter.interp1d(x,y.numSQ)\n y.numSQ = inte(t)\n\n inte = inter.interp1d(x,y.numEQ)\n y.numEQ = inte(t)\n\n inte = inter.interp1d(x,y.numSMQ)\n y.numSMQ = inte(t)\n\n inte = inter.interp1d(x,y.numMQ)\n y.numMQ = inte(t)\n\n inte = inter.interp1d(x,y.numRQ)\n y.numRQ = inte(t)\n\n elif method == 'findGovernmentResponse':\n extraTime = stoArgs['extraTime']\n measureTime = stoArgs['measureTime']\n initE = 1\n beta0 = 0.244\n checkpoints = {\n 't': [measureTime+extraTime],\n 'beta': [beta]\n }\n model = models.SEIRSNetworkModel(G=stoArgs['G'],beta=beta,sigma=sigma,zeta=zeta,p=stoArgs['p'],sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,theta_S=theta_S,\n theta_E=theta_E,theta_SM=theta_SM,theta_M=theta_M,theta_R=theta_R,phi_S=phi_S,phi_E=phi_E,phi_SM=phi_SM,phi_R=phi_R,psi_FP=psi_FP,psi_PP=psi_PP,\n dq=dq,initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,\n initMQ=initMQ,initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n # output is not returned every single day, so the results must be interpolated\n x = y.tseries\n if x[-1] < simtime:\n x=np.append(x,simtime+1)\n y.numS=np.append(y.numS,y.numS[-1])\n y.numE=np.append(y.numE,y.numE[-1])\n y.numSM=np.append(y.numSM,y.numSM[-1])\n y.numM=np.append(y.numM,y.numM[-1])\n y.numH=np.append(y.numH,y.numH[-1])\n 
y.numC=np.append(y.numC,y.numC[-1])\n y.numHH=np.append(y.numHH,y.numHH[-1])\n y.numCH=np.append(y.numCH,y.numCH[-1])\n y.numR=np.append(y.numR,y.numR[-1])\n y.numF=np.append(y.numF,y.numF[-1])\n y.numSQ=np.append(y.numSQ,y.numSQ[-1])\n y.numEQ=np.append(y.numEQ,y.numEQ[-1])\n y.numSMQ=np.append(y.numSMQ,y.numSMQ[-1])\n y.numMQ=np.append(y.numMQ,y.numMQ[-1])\n y.numRQ=np.append(y.numRQ,y.numRQ[-1])\n\n # first variable\n inte = inter.interp1d(x,y.numS)\n y.numS = inte(t)\n\n inte = inter.interp1d(x,y.numE)\n y.numE = inte(t)\n\n inte = inter.interp1d(x,y.numSM)\n y.numSM = inte(t)\n\n inte = inter.interp1d(x,y.numM)\n y.numM = inte(t)\n\n inte = inter.interp1d(x,y.numH)\n y.numH = inte(t)\n\n inte = inter.interp1d(x,y.numC)\n y.numC = inte(t)\n\n inte = inter.interp1d(x,y.numHH)\n y.numHH = inte(t)\n\n inte = inter.interp1d(x,y.numCH)\n y.numCH = inte(t)\n\n inte = inter.interp1d(x,y.numR)\n y.numR = inte(t)\n\n inte = inter.interp1d(x,y.numF)\n y.numF = inte(t)\n\n inte = inter.interp1d(x,y.numSQ)\n y.numSQ = inte(t)\n\n inte = inter.interp1d(x,y.numEQ)\n y.numEQ = inte(t)\n\n inte = inter.interp1d(x,y.numSMQ)\n y.numSMQ = inte(t)\n\n inte = inter.interp1d(x,y.numMQ)\n y.numMQ = inte(t)\n\n inte = inter.interp1d(x,y.numRQ)\n y.numRQ = inte(t) \n else:\n raise Exception('Suitable methods to run function dxdt are: none, findTime, findInfected, findGovernmentResponse. The provided method was: {}'.format(method))\n else:\n raise Exception('Modeltype is either deterministic or stochastic. The provided modeltype was: {}'.format(modelType))\n \n # extract results\n if modelType == \"deterministic\":\n S[:,i] = y.sumS.reshape(y.sumS.size,1)[:,0] \n E[:,i] = y.sumE.reshape(y.sumE.size,1)[:,0] \n SM[:,i] = y.sumSM.reshape(y.sumSM.size,1)[:,0] \n M[:,i] = y.sumM.reshape(y.sumM.size,1)[:,0] \n H[:,i] = y.sumH.reshape(y.sumH.size,1)[:,0] \n C[:,i] = y.sumC.reshape(y.sumC.size,1)[:,0] \n HH[:,i] = y.sumHH.reshape(y.sumHH.size,1)[:,0] \n CH[:,i] = y.sumCH.reshape(y.sumCH.size,1)[:,0] \n R[:,i] = y.sumR.reshape(y.sumR.size,1)[:,0] \n F[:,i] = y.sumF.reshape(y.sumF.size,1)[:,0] \n SQ[:,i] = y.sumSQ.reshape(y.sumSQ.size,1)[:,0] \n EQ[:,i] = y.sumEQ.reshape(y.sumEQ.size,1)[:,0] \n SMQ[:,i] = y.sumSMQ.reshape(y.sumSMQ.size,1)[:,0] \n MQ[:,i] = y.sumMQ.reshape(y.sumMQ.size,1)[:,0] \n RQ[:,i] = y.sumRQ.reshape(y.sumRQ.size,1)[:,0]\n\n elif modelType == \"stochastic\":\n S[:,i] = y.numS.reshape(y.numS.size,1)[:,0] \n E[:,i] = y.numE.reshape(y.numE.size,1)[:,0] \n SM[:,i] = y.numSM.reshape(y.numSM.size,1)[:,0] \n M[:,i] = y.numM.reshape(y.numM.size,1)[:,0] \n H[:,i] = y.numH.reshape(y.numH.size,1)[:,0] \n C[:,i] = y.numC.reshape(y.numC.size,1)[:,0] \n HH[:,i] = y.numHH.reshape(y.numHH.size,1)[:,0] \n CH[:,i] = y.numCH.reshape(y.numCH.size,1)[:,0] \n R[:,i] = y.numR.reshape(y.numR.size,1)[:,0] \n F[:,i] = y.numF.reshape(y.numF.size,1)[:,0] \n SQ[:,i] = y.numSQ.reshape(y.numSQ.size,1)[:,0] \n EQ[:,i] = y.numEQ.reshape(y.numEQ.size,1)[:,0] \n SMQ[:,i] = y.numSMQ.reshape(y.numSMQ.size,1)[:,0] \n MQ[:,i] = y.numMQ.reshape(y.numMQ.size,1)[:,0] \n RQ[:,i] = y.numRQ.reshape(y.numRQ.size,1)[:,0] \n i = i + 1\n else:\n S = np.zeros([tN,1])\n E = np.zeros([tN,1])\n SM = np.zeros([tN,1])\n M = np.zeros([tN,1])\n H = np.zeros([tN,1])\n C = np.zeros([tN,1])\n HH = np.zeros([tN,1])\n CH = np.zeros([tN,1])\n R = np.zeros([tN,1])\n F = np.zeros([tN,1])\n SQ = np.zeros([tN,1])\n EQ = np.zeros([tN,1])\n SMQ = np.zeros([tN,1])\n MQ = np.zeros([tN,1])\n RQ = np.zeros([tN,1])\n t = np.linspace(0,simtime,tN)\n dcf = dcfvect\n dcr = 
dcrvect\n dhospital = dhospitalvect\n sm = smvect\n m = (1-sm)*0.81 \n h = (1-sm)*0.14 \n c = (1-sm)*0.05\n sigma = sigmavect \n # perform simulation\n if modelType == 'deterministic':\n if method == 'findInfected' or method == 'findTime' or method == 'none':\n model = models.SEIRSAgeModel(initN=initN,beta=beta,sigma=sigma,Nc=Nc,zeta=zeta,sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,\n totalTests=totalTests,psi_FP=psi_FP,psi_PP=psi_PP,dq=dq,\n initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,initMQ=initMQ,\n initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n elif method == 'findGovernmentResponse':\n extraTime = stoArgs['extraTime']\n measureTime = stoArgs['measureTime']\n initE = 1\n Nc0 = 11.2\n checkpoints = {\n 't': [measureTime+extraTime],\n 'Nc': [Nc]\n }\n model = models.SEIRSAgeModel(initN=initN,beta=beta,sigma=sigma,Nc=Nc,zeta=zeta,sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,\n totalTests=totalTests,psi_FP=psi_FP,psi_PP=psi_PP,dq=dq,\n initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,initMQ=initMQ,\n initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n else:\n raise Exception('Suitable methods to run the model are: none, findTime, findInfected, findGovernmentResponse. The provided method was: {}'.format(method))\n \n elif modelType == 'stochastic':\n if method == 'findInfected' or method == 'findTime' or method == 'none':\n model = models.SEIRSNetworkModel(G=stoArgs['G'],beta=beta,sigma=sigma,zeta=zeta,p=stoArgs['p'],sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,theta_S=theta_S,\n theta_E=theta_E,theta_SM=theta_SM,theta_M=theta_M,theta_R=theta_R,phi_S=phi_S,phi_E=phi_E,phi_SM=phi_SM,phi_R=phi_R,psi_FP=psi_FP,psi_PP=psi_PP,\n dq=dq,initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,\n initMQ=initMQ,initRQ=initRQ)\n print(simtime)\n y = model.run(T=simtime,checkpoints=checkpoints)\n # output is not returned every single day, so the results must be interpolated\n x = y.tseries\n if x[-1] < simtime:\n x=np.append(x,simtime+1)\n y.numS=np.append(y.numS,y.numS[-1])\n y.numE=np.append(y.numE,y.numE[-1])\n y.numSM=np.append(y.numSM,y.numSM[-1])\n y.numM=np.append(y.numM,y.numM[-1])\n y.numH=np.append(y.numH,y.numH[-1])\n y.numC=np.append(y.numC,y.numC[-1])\n y.numHH=np.append(y.numHH,y.numHH[-1])\n y.numCH=np.append(y.numCH,y.numCH[-1])\n y.numR=np.append(y.numR,y.numR[-1])\n y.numF=np.append(y.numF,y.numF[-1])\n y.numSQ=np.append(y.numSQ,y.numSQ[-1])\n y.numEQ=np.append(y.numEQ,y.numEQ[-1])\n y.numSMQ=np.append(y.numSMQ,y.numSMQ[-1])\n y.numMQ=np.append(y.numMQ,y.numMQ[-1])\n y.numRQ=np.append(y.numRQ,y.numRQ[-1])\n\n # first variable\n inte = inter.interp1d(x,y.numS)\n y.numS = inte(t)\n\n inte = inter.interp1d(x,y.numE)\n y.numE = inte(t)\n\n inte = inter.interp1d(x,y.numSM)\n y.numSM = inte(t)\n\n inte = inter.interp1d(x,y.numM)\n y.numM = inte(t)\n\n inte = inter.interp1d(x,y.numH)\n y.numH = inte(t)\n\n inte = inter.interp1d(x,y.numC)\n y.numC = inte(t)\n\n inte = inter.interp1d(x,y.numHH)\n y.numHH = inte(t)\n\n inte = inter.interp1d(x,y.numCH)\n y.numCH = inte(t)\n\n inte = 
inter.interp1d(x,y.numR)\n y.numR = inte(t)\n\n inte = inter.interp1d(x,y.numF)\n y.numF = inte(t)\n\n inte = inter.interp1d(x,y.numSQ)\n y.numSQ = inte(t)\n\n inte = inter.interp1d(x,y.numEQ)\n y.numEQ = inte(t)\n\n inte = inter.interp1d(x,y.numSMQ)\n y.numSMQ = inte(t)\n\n inte = inter.interp1d(x,y.numMQ)\n y.numMQ = inte(t)\n\n inte = inter.interp1d(x,y.numRQ)\n y.numRQ = inte(t)\n\n elif method == 'findGovernmentResponse':\n extraTime = stoArgs['extraTime']\n measureTime = stoArgs['measureTime']\n initE = 1\n beta0 = 0.290\n checkpoints = {\n 't': [measureTime+extraTime],\n 'beta': [beta]\n }\n model = models.SEIRSNetworkModel(G=stoArgs['G'],beta=beta,sigma=sigma,zeta=zeta,p=stoArgs['p'],sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,theta_S=theta_S,\n theta_E=theta_E,theta_SM=theta_SM,theta_M=theta_M,theta_R=theta_R,phi_S=phi_S,phi_E=phi_E,phi_SM=phi_SM,phi_R=phi_R,psi_FP=psi_FP,psi_PP=psi_PP,\n dq=dq,initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,\n initMQ=initMQ,initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n # output is not returned every single day, so the results must be interpolated\n x = y.tseries\n if x[-1] < simtime:\n x=np.append(x,simtime+1)\n y.numS=np.append(y.numS,y.numS[-1])\n y.numE=np.append(y.numE,y.numE[-1])\n y.numSM=np.append(y.numSM,y.numSM[-1])\n y.numM=np.append(y.numM,y.numM[-1])\n y.numH=np.append(y.numH,y.numH[-1])\n y.numC=np.append(y.numC,y.numC[-1])\n y.numHH=np.append(y.numHH,y.numHH[-1])\n y.numCH=np.append(y.numCH,y.numCH[-1])\n y.numR=np.append(y.numR,y.numR[-1])\n y.numF=np.append(y.numF,y.numF[-1])\n y.numSQ=np.append(y.numSQ,y.numSQ[-1])\n y.numEQ=np.append(y.numEQ,y.numEQ[-1])\n y.numSMQ=np.append(y.numSMQ,y.numSMQ[-1])\n y.numMQ=np.append(y.numMQ,y.numMQ[-1])\n y.numRQ=np.append(y.numRQ,y.numRQ[-1])\n\n # first variable\n inte = inter.interp1d(x,y.numS)\n y.numS = inte(t)\n\n inte = inter.interp1d(x,y.numE)\n y.numE = inte(t)\n\n inte = inter.interp1d(x,y.numSM)\n y.numSM = inte(t)\n\n inte = inter.interp1d(x,y.numM)\n y.numM = inte(t)\n\n inte = inter.interp1d(x,y.numH)\n y.numH = inte(t)\n\n inte = inter.interp1d(x,y.numC)\n y.numC = inte(t)\n\n inte = inter.interp1d(x,y.numHH)\n y.numHH = inte(t)\n\n inte = inter.interp1d(x,y.numCH)\n y.numCH = inte(t)\n\n inte = inter.interp1d(x,y.numR)\n y.numR = inte(t)\n\n inte = inter.interp1d(x,y.numF)\n y.numF = inte(t)\n\n inte = inter.interp1d(x,y.numSQ)\n y.numSQ = inte(t)\n\n inte = inter.interp1d(x,y.numEQ)\n y.numEQ = inte(t)\n\n inte = inter.interp1d(x,y.numSMQ)\n y.numSMQ = inte(t)\n\n inte = inter.interp1d(x,y.numMQ)\n y.numMQ = inte(t)\n\n inte = inter.interp1d(x,y.numRQ)\n y.numRQ = inte(t) \n else:\n raise Exception('Suitable methods to run model are: none, findTime, findInfected, findGovernmentResponse. The provided method was: {}'.format(method))\n \n else:\n raise Exception('Modeltype is either deterministic or stochastic. 
The provided modeltype was: {}'.format(modelType))\n \n # extract results\n if modelType == \"deterministic\":\n S[:,0] = y.sumS.reshape(y.sumS.size,1)[:,0] \n E[:,0] = y.sumE.reshape(y.sumE.size,1)[:,0] \n SM[:,0] = y.sumSM.reshape(y.sumSM.size,1)[:,0] \n M[:,0] = y.sumM.reshape(y.sumM.size,1)[:,0] \n H[:,0] = y.sumH.reshape(y.sumH.size,1)[:,0] \n C[:,0] = y.sumC.reshape(y.sumC.size,1)[:,0] \n HH[:,0] = y.sumHH.reshape(y.sumHH.size,1)[:,0] \n CH[:,0] = y.sumCH.reshape(y.sumCH.size,1)[:,0] \n R[:,0] = y.sumR.reshape(y.sumR.size,1)[:,0] \n F[:,0] = y.sumF.reshape(y.sumF.size,1)[:,0] \n SQ[:,0] = y.sumSQ.reshape(y.sumSQ.size,1)[:,0] \n EQ[:,0] = y.sumEQ.reshape(y.sumEQ.size,1)[:,0] \n SMQ[:,0] = y.sumSMQ.reshape(y.sumSMQ.size,1)[:,0] \n MQ[:,0] = y.sumMQ.reshape(y.sumMQ.size,1)[:,0] \n RQ[:,0] = y.sumRQ.reshape(y.sumRQ.size,1)[:,0] \n elif modelType == \"stochastic\":\n S[:,0] = y.numS.reshape(y.numS.size,1)[:,0] \n E[:,0] = y.numE.reshape(y.numE.size,1)[:,0] \n SM[:,0] = y.numSM.reshape(y.numSM.size,1)[:,0] \n M[:,0] = y.numM.reshape(y.numM.size,1)[:,0] \n H[:,0] = y.numH.reshape(y.numH.size,1)[:,0] \n C[:,0] = y.numC.reshape(y.numC.size,1)[:,0] \n HH[:,0] = y.numHH.reshape(y.numHH.size,1)[:,0] \n CH[:,0] = y.numCH.reshape(y.numCH.size,1)[:,0] \n R[:,0] = y.numR.reshape(y.numR.size,1)[:,0] \n F[:,0] = y.numF.reshape(y.numF.size,1)[:,0] \n SQ[:,0] = y.numSQ.reshape(y.numSQ.size,1)[:,0] \n EQ[:,0] = y.numEQ.reshape(y.numEQ.size,1)[:,0] \n SMQ[:,0] = y.numSMQ.reshape(y.numSMQ.size,1)[:,0] \n MQ[:,0] = y.numMQ.reshape(y.numMQ.size,1)[:,0] \n RQ[:,0] = y.numRQ.reshape(y.numRQ.size,1)[:,0] \n\n if modelType == 'deterministic':\n return(t,S,E,SM,M,H,C,HH,CH,R,F,SQ,EQ,SMQ,MQ,RQ)\n elif modelType == 'stochastic':\n return(t,S,E,SM,M,H,C,HH,CH,R,F,SQ,EQ,SMQ,MQ,RQ,y.numNodes)\n\n\ndef LSQ(thetas,data,fitTo,\n initN,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,monteCarlo,method,modelType,checkpoints,stoArgs):\n\n if method == 'findInfected':\n # check if number of provided bounds is two\n if len(thetas) != 2:\n raise Exception('Number of bounds for method findInfected is 2. The number of provided bounds was: {}'.format(len(thetas)))\n # define length of simulation from provided data\n simtime = data.size-1\n # assign estimates to correct varaiable\n beta = thetas[0]\n B0=thetas[1]\n # calculate initial condition\n if modelType == 'stochastic':\n raise Exception('A stochastic model should be calibrated using the method findTime. 
The provided calibration method was: {}'.format(method))\n initN = initN\n initE = np.ones(Nc.shape[0])*B0\n initSM = np.zeros(Nc.shape[0])\n initM = np.zeros(Nc.shape[0])\n initH = np.zeros(Nc.shape[0])\n initC = np.zeros(Nc.shape[0])\n initHH = np.zeros(Nc.shape[0])\n initCH = np.zeros(Nc.shape[0])\n initR = np.zeros(Nc.shape[0])\n initF = np.zeros(Nc.shape[0])\n initSQ = np.zeros(Nc.shape[0])\n initEQ = np.zeros(Nc.shape[0])\n initSMQ = np.zeros(Nc.shape[0])\n initMQ = np.zeros(Nc.shape[0])\n initRQ = np.zeros(Nc.shape[0]) \n # run simulation\n y = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1)\n # calculate quadratic error\n SSE = sum((ymodel-data)**2)\n\n elif method == 'findTime':\n # check if number of provided bounds is two or three for deterministic/stochastic respectively\n # assign the estimates to the correct variables\n if modelType == 'deterministic':\n if len(thetas) != 2:\n raise Exception('Number of bounds for deterministic model and method findTime is 2. The number of provided bounds was: {}'.format(len(thetas)))\n beta = thetas[0]\n extraTime = int(thetas[1])\n stoArgs.update({'extraTime': int(thetas[1])})\n elif modelType == 'stochastic':\n if len(thetas) != 3:\n raise Exception('Number of bounds for stochastic model and method findTime is 3. The number of provided bounds was: {}'.format(len(thetas)))\n beta = thetas[0]\n extraTime = int(thetas[1])\n stoArgs.update({'extraTime': int(thetas[1])})\n p = thetas[2]\n stoArgs.update({'p': thetas[2]})\n else:\n raise Exception('Invalid modelType. 
The provided modelType was: {}'.format(modelType))\n # define length of simulation from provided data\n simtime = data.size+extraTime-1\n # calculate initial condition\n initN = initN\n initE = np.ones(Nc.shape[0])\n initSM = np.zeros(Nc.shape[0])\n initM = np.zeros(Nc.shape[0])\n initH = np.zeros(Nc.shape[0])\n initC = np.zeros(Nc.shape[0])\n initHH = np.zeros(Nc.shape[0])\n initCH = np.zeros(Nc.shape[0])\n initR = np.zeros(Nc.shape[0])\n initF = np.zeros(Nc.shape[0])\n initSQ = np.zeros(Nc.shape[0])\n initEQ = np.zeros(Nc.shape[0])\n initSMQ = np.zeros(Nc.shape[0])\n initMQ = np.zeros(Nc.shape[0])\n initRQ = np.zeros(Nc.shape[0]) \n # run simulation\n y = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n if modelType == 'deterministic':\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + (np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1))\n ymodel = ymodel[extraTime-1:-1,0].reshape(ymodel[extraTime-1:-1,0].size,1)\n # calculate quadratic error\n SSE = sum((ymodel-data)**2) \n elif modelType == 'stochastic':\n r = initN/y[-1] # ratio between stochastic population and total population\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + (np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1))*r # extrapolate to whole population\n ymodel = ymodel[extraTime-1:-1,0].reshape(ymodel[extraTime-1:-1,0].size,1)\n # calculate quadratic error\n SSE = sum((ymodel-data)**2)\n \n elif method == 'findGovernmentResponse':\n # check if number of provided bounds is three\n if len(thetas) != 3:\n raise Exception('Number of bounds for method findGovernmentResponse is 3. 
The number of provided bounds was: {}'.format(len(thetas)))\n # assign beta and normal Nc\n beta = 0.0314\n Nc = np.array([11.2])\n # assign estimates to correct variable\n Nc_star = np.array([thetas[0]]) \n extraTime = int(thetas[1])\n stoArgs.update({'extraTime': int(thetas[1])})\n measureTime = int(thetas[2])\n stoArgs.update({'measureTime': int(thetas[2])})\n checkpoints={\n 't': [extraTime+measureTime],\n 'Nc': [Nc_star]\n }\n # define length of simulation from provided data\n simtime = data.size+int(extraTime)-1\n # calculate initial condition\n initN = initN\n initE = np.ones(Nc.shape[0])\n initSM = np.zeros(Nc.shape[0])\n initM = np.zeros(Nc.shape[0])\n initH = np.zeros(Nc.shape[0])\n initC = np.zeros(Nc.shape[0])\n initHH = np.zeros(Nc.shape[0])\n initCH = np.zeros(Nc.shape[0])\n initR = np.zeros(Nc.shape[0])\n initF = np.zeros(Nc.shape[0])\n initSQ = np.zeros(Nc.shape[0])\n initEQ = np.zeros(Nc.shape[0])\n initSMQ = np.zeros(Nc.shape[0])\n initMQ = np.zeros(Nc.shape[0])\n initRQ = np.zeros(Nc.shape[0])\n method='none' \n # run simulation\n y = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1)\n ymodel = ymodel[extraTime-1:-1,0].reshape(ymodel[extraTime-1:-1,0].size,1)\n # calculate quadratic error\n SSE = sum(abs(ymodel-data)) \n elif method == 'socialInteraction':\n # source: https://github.com/kieshaprem/covid19-agestructureSEIR-wuhan-social-distancing/tree/master/data\n Nc_home = np.loadtxt(\"Belgium/BELhome.txt\", dtype='f', delimiter='\\t')\n Nc_work = np.loadtxt(\"Belgium/BELwork.txt\", dtype='f', delimiter='\\t')\n Nc_schools = np.loadtxt(\"Belgium/BELschools.txt\", dtype='f', delimiter='\\t')\n Nc_others = np.loadtxt(\"Belgium/BELothers.txt\", dtype='f', delimiter='\\t')\n Nc_all = np.loadtxt(\"Belgium/BELall.txt\", dtype='f', delimiter='\\t')\n Nc = Nc_all\n checkpoints={\n 't': [26,29,29+5,29+10,29+15],\n 'Nc': [Nc_all-Nc_schools,\n Nc_home + thetas[0]*(1-0.20)*Nc_work +thetas[0]*(1-0.70)*Nc_others,\n Nc_home + thetas[1]*(1-0.40)*Nc_work + thetas[1]*(1-0.70)*Nc_others,\n Nc_home + thetas[2]*(1-0.52)*Nc_work + thetas[2]*(1-0.70)*Nc_others,\n Nc_home + thetas[3]*(1-0.52)*Nc_work + thetas[3]*(1-0.70)*Nc_others]\n }\n # define length of simulation from provided data\n extraTime = 27\n simtime = data.size+27-1\n beta = 0.032155\n # calculate initial condition\n initN = initN\n initE = np.ones(Nc.shape[0])\n initSM = np.zeros(Nc.shape[0])\n initM = np.zeros(Nc.shape[0])\n initH = np.zeros(Nc.shape[0])\n initC = np.zeros(Nc.shape[0])\n initHH = np.zeros(Nc.shape[0])\n initCH = np.zeros(Nc.shape[0])\n initR = np.zeros(Nc.shape[0])\n initF = np.zeros(Nc.shape[0])\n initSQ = np.zeros(Nc.shape[0])\n initEQ = np.zeros(Nc.shape[0])\n initSMQ = np.zeros(Nc.shape[0])\n initMQ = np.zeros(Nc.shape[0])\n initRQ = np.zeros(Nc.shape[0])\n # run simulation\n method='findTime'\n y = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, 
initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints)\n if modelType == 'deterministic':\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + (np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1))\n ymodel = ymodel[extraTime-1:-1,0].reshape(ymodel[extraTime-1:-1,0].size,1)\n # calculate quadratic error\n SSE = sum((ymodel-data)**2) \n else:\n raise Exception('Method not suited for least-squares fit: choose either findTime, findInfected or findGovernmentResponse. The provided method was: {}'.format(method))\n return(SSE)\n\ndef modelFit(bounds,data,fitTo,initN,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,monteCarlo,n_samples,method,modelType,checkpoints,\ndisp,polish,maxiter,popsize,**stoArgs):\n # Monte Carlo sampling of parameters gamma, dHD, dHI, sm, and call to genetic optimisation algorithm is performed here\n if monteCarlo == True:\n sigmavect = sampleFromDistribution('corona_incubatie.csv',n_samples)\n dcfvect = np.random.normal(18.5, 5.2, n_samples)\n dcrvect = np.random.normal(22.0, 5.2, n_samples)\n smvect = np.random.normal(0.86, 0.04/1.96, n_samples)\n mvect = (1-smvect)*0.81\n hvect = (1-smvect)*0.14\n cvect = (1-smvect)*0.05\n dhospitalvect = np.random.normal(9.10, 0.50/1.96, n_samples)\n thetas = scipy.optimize.differential_evolution(LSQ, bounds, args=(data,fitTo,initN,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospitalvect,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,monteCarlo,method,modelType,checkpoints,stoArgs),disp=disp,polish=polish,workers=5,maxiter=maxiter, popsize=popsize,tol=1e-18)\n else:\n sigma = 5.2\n dcf = 18.5 \n dcr = 22.0\n sm = 0.86\n m = (1-sm)*0.81 \n h = (1-sm)*0.14 \n c = (1-sm)*0.05\n dhospital = 9.1\n thetas = scipy.optimize.differential_evolution(LSQ, bounds, args=(data,fitTo,initN,sigma,Nc,zeta,sm,m,h,c,dsm,dm,dhospital,dh,dcf,dcr,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,monteCarlo,method,modelType,checkpoints,stoArgs),disp=disp,polish=polish,workers=5,maxiter=maxiter, popsize=popsize,tol=1e-18)\n fit = thetas.x\n return(fit)\n\ndef simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints,**stoArgs):\n # This function is a wrapper for 'runSimulation' to include monte carlo sampling and extract the results in a dictionary\n # Monte Carlo sampling of parameters gamma, dHD, dHI, sm, and call to genetic optimisation algorithm is performed here\n if monteCarlo == True:\n sigmavect = sampleFromDistribution('corona_incubatie.csv',n_samples)\n dcfvect = np.random.normal(18.5, 5.2, n_samples)\n dcrvect = np.random.normal(22.0, 5.2, n_samples)\n smvect = np.random.normal(0.86, 0.04/1.96, n_samples)\n mvect = (1-smvect)*0.81\n hvect = (1-smvect)*0.14\n cvect = (1-smvect)*0.05\n dhospitalvect = np.random.normal(9.10, 0.50/1.96, n_samples)\n simout = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospitalvect,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, 
initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n \n else:\n sigma = 5.2\n dcf = 18.5 \n dcr = 22.0\n sm = 0.86\n m = (1-sm)*0.81 \n h = (1-sm)*0.14 \n c = (1-sm)*0.05\n dhospital = 9.1 \n simout = runSimulation(initN,beta,sigma,Nc,zeta,sm,m,h,c,dsm,dm,dhospital,dh,dcf,dcr,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,\n initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n \n # -----------------------------------------------------------------------------\n # extract results, rescale to population size initN in case of stochastic model\n # -----------------------------------------------------------------------------\n if modelType == 'deterministic':\n simout = {\n 't': simout[0],\n 'S': simout[1],\n 'E': simout[2],\n 'SM': simout[3],\n 'M': simout[4],\n 'H': simout[5],\n 'C': simout[6],\n 'HH': simout[7],\n 'CH': simout[8],\n 'R': simout[9],\n 'F': simout[10],\n 'SQ': simout[11],\n 'EQ': simout[12],\n 'SMQ': simout[13],\n 'MQ': simout[14],\n 'RQ': simout[15],\n } \n elif modelType == 'stochastic':\n r = initN/simout[-1]\n simout = {\n 't': simout[0],\n 'S': simout[1]*r,\n 'E': simout[2]*r,\n 'SM': simout[3]*r,\n 'M': simout[4]*r,\n 'H': simout[5]*r,\n 'C': simout[6]*r,\n 'HH': simout[7]*r,\n 'CH': simout[8]*r,\n 'R': simout[9]*r,\n 'F': simout[10]*r,\n 'SQ': simout[11]*r,\n 'SMQ': simout[12]*r,\n 'MQ': simout[13]*r,\n 'RQ': simout[14]*r,\n } \n\n return(simout)\n\ndef constructHorizon(theta,period): \n n = len(theta)\n t = np.zeros([n-1])\n for i in range(n-1):\n t[i] = period*(i+1) \n checkpoints = {'t': t,\n 'Nc': theta[1:]}\n return(checkpoints)\n\ndef constructHorizonPlot(theta,period):\n if type(theta) is np.ndarray:\n n = theta.size\n Nc = np.ones([period*n+1])\n for i in range(n):\n Nc[period*i:(period*i+period)]=theta[i]\n elif type(theta) is float:\n n = 1\n Nc = np.ones([period*n])\n for i in range(n):\n Nc[period*i:(period*i+period)]=theta\n else:\n raise Exception('Theta must be a vector or float. The provided datatype was: {}'.format(type(theta)))\n return(Nc)\n \ndef constructHorizonTesting(theta1,theta2,period): \n n = len(theta1)\n t = np.zeros([n-1])\n for i in range(n-1):\n t[i] = period*(i+1) \n checkpoints = {'t': t,\n 'Nc': theta1[1:],\n 'totalTests': theta2[1:]}\n return(checkpoints)\n\ndef constructHorizonTestingPlot(theta1,theta2,period):\n if type(theta1) is np.ndarray:\n n = theta1.size\n Nc = np.ones([period*n+1])\n theta_M = np.ones([period*n+1])\n for i in range(n):\n if i == 0:\n Nc[period*i:(period*i+period)+1]=theta1[i]\n theta_M[period*i:(period*i+period)+1]=theta2[i]\n else:\n Nc[period*i:(period*i+period)]=theta1[i]\n theta_M[period*i:(period*i+period)]=theta2[i] \n elif type(theta1) is float:\n n = 1\n Nc = np.ones([period*n])\n theta_M = np.ones([period*n])\n for i in range(n):\n if i == 0:\n Nc[period*i:(period*i+period)+1]=theta1\n theta_M[period*i:(period*i+period)+1]=theta2\n else:\n Nc[period*i:(period*i+period)]=theta1[i]\n theta_M[period*i:(period*i+period)]=theta2[i] \n else:\n raise Exception('Theta must be a vector or float. 
The provided datatype was: {}'.format(type(theta1)))\n return(Nc,theta_M)\n\ndef MPCcalcWeights(thetas,initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,roundOff,period,P,stoArgs):\n controlDoF = 1\n if controlDoF == 1:\n thetas[thetas<5.6] = 1.8\n thetas[(thetas>=5.6)&(thetas<8)] = 6\n thetas[thetas>=8] = 11.2\n # Add thetas to a list\n Ncs=[]\n for i in range(thetas.size):\n Ncs.append(np.array([thetas[i]]))\n # Build prediction horizon\n for i in range(P-thetas.size):\n Ncs.append(Ncs[-1])\n checkpoints = constructHorizon(Ncs,period)\n # Set correct simtime\n simtime = checkpoints['t'].size*period\n # run simulation\n method == 'none' # nothing special \n Nc = np.array([thetas[0]]) # first checkpoint cannot be at time 0\n simout = simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints,**stoArgs)\n if monteCarlo == True:\n CH = np.mean(simout['CH'],axis=1)\n CH = np.mean(simout['CH'],axis=1).reshape(CH.size,1) \n else:\n CH = simout['CH']\n # regeling van de kritiek zieken\n y_sp = ICU # maximum aantal bedden op ICU\n ymodel = CH # voorspelde aantal kritiek zieken bij ingang beta\n error = y_sp - ymodel # vector met fouten in de tijd\n SSE = sum(error**2)\n\n elif controlDoF == 2:\n # Split list of thetas in half\n length = thetas.size\n middle_index = length//2\n thetas1 = thetas[:middle_index]\n # Discretise thetas1 (=Nc)\n thetas1[thetas1<5.6] = 1.8\n thetas1[(thetas1>=5.6)&(thetas1<8)] = 6\n thetas1[thetas1>=8] = 11.2\n thetas2 = thetas[middle_index:]\n # Add thetas to list\n Ncs1=[]\n for i in range(thetas1.size):\n Ncs1.append(np.array([thetas1[i]]))\n Ncs2=[]\n for i in range(thetas2.size):\n Ncs2.append(np.array([thetas2[i]]))\n # Build prediction horizons\n for i in range(P-thetas.size//2):\n Ncs1.append(Ncs1[-1])\n Ncs2.append(Ncs2[-1])\n # Construct checkpoints dictionary\n checkpoints = constructHorizonTesting(Ncs1,Ncs2,period)\n #print(checkpoints)\n # Define simtime\n simtime = checkpoints['t'].size*period \n # run simulation\n method == 'none' # nothing special \n Nc = np.array([thetas[0]]) # first checkpoint cannot be at time 0\n totalTests = np.array([thetas[middle_index]])\n simout = simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints,**stoArgs)\n if monteCarlo == True:\n CH = np.mean(simout['CH'],axis=1)\n CH = np.mean(simout['CH'],axis=1).reshape(CH.size,1) \n else:\n CH = simout['CH']\n # regeling van de kritiek zieken\n y_sp = ICU # maximum aantal bedden op ICU\n ymodel = CH # voorspelde aantal kritiek zieken bij ingang beta\n error = y_sp - ymodel # vector met fouten in de tijd\n SSE = sum(error**2)\n return(SSE)\n\ndef MPCcalcWeightsAge(thetas,initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n 
initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,period,P):\n\n # source: https://github.com/kieshaprem/covid19-agestructureSEIR-wuhan-social-distancing/tree/master/data\n Nc_home = np.loadtxt(\"Belgium/BELhome.txt\", dtype='f', delimiter='\\t')\n Nc_work = np.loadtxt(\"Belgium/BELwork.txt\", dtype='f', delimiter='\\t')\n Nc_schools = np.loadtxt(\"Belgium/BELschools.txt\", dtype='f', delimiter='\\t')\n Nc_others = np.loadtxt(\"Belgium/BELothers.txt\", dtype='f', delimiter='\\t')\n Nc_all = np.loadtxt(\"Belgium/BELall.txt\", dtype='f', delimiter='\\t')\n # Use values of thetas to build a list object Ncs containing discrete scenarios\n Ncs=[]\n for i in range(thetas.size):\n if thetas[i]<=1 and thetas[i]>=0:\n Ncs.append(Nc_all)\n elif thetas[i]<=2 and thetas[i]> 1:\n Ncs.append(Nc_home + Nc_schools + 0.01*(1-0.52)*Nc_work + 0.01*(1-0.70)*Nc_others)\n elif thetas[i]<=3 and thetas[i]> 2:\n Ncs.append(Nc_home + 0.01*(1-0.52)*Nc_work + 0.01*(1-0.70)*Nc_others)\n\n # build prediction horizon\n for i in range(P-thetas.size):\n Ncs.append(Ncs[-1])\n checkpoints = constructHorizon(Ncs,period)\n simtime = checkpoints['t'].size*period\n # run simulation\n method == 'none' # nothing special\n Nc = Ncs[0] # first checkpoint cannot be at time 0\n simout = simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints)\n if monteCarlo == True:\n CH = np.mean(simout['CH'],axis=1)\n CH = np.mean(simout['CH'],axis=1).reshape(CH.size,1) \n else:\n CH = simout['CH']\n # regeling van de kritiek zieken\n y_sp = ICU # maximum aantal bedden op ICU\n ymodel = CH # voorspelde aantal kritiek zieken bij ingang beta\n error = y_sp - ymodel # vector met fouten in de tijd\n SSE = sum(error**2)\n return(SSE)\n\ndef MPCoptimize(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,roundOff,period,P,N,\n disp,polish,maxiter,popsize,**stoArgs):\n controlDoF = 1\n if controlDoF == 1:\n # Geef bounds op\n bounds=[]\n for i in range(N):\n bounds.append((0,11.2))\n # Perform optimisation \n fit = scipy.optimize.differential_evolution(MPCcalcWeights, bounds, args=(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,roundOff,period,P,stoArgs),disp=disp,polish=polish,workers=-1,maxiter=maxiter, popsize=popsize,tol=1e-30)\n thetas=fit.x\n\n elif controlDoF == 2:\n # Geef bounds op\n bounds=[]\n # First variable is Nc\n for i in range(N):\n bounds.append((0,11.2))\n # Second variable is theta_M\n for i in range(N):\n bounds.append((0,1e6)) \n # Perform optimisation \n fit = scipy.optimize.differential_evolution(MPCcalcWeights, bounds, args=(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n 
initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,roundOff,period,P,stoArgs),disp=disp,polish=polish,workers=-1,maxiter=maxiter, popsize=popsize,tol=1e-30)\n thetas=fit.x \n print(thetas)\n return(thetas)\n \ndef MPCoptimizeAge(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,period,P,N,\n disp,polish,maxiter,popsize):\n # source: https://github.com/kieshaprem/covid19-agestructureSEIR-wuhan-social-distancing/tree/master/data\n Nc_home = np.loadtxt(\"Belgium/BELhome.txt\", dtype='f', delimiter='\\t')\n Nc_work = np.loadtxt(\"Belgium/BELwork.txt\", dtype='f', delimiter='\\t')\n Nc_schools = np.loadtxt(\"Belgium/BELschools.txt\", dtype='f', delimiter='\\t')\n Nc_others = np.loadtxt(\"Belgium/BELothers.txt\", dtype='f', delimiter='\\t')\n Nc_all = np.loadtxt(\"Belgium/BELall.txt\", dtype='f', delimiter='\\t')\n \n # Geef bounds op\n bounds=[]\n for i in range(N):\n bounds.append((0,3))\n # Prepare solver\n # Perform optimisation (CONTINUOUS) \n fit = scipy.optimize.differential_evolution(MPCcalcWeightsAge, bounds, args=(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,period,P),disp=disp,polish=polish,workers=-1,maxiter=maxiter, popsize=popsize,tol=1e-18,mutation=(1.9, 1.99), recombination=1)\n thetas = fit.x\n\n # discretise thetas if needed\n thetas=fit.x\n Ncs=[]\n for i in range(thetas.size):\n if thetas[i]<=1 and thetas[i]>=0:\n Ncs.append(Nc_all)\n elif thetas[i]<=2 and thetas[i]> 1:\n Ncs.append(Nc_home + Nc_schools + 0.01*(1-0.52)*Nc_work + 0.01*(1-0.70)*Nc_others)\n elif thetas[i]<=3 and thetas[i]> 2:\n Ncs.append(Nc_home + 0.01*(1-0.52)*Nc_work + 0.01*(1-0.70)*Nc_others)\n return(Ncs,thetas)\n\n# You cannot keep extending the control horizon because the number of parameters will get so big\n# that optimisation becomes a problem. 
To simulate the full course of the outbreak, it is better\n# to optimise one policy interval, advance the simulation to the next policy interval and repeat\ndef MPClongTerm(y0,nat,mort,dSM,dM,dZ,m,z,h,mh,ICU,monteCarlo,n_samples,period,maxiter,popsize,polish,disp,P,N,discrete,roundOff,Kh,Kd,Ki,nPeriods):\n betaVect=[]\n for i in range(nPeriods):\n # optimise control horizon over prediction horizon\n beta = MPCoptimize(y0,nat,mort,dSM,dM,dZ,m,z,h,mh,ICU,monteCarlo,n_samples,period,maxiter,popsize,polish,disp,P,N,discrete,roundOff,Kh,Kd,Ki)\n betaVect.append(beta[0])\n # advance the simulation one policy interval\n simtime = period # - 2\n tN = simtime + 1 \n t = np.linspace(0,simtime,tN)\n u = np.ones([tN])\n u = u*beta[0]\n simout = simModel(y0,nat,mort,u,dSM,dM,dZ,m,z,h,mh,ICU,tN,simtime,monteCarlo,n_samples,'variableBeta')\n O = simout[1]\n B = simout[2]\n SM = simout[3]\n M = simout[4]\n Z = simout[5]\n H = simout[6]\n I = simout[7]\n D = simout[8]\n T = simout[9]\n O = np.mean(O,axis=1)\n B = np.mean(B,axis=1)\n SM = np.mean(SM,axis=1)\n M = np.mean(M,axis=1)\n Z = np.mean(Z,axis=1)\n H = np.mean(H,axis=1)\n I = np.mean(I,axis=1)\n D = np.mean(D,axis=1)\n T = np.mean(T,axis=1)\n y0 = np.array([O[-1],B[-1],SM[-1],M[-1],Z[-1],H[-1],I[-1],D[-1],T[-1]])\n return(betaVect) \n \n" ]
[ [ "numpy.ones", "scipy.interpolate.interp1d", "numpy.array", "numpy.zeros", "numpy.mean", "pandas.read_csv", "numpy.append", "scipy.optimize.differential_evolution", "numpy.random.normal", "numpy.linspace", "numpy.loadtxt" ] ]
manuelprogramming/OSA
[ "3a57ea944eef3e8680055a35e8cebd36b93dac51" ]
[ "handlers/plotting.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef format_plot(func):\n def func_wrapper(*args):\n func(*args)\n plt.ylabel(\"Intensity [dBm]\")\n plt.xlabel(\"Wavelength [nm]\")\n plt.tight_layout()\n plt.show()\n return func\n\n return func_wrapper\n\n\ndef format_ani_plot(func):\n def func_wrapper(*args):\n func(*args)\n plt.ylabel(\"Intensity [dBm]\")\n plt.xlabel(\"Wavelength [nm]\")\n plt.tight_layout()\n return func\n return func_wrapper\n\n\ndef interactive_off_on(func):\n def func_wrapper(*args):\n plt.ioff()\n func(*args)\n plt.ion()\n return func\n return func_wrapper\n\n\ndef config_matplotlib(debug_mode: bool) -> None:\n plt.style.use(\"seaborn-whitegrid\")\n if not debug_mode:\n plt.ion()\n\n\nif __name__ == '__main__':\n x = np.random.random(15)\n" ]
[ [ "matplotlib.pyplot.style.use", "matplotlib.pyplot.ioff", "matplotlib.pyplot.tight_layout", "numpy.random.random", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ion", "matplotlib.pyplot.xlabel" ] ]
BesterRanX/BesterTF
[ "2e7e6938f74d027ebf9aee9b8af432a3e7b54519" ]
[ "BesterTF/Layers.py" ]
[ "import tensorflow as tf\n\n\nclass Layer():\n def __init__(self, output_dim, input_dim=0, activation=None):\n # cache parameters\n self.activation = activation\n self.input_dim = input_dim\n self.output_dim = output_dim\n\n\n\nclass Dense(Layer):\n def __init__(self, output_dim, input_dim=0, activation=None):\n # super class init\n Layer.__init__(output_dim, input_dim, activation)\n\n def compile(self):\n # initialise weights\n self.Weights = tf.Variable(tf.random_uniform([self.input_dim, self.output_dim], -1, 1))\n # initialise biases\n self.biases = tf.Variable(tf.zeros([1, self.output_dim]) + 0.1)\n\n # activation\n def act(self, inputs=None):\n Wx_plus_b = tf.matmul(inputs, self.Weights, name='Wx_plus_b') + self.biases\n return self.activation(Wx_plus_b)\n" ]
[ [ "tensorflow.random_uniform", "tensorflow.zeros", "tensorflow.matmul" ] ]
strawsyz/straw
[ "db313c78c2e3c0355cd10c70ac25a15bb5632d41" ]
[ "study/dgl_study/02.py" ]
[ "import dgl\nimport networkx as nx\n\n# create a graph\ng_nx = nx.petersen_graph()\ng_dgl = dgl.DGLGraph(g_nx)\n\nimport matplotlib.pyplot as plt\n\nplt.subplot(121)\nnx.draw(g_nx, with_labels=True)\nplt.subplot(122)\nnx.draw(g_dgl.to_networkx(), with_labels=True)\n\nplt.show()\n\n# add edges and nodes into graph\nimport dgl\nimport torch as th\n\ng = dgl.DGLGraph()\ng.add_nodes(10)\n# A couple edges one-by-one\nfor i in range(1, 5):\n g.add_edge(i, 0)\n# A few more with a paired list\nsrc = list(range(5, 8));\ndst = [0] * 3\ng.add_edges(src, dst)\n# finish with a pair of tensors\nsrc = th.tensor([8, 9]);\ndst = th.tensor([0, 0])\ng.add_edges(src, dst)\ng.add_edges([2], [8])\nnx.draw(g.to_networkx(), with_labels=True)\nplt.show()\n# Edge broadcasting will do star graph in one go!\ng.clear();\ng.add_nodes(10)\nsrc = th.tensor(list(range(1, 10)));\ng.add_edges(src, 0)\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nnx.draw(g.to_networkx(), with_labels=True)\nplt.show()\n\n# assigin a feature\nimport dgl\nimport torch\n\n# assign node features\nx = torch.randn(10, 3)\n# g.clear()\ng.ndata['x'] = x\n\n# print(g.ndata['x'] == g.nodes[:].data['x'])\nprint(g.ndata['x'])\nprint('x value of first node in graph : {}'.format(g.nodes[0].data['x']))\n# Access node set with integer, list, or integer tensor\ng.nodes[0].data['x'] = torch.zeros(1, 3)\ng.nodes[[0, 1, 2]].data['x'] = torch.zeros(3, 3)\ng.nodes[torch.tensor([0, 1, 2])].data['x'] = torch.zeros(3, 3)\n\n# Assign edge features\ng.edata['w'] = th.randn(9, 2)\nprint(g.edata['w'])\nprint('w value of first edge in graph : {}'.format(g.edges[0].data['w']))\n# Access edge set with IDs in integer, list, or integer tensor\ng.edges[1].data['w'] = th.randn(1, 2)\ng.edges[[0, 1, 2]].data['w'] = th.zeros(3, 2)\nprint(\"g.edges[[0, 1, 2]].data['w'] : \\n{}\".format(g.edges[[0, 1, 2]].data['w']))\ng.edges[th.tensor([0, 1, 2])].data['w'] = th.zeros(3, 2)\n\n# You can also access the edges by giving endpoints\ng.edges[1, 0].data['w'] = th.ones(1, 2) # edge 1 -> 0\ng.edges[[1, 2, 3], [0, 0, 0]].data['w'] = th.ones(3, 2) # edges [1, 2, 3] -> 0\n\nprint(g.node_attr_schemes())\ng.ndata['x'] = th.zeros((10, 4))\nprint(g.node_attr_schemes())\n\n# remove node or edge states\ng.ndata.pop('x')\ng.edata.pop('w')\nprint(g.node_attr_schemes())\n\n# create multigraphs\ng_multi = dgl.DGLGraph(multigraph=True)\ng_multi.add_nodes(10)\ng_multi.ndata['x'] = torch.randn(10, 2)\ng_multi.add_edges(list(range(1, 10)), 0)\ng_multi.add_edge(1, 0) # two edges on 1->0\n\ng_multi.edata['w'] = th.randn(10, 2)\ng_multi.edges[1].data['w'] = th.zeros(1, 2)\nprint(g_multi.edges())\nplt.figure()\nnx.draw(g_dgl.to_networkx(), with_labels=True)\n\nplt.show()\n\n# in multigraphs, use edge's id to query edge\neid_10 = g_multi.edge_id(1, 0)\ng_multi.edges[eid_10].data['w'] = th.ones(len(eid_10), 2)\nprint(g_multi.edata['w'])\n\n# !!!!nodes and edges can be added but not remove\n" ]
[ [ "torch.ones", "torch.randn", "matplotlib.pyplot.figure", "torch.tensor", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "torch.zeros" ] ]
FaustinCarter/lmfit-py
[ "7fbb75b2fd3f383e78692fd85c9a646793d4b071" ]
[ "tests/test_itercb.py" ]
[ "import numpy as np\nfrom lmfit import Parameters, minimize, report_fit\nfrom lmfit.models import LinearModel, GaussianModel\nfrom lmfit.lineshapes import gaussian\n\ndef per_iteration(pars, iter, resid, *args, **kws):\n \"\"\"iteration callback, will abort at iteration 23\n \"\"\"\n # print( iter, ', '.join([\"%s=%.4f\" % (p.name, p.value) for p in pars.values()]))\n return iter == 23\n\ndef test_itercb():\n x = np.linspace(0, 20, 401)\n y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)\n y = y - .20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))\n mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')\n\n pars = mod.make_params(peak_amplitude=21.0,\n peak_center=7.0,\n peak_sigma=2.0,\n bkg_intercept=2,\n bkg_slope=0.0)\n\n out = mod.fit(y, pars, x=x, iter_cb=per_iteration)\n\n assert(out.nfev == 23)\n assert(out.aborted)\n assert(not out.errorbars)\n assert(not out.success)\n" ]
[ [ "numpy.linspace" ] ]
JamesJeffryes/kb_phylogenomics
[ "133b7b7c4179b5fb1b51bade70069a545bca91fc" ]
[ "lib/kb_phylogenomics/kb_phylogenomicsImpl.py" ]
[ "# -*- coding: utf-8 -*-\n#BEGIN_HEADER\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport shutil\nimport hashlib\nimport subprocess\nimport requests\nrequests.packages.urllib3.disable_warnings()\nimport re\nimport traceback\nimport uuid\nfrom datetime import datetime\nfrom pprint import pprint, pformat\n\nimport numpy as np\nimport math\nfrom Bio import SeqIO\n\nfrom biokbase.workspace.client import Workspace as workspaceService\n#from Workspace.WorkspaceClient import Workspace as workspaceService\nfrom DataFileUtil.DataFileUtilClient import DataFileUtil as DFUClient\nfrom KBaseReport.KBaseReportClient import KBaseReport\n\nfrom DomainAnnotation.DomainAnnotationClient import DomainAnnotation\n\nimport ete3\nimport matplotlib.pyplot as pyplot # use this instead\nfrom matplotlib.patches import Arc\nfrom matplotlib.patches import Rectangle\n\n#END_HEADER\n\n\nclass kb_phylogenomics:\n '''\n Module Name:\n kb_phylogenomics\n\n Module Description:\n A KBase module: kb_phylogenomics\n\nThis module contains methods for running and visualizing results of phylogenomics and comparative genomics analyses\n '''\n\n ######## WARNING FOR GEVENT USERS ####### noqa\n # Since asynchronous IO can lead to methods - even the same method -\n # interrupting each other, you must be *very* careful when using global\n # state. A method could easily clobber the state set by another while\n # the latter method is running.\n ######################################### noqa\n VERSION = \"1.2.0\"\n GIT_URL = \"https://github.com/kbaseapps/kb_phylogenomics\"\n GIT_COMMIT_HASH = \"43733230d3f70a2eccc123b3867e99775b0d9f4c\"\n\n #BEGIN_CLASS_HEADER\n\n def log(self, target, message):\n if target is not None:\n target.append(message)\n print(message)\n sys.stdout.flush()\n\n #END_CLASS_HEADER\n\n # config contains contents of config file in a hash or None if it couldn't\n # be found\n def __init__(self, config):\n #BEGIN_CONSTRUCTOR\n self.workspaceURL = config['workspace-url']\n self.shockURL = config['shock-url']\n #self.handleURL = config['handle-service-url']\n self.serviceWizardURL = config['service-wizard-url']\n self.callbackURL = os.environ['SDK_CALLBACK_URL']\n self.scratch = os.path.abspath(config['scratch'])\n\n #pprint(config)\n\n if not os.path.exists(self.scratch):\n os.makedirs(self.scratch)\n\n #self.genome_feature_id_delim = '.f:'\n\n #END_CONSTRUCTOR\n pass\n\n\n def view_tree(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_tree_Input\" (view_tree() ** **\n show a KBase Tree and make newick and images downloadable) ->\n structure: parameter \"workspace_name\" of type \"workspace_name\" (**\n Common types), parameter \"input_tree_ref\" of type \"data_obj_ref\",\n parameter \"desc\" of String\n :returns: instance of type \"view_tree_Output\" -> structure: parameter\n \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_tree\n\n #### STEP 0: init\n ##\n dfu = DFUClient(self.callbackURL)\n console = []\n invalid_msgs = []\n self.log(console,'Running view_tree() with params=')\n self.log(console, \"\\n\"+pformat(params))\n report = ''\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join (self.scratch, 'output_'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\n #### STEP 1: do some basic checks\n ##\n if 'workspace_name' not in params:\n raise 
ValueError('workspace_name parameter is required')\n if 'input_tree_ref' not in params:\n raise ValueError('input_tree_ref parameter is required')\n #if 'output_name' not in params:\n # raise ValueError('output_name parameter is required')\n\n\n #### STEP 2: load the method provenance from the context object\n ##\n self.log(console,\"SETTING PROVENANCE\") # DEBUG\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n # add additional info to provenance here, in this case the input data object reference\n provenance[0]['input_ws_objects'] = []\n provenance[0]['input_ws_objects'].append(params['input_tree_ref'])\n provenance[0]['service'] = 'kb_phylogenomics'\n provenance[0]['method'] = 'view_tree'\n\n\n #### STEP 3: Get tree and save as newick file\n ##\n try:\n ws = workspaceService(self.workspaceURL, token=ctx['token'])\n objects = ws.get_objects([{'ref': params['input_tree_ref']}])\n data = objects[0]['data']\n info = objects[0]['info']\n intree_name = info[1]\n intree_type_name = info[2].split('.')[1].split('-')[0]\n\n except Exception as e:\n raise ValueError('Unable to fetch input_tree_ref object from workspace: ' + str(e))\n #to get the full stack trace: traceback.format_exc()\n \n if intree_type_name == 'Tree':\n tree_in = data\n else:\n raise ValueError('Cannot yet handle input_tree type of: '+type_name)\n\n intree_newick_file_path = os.path.join(output_dir, intree_name+\".newick\")\n self.log(console, 'writing intree file: '+intree_newick_file_path)\n with open(intree_newick_file_path, 'w', 0) as intree_newick_file_handle:\n intree_newick_file_handle.write(tree_in['tree'])\n\n # upload\n try:\n newick_upload_ret = dfu.file_to_shock({'file_path': intree_newick_file_path,\n #'pack': 'zip'})\n 'make_handle': 0})\n except:\n raise ValueError ('error uploading newick file to shock')\n\n\n #### STEP 4: if labels defined, make separate newick-labels file\n ## (NOTE: adjust IDs so ETE3 parse doesn't choke on conflicting chars)\n ##\n if 'default_node_labels' in tree_in:\n newick_labels_file = intree_name+'-labels.newick'\n output_newick_labels_file_path = os.path.join(output_dir, newick_labels_file);\n #default_row_ids = tree_in['default_row_labels']\n #new_ids = dict()\n #for row_id in default_row_ids.keys():\n # new_ids[row_id] = default_row_ids[row_id]\n\n mod_newick_buf = tree_in['tree']\n mod_newick_buf = re.sub('\\|','%'+'|'.encode(\"hex\"), mod_newick_buf)\n #for row_id in new_ids.keys():\n for node_id in tree_in['default_node_labels'].keys():\n label = tree_in['default_node_labels'][node_id]\n #self.log (console, \"node \"+node_id+\" label B4: '\"+label+\"'\") # DEBUG\n label = re.sub(' \\(kb[^\\)]*\\)', '', label) # just get rid of problematic (kb|g.1234)\n label = re.sub('\\s','_',label)\n #label = re.sub('\\/','%'+'/'.encode(\"hex\"), label)\n #label = re.sub(r'\\\\','%'+'\\\\'.encode(\"hex\"), label)\n #label = re.sub('\\[','%'+'['.encode(\"hex\"), label)\n #label = re.sub('\\]','%'+']'.encode(\"hex\"), label)\n label = re.sub('\\(','[', label)\n label = re.sub('\\)',']', label)\n label = re.sub('\\:','%'+':'.encode(\"hex\"), label)\n label = re.sub('\\;','%'+';'.encode(\"hex\"), label)\n label = re.sub('\\|','%'+'|'.encode(\"hex\"), label)\n #self.log (console, \"node \"+node_id+\" label AF: '\"+label+\"'\") # DEBUG\n #self.log (console, \"NEWICK B4: '\"+mod_newick_buf+\"'\") # DEBUG\n mod_node_id = re.sub('\\|','%'+'|'.encode(\"hex\"), node_id)\n mod_newick_buf = re.sub ('\\('+mod_node_id+'\\:', '('+label+':', mod_newick_buf)\n mod_newick_buf = 
re.sub ('\\,'+mod_node_id+'\\:', ','+label+':', mod_newick_buf)\n #self.log (console, \"NEWICK AF: '\"+mod_newick_buf+\"'\") # DEBUG\n\n #self.log(console, \"new_id: '\"+new_id+\"' label: '\"+label+\"'\") # DEBUG\n \n mod_newick_buf = re.sub ('_', ' ', mod_newick_buf)\n with open (output_newick_labels_file_path, 'w', 0) as output_newick_labels_file_handle:\n output_newick_labels_file_handle.write(mod_newick_buf)\n\n # upload\n try:\n newick_labels_upload_ret = dfu.file_to_shock({'file_path': output_newick_labels_file_path,\n #'pack': 'zip'})\n 'make_handle': 0})\n except:\n raise ValueError ('error uploading newick labels file to shock')\n\n\n #### STEP 5: Create html with tree image\n ##\n html_output_dir = os.path.join(output_dir, 'output_html.'+str(timestamp))\n if not os.path.exists(html_output_dir):\n os.makedirs(html_output_dir)\n html_file = intree_name+'.html'\n png_file = intree_name+'.png'\n pdf_file = intree_name+'.pdf'\n output_html_file_path = os.path.join(html_output_dir, html_file);\n output_png_file_path = os.path.join(html_output_dir, png_file);\n output_pdf_file_path = os.path.join(output_dir, pdf_file);\n newick_buf = tree_in['tree']\n if 'default_node_labels' in tree_in:\n newick_buf = mod_newick_buf\n self.log(console, \"NEWICK_BUF: '\"+newick_buf+\"'\")\n\n # init ETE3 objects\n t = ete3.Tree(newick_buf)\n ts = ete3.TreeStyle()\n\n # customize\n ts.show_leaf_name = True\n ts.show_branch_length = False\n ts.show_branch_support = True\n #ts.scale = 50 # 50 pixels per branch length unit\n ts.branch_vertical_margin = 5 # pixels between adjacent branches\n title_disp = intree_name\n if 'desc' in params and params['desc'] != None and params['desc'] != '':\n title_disp += ': '+params['desc']\n ts.title.add_face(ete3.TextFace(title_disp, fsize=10), column=0)\n\n node_style = ete3.NodeStyle()\n node_style[\"fgcolor\"] = \"#606060\" # for node balls\n node_style[\"size\"] = 10 # for node balls (gets reset based on support)\n node_style[\"vt_line_color\"] = \"#606060\"\n node_style[\"hz_line_color\"] = \"#606060\"\n node_style[\"vt_line_width\"] = 2\n node_style[\"hz_line_width\"] = 2\n node_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n node_style[\"hz_line_type\"] = 0\n\n leaf_style = ete3.NodeStyle()\n leaf_style[\"fgcolor\"] = \"#ffffff\" # for node balls\n leaf_style[\"size\"] = 2 # for node balls (we're using it to add space)\n leaf_style[\"vt_line_color\"] = \"#606060\" # unecessary\n leaf_style[\"hz_line_color\"] = \"#606060\"\n leaf_style[\"vt_line_width\"] = 2\n leaf_style[\"hz_line_width\"] = 2\n leaf_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n leaf_style[\"hz_line_type\"] = 0\n\n for n in t.traverse():\n if n.is_leaf():\n style = leaf_style\n else:\n style = ete3.NodeStyle()\n for k in node_style.keys():\n style[k] = node_style[k]\n\n if n.support > 0.95:\n style[\"size\"] = 6\n elif n.support > 0.90:\n style[\"size\"] = 5\n elif n.support > 0.80:\n style[\"size\"] = 4\n else:\n style[\"size\"] = 2\n\n n.set_style(style)\n\n # save images\n dpi = 300\n img_units = \"in\"\n img_pix_width = 1200\n img_in_width = round(float(img_pix_width)/float(dpi), 1)\n img_html_width = img_pix_width // 2\n t.render(output_png_file_path, w=img_in_width, units=img_units, dpi=dpi, tree_style=ts)\n t.render(output_pdf_file_path, w=img_in_width, units=img_units, tree_style=ts) # dpi irrelevant\n\n # make html\n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head><title>KBase Tree: '+intree_name+'</title></head>']\n 
html_report_lines += ['<body bgcolor=\"white\">']\n html_report_lines += ['<img width='+str(img_html_width)+' src=\"'+png_file+'\">']\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n\n html_report_str = \"\\n\".join(html_report_lines)\n with open (output_html_file_path, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n\n\n # upload images and html\n try:\n png_upload_ret = dfu.file_to_shock({'file_path': output_png_file_path,\n #'pack': 'zip'})\n 'make_handle': 0})\n except:\n raise ValueError ('error uploading png file to shock')\n try:\n pdf_upload_ret = dfu.file_to_shock({'file_path': output_pdf_file_path,\n #'pack': 'zip'})\n 'make_handle': 0})\n except:\n raise ValueError ('error uploading pdf file to shock')\n try:\n html_upload_ret = dfu.file_to_shock({'file_path': html_output_dir,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('error uploading png file to shock')\n\n\n # Create report obj\n #\n reportName = 'blast_report_'+str(uuid.uuid4())\n #report += output_newick_buf+\"\\n\"\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n #reportObj['objects_created'].append({'ref': str(params['workspace_name'])+'/'+str(params['output_name']),'description': params['output_name']+' Tree'})\n reportObj['html_links'] = [{'shock_id': html_upload_ret['shock_id'],\n 'name': html_file,\n 'label': intree_name+' HTML'\n }\n ]\n reportObj['file_links'] = [{'shock_id': newick_upload_ret['shock_id'],\n 'name': intree_name+'.newick',\n 'label': intree_name+' NEWICK'\n }\n ]\n if 'default_node_labels' in tree_in:\n reportObj['file_links'].append({'shock_id': newick_labels_upload_ret['shock_id'],\n 'name': intree_name+'-labels.newick',\n 'label': intree_name+' NEWICK (with labels)'\n })\n\n reportObj['file_links'].extend([{'shock_id': png_upload_ret['shock_id'],\n 'name': intree_name+'.png',\n 'label': intree_name+' PNG'\n },\n {'shock_id': pdf_upload_ret['shock_id'],\n 'name': intree_name+'.pdf',\n 'label': intree_name+' PDF'\n }])\n\n SERVICE_VER = 'release'\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n report_info = reportClient.create_extended_report(reportObj)\n\n\n # Done\n #\n self.log(console,\"BUILDING RETURN OBJECT\")\n output = { 'report_name': report_info['name'],\n 'report_ref': report_info['ref']\n }\n\n self.log(console,\"view_tree() DONE\")\n #END view_tree\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_tree return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def trim_tree_to_genomeSet(self, ctx, params):\n \"\"\"\n :param params: instance of type \"trim_tree_to_genomeSet_Input\"\n (trim_tree_to_genomeSet() ** ** trim a KBase Tree to match\n genomeset, and make newick and images downloadable) -> structure:\n parameter \"workspace_name\" of type \"workspace_name\" (** Common\n types), parameter \"input_genomeSet_ref\" of type \"data_obj_ref\",\n parameter \"input_tree_ref\" of type \"data_obj_ref\", parameter\n \"desc\" of String\n :returns: instance of type \"trim_tree_to_genomeSet_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context 
object\n # return variables are: output\n #BEGIN trim_tree_to_genomeSet\n #END trim_tree_to_genomeSet\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method trim_tree_to_genomeSet return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def run_DomainAnnotation_Sets(self, ctx, params):\n \"\"\"\n :param params: instance of type \"run_DomainAnnotation_Sets_Input\"\n (run_DomainAnnotation_Sets() ** ** run the DomainAnnotation App\n against a GenomeSet) -> structure: parameter \"workspace_name\" of\n type \"workspace_name\" (** Common types), parameter\n \"input_genomeSet_ref\" of type \"data_obj_ref\", parameter\n \"override_annot\" of type \"bool\"\n :returns: instance of type \"run_DomainAnnotation_Sets_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN run_DomainAnnotation_Sets\n console = []\n self.log(console, 'Running run_DomainAnnotation_Sets() with params=')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n\n ### STEP 1: basic parameter checks + parsing\n required_params = ['workspace_name',\n 'input_genomeSet_ref'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+required_param+\"'\")\n\n\n ### STEP 2: build a list of genomes to iterate through\n\n # get genome set\n input_ref = params['input_genomeSet_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseSearch.GenomeSet\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n genomeSet_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genomeSet: \"+input_ref)\n\n # get genome refs and object names\n genome_ids = genomeSet_obj['elements'].keys() # note: genome_id may be meaningless\n genome_refs = []\n for genome_id in genome_ids:\n genome_refs.append (genomeSet_obj['elements'][genome_id]['ref'])\n\n genome_obj_name_by_ref = dict()\n uniq_genome_ws_ids = dict()\n ws_name_by_genome_ref = dict()\n\n for genome_ref in genome_refs:\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n ws_name_by_genome_ref[input_ref] = input_obj_info[WORKSPACE_I]\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[input_ref] = input_name\n\n\n ### STEP 3: Determine which genomes have already got domain annotations\n domain_annot_done = dict()\n for ws_id in uniq_genome_ws_ids.keys():\n try:\n dom_annot_obj_info_list = wsClient.list_objects({'ids':[ws_id],'type':\"KBaseGeneFamilies.DomainAnnotation\"})\n except Exception as e:\n raise ValueError (\"Unable to list DomainAnnotation objects from workspace: \"+str(ws_id)+\" \"+str(e))\n\n for info in dom_annot_obj_info_list:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n \n dom_annot_ref = str(info[WSID_I])+'/'+str(info[OBJID_I])+'/'+str(info[VERSION_I])\n try:\n domain_data = wsClient.get_objects([{'ref':dom_annot_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch domain annotation: \"+dom_annot_ref)\n\n # read domain data object\n genome_ref = domain_data['genome_ref']\n if genome_ref not in genome_refs:\n continue\n domain_annot_done[genome_ref] = True\n\n\n ### STEP 4: run DomainAnnotation on each genome in set\n try:\n SERVICE_VER = 'dev' # DEBUG\n daClient = DomainAnnotation (url=self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER) # SDK Local\n #daClient = DomainAnnotation (url=self.serviceWizardURL, token=ctx['token'], service_ver=SERVICE_VER) # Dynamic service\n except:\n raise ValueError (\"unable to instantiate DomainAnnotationClient\")\n\n # RUN DomainAnnotations\n report_text = ''\n for genome_i,genome_ref in enumerate(genome_refs):\n\n if 'override_annot' not in params or params['override_annot'] != '1':\n if genome_ref in domain_annot_done:\n self.log (console, \"SKIPPING repeat domain annotation for genome: \"+genome_obj_name_by_ref[genome_ref])\n\n continue\n\n genome_obj_name = genome_obj_name_by_ref[genome_ref]\n domains_obj_name = re.sub ('[\\.\\-\\_\\:]GenomeAnnotation$', '', genome_obj_name)\n domains_obj_name = re.sub ('[\\.\\-\\_\\:]Genome$', '', domains_obj_name)\n domains_obj_name += '.DomainAnnotation'\n 
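# A small sketch of the output-name derivation used just above: strip a trailing
# ".Genome" / ".GenomeAnnotation"-style suffix from the genome object name and
# append ".DomainAnnotation" for the DomainAnnotation result object.  The helper
# name is illustrative, not part of the KBase SDK; the regexes are the same ones
# used above.
import re

def derive_domains_obj_name(genome_obj_name):
    name = re.sub(r'[\.\-\_\:]GenomeAnnotation$', '', genome_obj_name)
    name = re.sub(r'[\.\-\_\:]Genome$', '', name)
    return name + '.DomainAnnotation'

# e.g. derive_domains_obj_name('Ecoli_K12.Genome') == 'Ecoli_K12.DomainAnnotation'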
domains_obj_name = 'domains_'+domains_obj_name # DEBUG\n DomainAnnotation_Params = { 'genome_ref': genome_ref,\n 'dms_ref': 'KBasePublicGeneDomains/All',\n 'ws': params['workspace_name'],\n #'ws': ws_name_by_genome_ref[genome_ref],\n 'output_result_id': domains_obj_name\n }\n self.log (console, \"RUNNING domain annotation for genome: \"+genome_obj_name_by_ref[genome_ref])\n self.log(console, \"\\n\"+pformat(DomainAnnotation_Params))\n self.log(console, str(datetime.now()))\n\n #da_retVal = daClient.search_domains (DomainAnnotation_Params)[0]\n da_retVal = daClient.search_domains (DomainAnnotation_Params)\n this_output_ref = da_retVal['output_result_id']\n this_report_name = da_retVal['report_name']\n this_report_ref = da_retVal['report_ref']\n\n try:\n this_report_obj = wsClient.get_objects([{'ref':this_report_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch report: \"+this_report_ref)\n report_text += this_report_obj['text_message']\n report_text += \"\\n\\n\"\n\n\n ### STEP 5: build and save the report\n reportObj = {\n 'objects_created': [],\n 'text_message': report_text\n }\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n report_info = reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n\n\n ### STEP 6: construct the output to send back\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END run_DomainAnnotation_Sets\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method run_DomainAnnotation_Sets return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_fxn_profile(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_fxn_profile_Input\"\n (view_fxn_profile() ** ** show a table/heatmap of general\n categories or custom gene families for a set of Genomes) ->\n structure: parameter \"workspace_name\" of type \"workspace_name\" (**\n Common types), parameter \"input_genomeSet_ref\" of type\n \"data_obj_ref\", parameter \"namespace\" of String, parameter\n \"custom_target_fams\" of type \"CustomTargetFams\" (parameter groups)\n -> structure: parameter \"target_fams\" of list of String, parameter\n \"extra_target_fam_groups_COG\" of list of String, parameter\n \"extra_target_fam_groups_PFAM\" of list of String, parameter\n \"extra_target_fam_groups_TIGR\" of list of String, parameter\n \"extra_target_fam_groups_SEED\" of list of String, parameter\n \"count_category\" of String, parameter \"heatmap\" of type \"bool\",\n parameter \"vertical\" of type \"bool\", parameter \"top_hit\" of type\n \"bool\", parameter \"e_value\" of Double, parameter \"log_base\" of\n Double, parameter \"show_blanks\" of type \"bool\"\n :returns: instance of type \"view_fxn_profile_Output\" -> structure:\n parameter \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_fxn_profile\n\n ### STEP 0: basic init\n console = []\n self.log(console, 'Running view_fxn_profile(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_genomeSet_ref',\n 'namespace'\n ]\n for arg in 
required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n if params['namespace'] == 'custom':\n if ('custom_target_fams' not in params or not params['custom_target_fams']) \\\n or ( \\\n ('target_fams' not in params['custom_target_fams'] or not params['custom_target_fams']['target_fams']) \\\n and ('extra_target_fam_groups_COG' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_COG']) \\\n and ('extra_target_fam_groups_PFAM' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_PFAM']) \\\n and ('extra_target_fam_groups_TIGR' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_TIGR']) \\\n and ('extra_target_fam_groups_SEED' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_SEED'])\n ):\n \n raise ValueError (\"Must define either param: 'target_fams' or 'extra_target_fam_groups' if using CUSTOM targets\")\n\n # base config\n namespace_classes = ['COG', 'PF', 'TIGR', 'SEED']\n show_blanks = False\n if 'show_blanks' in params and params['show_blanks'] == '1':\n show_blanks = True\n e_value_thresh = None\n if 'e_value' in params and params['e_value'] != None and params['e_value'] != '':\n e_value_thresh = float (params['e_value'])\n top_hit_flag = False\n if 'top_hit' in params and params['top_hit'] != None and params['top_hit'] != '' and params['top_hit'] != 0:\n top_hit_flag = True\n\n domain_desc_basepath = os.path.abspath('/kb/module/data/domain_desc')\n domain_to_cat_map_path = dict()\n domain_cat_names_path = dict()\n domain_fam_names_path = dict()\n domain_to_cat_map_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_cat_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014_funcat.tsv')\n domain_fam_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_to_cat_map_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_cat_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans_names.tsv')\n domain_fam_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_to_cat_map_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_cat_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrrole2go.txt')\n #domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrfams2go.txt')\n domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_to_cat_map_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_funcat.txt')\n #domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_fam_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n provenance[0]['input_ws_objects']=[str(params['input_genomeSet_ref'])]\n\n\n # set the output path\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\n # configure categories\n #\n cats = []\n cat2name = dict()\n cat2group = dict()\n domfam2cat = dict()\n cat2domfams = dict()\n 
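# A sketch of the option-parsing pattern used above: the narrative UI params
# arrive loosely typed (strings like '1' or '', or missing keys), so they are
# folded into booleans and an optional float threshold before profiling starts.
# Param names mirror the ones above ('show_blanks', 'top_hit', 'e_value'); the
# helper itself is illustrative.
def parse_profile_options(params):
    show_blanks = params.get('show_blanks') == '1'
    top_hit_flag = params.get('top_hit') not in (None, '', 0)
    e_value_thresh = None
    if params.get('e_value') not in (None, ''):
        e_value_thresh = float(params['e_value'])
    return show_blanks, top_hit_flag, e_value_thresh

# e.g. parse_profile_options({'show_blanks': '1', 'e_value': '1e-5'}) == (True, False, 1e-05)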
namespaces_reading = dict()\n\n # categories are high-level summations\n if params['namespace'] != 'custom':\n for namespace in ['COG','PF','TIGR','SEED']:\n if params['namespace'] == namespace:\n namespaces_reading[namespace] = True\n\n # read all mappings between groups and domfams\n for namespace in ['COG','PF','TIGR','SEED']:\n\n cat2name[namespace] = dict()\n cat2group[namespace] = dict()\n domfam2cat[namespace] = dict()\n cat2domfams[namespace] = dict()\n\n # get high-level cats\n tigrrole_id2cat = dict()\n with open (domain_cat_names_path[namespace], 'r', 0) as dom_cat_handle:\n for line in dom_cat_handle.readlines():\n line = line.strip()\n \n if namespace == 'COG':\n [cat, cat_group, cat_name] = line.split(\"\\t\")[0:3]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'PF':\n [cat, cat_name] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = None\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [cat, cat_id, cat_group, cat_name_plus_go_terms] = line.split(\"\\t\")[0:4]\n tigrrole_id2cat[cat_id] = cat\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_name = re.sub (' *\\> GO:.*$', '', cat_name_plus_go_terms)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'SEED':\n #[cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n [cat_group, cat] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_disp = re.sub ('_', ' ', cat)\n cat2name[namespace][cat] = cat_disp\n cat2group[namespace][cat] = cat_group\n\n # get domfam to cat map, and vice versa\n with open (domain_to_cat_map_path[namespace], 'r', 0) as dom2cat_map_handle:\n for line in dom2cat_map_handle.readlines():\n line = line.strip()\n\n if namespace == 'COG':\n [domfam, cat_str, cat_name] = line.split(\"\\t\")[0:3]\n cat = cat_str[0] # only use first cat\n\n elif namespace == 'PF':\n [domfam, cat, cat_name, dom_id, dom_name] = line.split(\"\\t\")[0:5]\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if cat_id != '' and int(cat_id) != 0 and cat_id in tigrrole_id2cat:\n cat = tigrrole_id2cat[cat_id]\n else:\n continue\n\n elif namespace == 'SEED':\n [cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n\n domfam2cat[namespace][domfam] = cat\n if cat not in cat2domfams[namespace]:\n cat2domfams[namespace][cat] = []\n cat2domfams[namespace][cat].append(domfam)\n\n # custom domains\n if params['namespace'] == 'custom':\n\n # add target fams\n target_fams = []\n if 'target_fams' in params['custom_target_fams'] and params['custom_target_fams']['target_fams']:\n for target_fam in params['custom_target_fams']['target_fams']:\n target_fam = target_fam.strip()\n if target_fam == '':\n continue\n\n target_fam = re.sub (\"^cog\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^pf\", \"PF\", target_fam, 
flags=re.IGNORECASE)\n target_fam = re.sub (\"^tigr\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^seed\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PFAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P-FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P_FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGRFAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n\n target_fam = re.sub (\"^COG:\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG-\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG_\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG *\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF:\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF-\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF_\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF *\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR:\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR *\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED:\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED-\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED_\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED *\", \"SEED\", target_fam, flags=re.IGNORECASE)\n\n num_id_len = dict()\n num_id_len['COG'] = 4\n num_id_len['PF'] = 5\n num_id_len['TIGR'] = 5\n\n #self.log (console, \"TARGET_FAM A: '\"+target_fam+\"'\") # DEBUG\n \n if target_fam.startswith('SEED'):\n namespaces_reading['SEED'] = True\n target_fam = target_fam.strip()\n target_fam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' ', '_', target_fam)\n else:\n namespace_found = False\n for namespace_iter in ['COG','PF','TIGR']:\n if target_fam.startswith(namespace_iter):\n this_namespace = namespace_iter\n namespaces_reading[this_namespace] = True\n target_fam = re.sub(this_namespace, \"\", target_fam)\n namespace_found = True\n break\n if not namespace_found:\n raise ValueError (\"unrecognized custom domain family: '\"+str(target_fam)+\"'\")\n leading_zeros = ''\n for c_i in range(num_id_len[this_namespace] - len(str(target_fam))):\n leading_zeros += '0'\n target_fam = this_namespace + leading_zeros + target_fam\n\n #self.log (console, \"TARGET_FAM B: '\"+target_fam+\"'\") # DEBUG\n\n target_fams.append(target_fam)\n\n # add extra target fams\n extra_target_fams = []\n extra_target_fam_groups = []\n domfam2group = dict()\n for target_set in ['extra_target_fam_groups_COG', 'extra_target_fam_groups_PFAM', 'extra_target_fam_groups_TIGR', 'extra_target_fam_groups_SEED']:\n if target_set in params['custom_target_fams'] and params['custom_target_fams'][target_set]:\n extra_target_fam_groups.extend (params['custom_target_fams'][target_set])\n\n if extra_target_fam_groups:\n for target_group in extra_target_fam_groups:\n target_group = 
target_group.strip()\n if target_group == '':\n continue\n\n namespace = re.sub (\":.*$\", \"\", target_group)\n namespaces_reading[namespace] = True\n\n if namespace == 'COG':\n this_group = re.sub (\"COG: \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'PF':\n this_group = re.sub (\"PF: Clan \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'TIGR':\n this_group = re.sub (\"TIGR: role:\", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n this_group = 'TIGR_role:'+this_group\n elif namespace == 'SEED':\n this_group = re.sub (\"SEED: \", \"\", target_group)\n\n for domfam in cat2domfams[namespace][this_group]:\n extra_target_fams.append(domfam)\n domfam2group[domfam] = target_group\n\n # we have our targets\n cats = target_fams + extra_target_fams\n\n # store names of targets\n domfam2name = dict()\n for namespace in namespaces_reading.keys():\n domfam2name[namespace] = dict()\n\n if namespace == 'COG':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, cat_class, domfam_name] = line.split(\"\\t\")[0:3]\n domfam2name[namespace][domfam] = domfam_name\n\n elif namespace == 'PF':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, class_id, class_name, domfam_id, domfam_name] = line.split(\"\\t\")[0:5]\n if domfam_name.startswith(domfam_id):\n combo_name = domfam_name\n else:\n combo_name = domfam_id+': '+domfam_name\n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'TIGR':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if domfam_name != '':\n if domfam_desc.startswith(domfam_name):\n combo_name = domfam_desc\n else:\n combo_name = domfam_name+': '+domfam_desc\n else:\n if domfam_desc.startswith(domfam_id):\n combo_name = domfam_desc\n else:\n combo_name = domfam_id+': '+domfam_desc\n if ec_id != '':\n combo_name += ' (EC '+ec_id+')'\n \n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'SEED':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [level1, level2, level3, domfam] = line.split(\"\\t\")[0:4]\n\n domfam_desc = domfam\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n if domfam in domfam2name[namespace]:\n if len(domfam_desc) > len(domfam2name[namespace][domfam]):\n domfam2name[namespace][domfam] = domfam_desc\n else:\n domfam2name[namespace][domfam] = domfam_desc\n\n # just in case\n elif params['namespace'] != 'COG' \\\n and params['namespace'] != 'PF' \\\n and params['namespace'] != 'TIGR' \\\n and params['namespace'] != 'SEED':\n raise ValueError (\"Unknown namespace: '\"+str(params['namespace'])+\"'\")\n\n\n # get genome set\n #\n input_ref = params['input_genomeSet_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n 
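# A condensed sketch of the custom-family normalization performed above: fold
# the many accepted prefix spellings (pf, PFAM, P-FAM, TIGRFAM, COG:, ...) onto
# the canonical namespace, then zero-pad the numeric part to that namespace's
# ID width (COG=4 digits, PF=5, TIGR=5).  SEED names take the separate path
# shown above; the regexes here compress the long re.sub chain and are
# illustrative rather than guaranteed equivalents.
import re

NUM_ID_LEN = {'COG': 4, 'PF': 5, 'TIGR': 5}

def normalize_target_fam(target_fam):
    fam = target_fam.strip()
    fam = re.sub(r'^cog[:\-_ ]*', 'COG', fam, flags=re.IGNORECASE)
    fam = re.sub(r'^(pfam|p[-_]?fam|pf)[:\-_ ]*', 'PF', fam, flags=re.IGNORECASE)
    fam = re.sub(r'^(tigr[-_]?fam|tigr)[:\-_ ]*', 'TIGR', fam, flags=re.IGNORECASE)
    for namespace in ('COG', 'PF', 'TIGR'):
        if fam.startswith(namespace):
            return namespace + fam[len(namespace):].zfill(NUM_ID_LEN[namespace])
    raise ValueError("unrecognized custom domain family: '%s'" % target_fam)

# e.g. normalize_target_fam('pf 123') == 'PF00123'
#      normalize_target_fam('COG-5') == 'COG0005'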
input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseSearch.GenomeSet\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n genomeSet_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genomeSet: \"+input_ref)\n\n\n # get genome refs, object names, sci names, protein-coding gene counts, and SEED annot\n #\n genome_ids = genomeSet_obj['elements'].keys() # note: genome_id may be meaningless\n genome_refs = []\n for genome_id in genome_ids:\n genome_refs.append (genomeSet_obj['elements'][genome_id]['ref'])\n\n genome_obj_name_by_ref = dict()\n genome_sci_name_by_ref = dict()\n genome_CDS_count_by_ref = dict()\n uniq_genome_ws_ids = dict()\n\n dom_hits = dict() # initialize dom_hits here because reading SEED within genome\n genes_with_hits_cnt = dict()\n\n for genome_ref in genome_refs:\n\n dom_hits[genome_ref] = dict()\n genes_with_hits_cnt[genome_ref] = dict()\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[genome_ref] = input_name\n\n try:\n genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n # sci name\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n \n # CDS cnt\n cds_cnt = 0\n for feature in genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n cds_cnt += 1\n genome_CDS_count_by_ref[genome_ref] = cds_cnt\n\n # SEED annotations\n #\n #f_cnt = 0 # DEBUG\n if 'SEED' in namespaces_reading:\n for feature in genome_obj['features']:\n #if f_cnt % 100 == 0:\n # self.log (console, \"iterating features: \"+str(f_cnt)) # DEBUG\n\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n #if f_cnt % 100 == 0:\n # self.log (console, \"prot: \"+str(feature['protein_translation'])) # DEBUG\n\n if 'function' in feature and feature['function'] != None and feature['function'] != '':\n gene_name = feature['id']\n \n #if f_cnt % 100 == 0:\n # self.log (console, \"fxn: '\"+str(feature['function'])+\"'\") # DEBUG\n\n # store assignments for gene\n for namespace in ['SEED']:\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n dom_hits[genome_ref][gene_name][namespace] = dict()\n\n domfam_list = []\n annot_set = feature['function'].strip().split(';')\n for annot in annot_set:\n annot_set_2 = annot.strip().split('@')\n for annot2 in annot_set_2:\n domfam = annot2.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n domfam_list.append(domfam)\n #if f_cnt % 100 == 0:\n # self.log (console, \"domfam: '\"+str(domfam)+\"'\") # DEBUG\n\n if top_hit_flag: # does SEED give more than one function?\n dom_hits[genome_ref][gene_name][namespace][domfam_list[0]] = True\n else:\n for domfam in domfam_list:\n dom_hits[genome_ref][gene_name][namespace][domfam] = True\n\n #f_cnt += 1 # DEBUG\n\n\n # capture domain hits to genes within each namespace\n #\n if params['namespace'] != 'SEED':\n dom_annot_found = dict()\n\n KBASE_DOMAINHIT_GENE_ID_I = 0\n KBASE_DOMAINHIT_GENE_BEG_I = 1 # not used\n KBASE_DOMAINHIT_GENE_END_I = 2 # not used\n KBASE_DOMAINHIT_GENE_STRAND_I = 3 # not used\n KBASE_DOMAINHIT_GENE_HITS_DICT_I = 4\n KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J = 0\n KBASE_DOMAINHIT_GENE_HITS_DICT_END_J = 1\n KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J = 2\n KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J = 3\n KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J = 4\n\n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT A: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n for ws_id in uniq_genome_ws_ids.keys():\n try:\n dom_annot_obj_info_list = wsClient.list_objects({'ids':[ws_id],'type':\"KBaseGeneFamilies.DomainAnnotation\"})\n except Exception as e:\n raise ValueError (\"Unable to list DomainAnnotation objects from workspace: \"+str(ws_id)+\" \"+str(e))\n\n for info in dom_annot_obj_info_list:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, 
SIZE_I, META_I] = range(11) # object_info tuple\n \n dom_annot_ref = str(info[WSID_I])+'/'+str(info[OBJID_I])+'/'+str(info[VERSION_I])\n try:\n domain_data = wsClient.get_objects([{'ref':dom_annot_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch domain annotation: \"+dom_annot_ref)\n\n # read domain data object\n genome_ref = domain_data['genome_ref']\n if genome_ref not in genome_refs:\n continue\n dom_annot_found[genome_ref] = True\n\n if genome_ref not in dom_hits:\n dom_hits[genome_ref] = dict()\n\n if genome_ref not in genes_with_hits_cnt:\n genes_with_hits_cnt[genome_ref] = dict()\n\n for scaffold_id_iter in domain_data['data'].keys():\n for CDS_domain_list in domain_data['data'][scaffold_id_iter]:\n gene_ID = CDS_domain_list[KBASE_DOMAINHIT_GENE_ID_I]\n #gene_name = re.sub ('^'+genome_object_name+'.', '', gene_ID) \n gene_name = gene_ID\n #(contig_name, gene_name) = (gene_ID[0:gene_ID.index(\".\")], gene_ID[gene_ID.index(\".\")+1:])\n #print (\"DOMAIN_HIT: \"+contig_name+\" \"+gene_name) # DEBUG\n #print (\"DOMAIN_HIT for gene: \"+gene_name) # DEBUG\n #gene_beg = CDS_domain_list[KBASE_DOMAINHIT_GENE_BEG_I]\n #gene_end = CDS_domain_list[KBASE_DOMAINHIT_GENE_END_I]\n #gene_strand = CDS_domain_list[KBASE_DOMAINHIT_GENE_STRAND_I]\n gene_hits_dict = CDS_domain_list[KBASE_DOMAINHIT_GENE_HITS_DICT_I]\n\n dom_hits_by_namespace = dict()\n top_hit_evalue_by_namespace = dict()\n top_hit_dom_by_namespace = dict()\n\n for namespace in namespace_classes:\n dom_hits_by_namespace[namespace] = dict()\n top_hit_evalue_by_namespace[namespace] = 100\n top_hit_dom_by_namespace[namespace] = None\n\n for domfam in gene_hits_dict.keys():\n if domfam.startswith('PF'):\n domfam_clean = re.sub('\\.[^\\.]*$','',domfam)\n else:\n domfam_clean = domfam\n known_namespace = False\n for this_namespace in namespace_classes:\n if domfam.startswith(this_namespace):\n namespace = this_namespace\n known_namespace = True\n if not known_namespace:\n continue\n\n for hit in gene_hits_dict[domfam]:\n beg = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J])\n end = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_END_J])\n e_value = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J])\n bit_score = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J])\n aln_perc = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J])\n\n if e_value_thresh != None and e_value > e_value_thresh:\n continue\n if top_hit_flag:\n if top_hit_dom_by_namespace[namespace] == None \\\n or top_hit_evalue_by_namespace[namespace] > e_value:\n top_hit_dom_by_namespace[namespace] = domfam_clean\n top_hit_evalue_by_namespace[namespace] = e_value\n \n dom_hits_by_namespace[namespace][domfam_clean] = True\n\n # store assignments for gene\n for namespace in namespace_classes:\n if namespace == 'SEED':\n continue\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n if dom_hits_by_namespace[namespace]:\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n \n if top_hit_flag:\n dom_hits[genome_ref][gene_name][namespace] = { top_hit_dom_by_namespace[namespace]: True }\n else:\n dom_hits[genome_ref][gene_name][namespace] = dom_hits_by_namespace[namespace]\n\n # make sure we have domain annotations for all genomes\n missing_annot = []\n for genome_ref in genome_refs:\n if genome_ref not in dom_annot_found:\n missing_annot.append(\"\\t\"+'MISSING DOMAIN ANNOTATION FOR: '+genome_ref)\n if missing_annot:\n error_msg = \"ABORT: You 
must run the DomainAnnotation App first\\n\"\n error_msg += \"\\n\".join(missing_annot)\n raise ValueError (error_msg)\n \n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT B: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n \n # calculate table\n #\n table_data = dict()\n INSANE_VALUE = 10000000000000000\n overall_low_val = INSANE_VALUE\n overall_high_val = -INSANE_VALUE\n\n # count raw\n for genome_ref in genome_refs:\n if genome_ref not in table_data:\n table_data[genome_ref] = dict()\n for cat in cats:\n table_data[genome_ref][cat] = 0\n\n # custom\n if params['namespace'] == 'custom':\n for cat in cats:\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n if cat in dom_hits[genome_ref][gene_name][namespace]:\n table_data[genome_ref][cat] += 1\n\n # high level summation\n else:\n namespace = params['namespace']\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n for domfam in dom_hits[genome_ref][gene_name][namespace].keys():\n #self.log(console, \"DOMFAM: '\"+str(domfam)+\"'\") # DEBUG\n\n if domfam in domfam2cat[namespace]:\n cat = domfam2cat[namespace][domfam]\n #self.log(console, \"CAT: '\"+str(cat)+\"'\") # DEBUG\n if cat in cats:\n #self.log(console, \"CAT_FOUND: '\"+str(cat)+\"'\") # DEBUG\n table_data[genome_ref][cat] += 1\n \n # adjust to percs\n if params['count_category'].startswith('perc'):\n for genome_ref in genome_refs:\n for cat in cats:\n if params['count_category'] == 'perc_annot':\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n else:\n namespace = params['namespace']\n total_genes = genes_with_hits_cnt[genome_ref][namespace]\n else:\n total_genes = genome_CDS_count_by_ref[genome_ref]\n\n table_data[genome_ref][cat] /= float(total_genes)\n table_data[genome_ref][cat] *= 100.0\n\n # determine high and low val\n for genome_ref in genome_refs:\n for cat in cats:\n val = table_data[genome_ref][cat]\n if val == 0: continue\n #self.log (console, \"HIGH VAL SCAN CAT: '\"+cat+\"' VAL: '\"+str(val)+\"'\") # DEBUG\n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n if val > overall_high_val:\n overall_high_val = val\n if val < overall_low_val:\n overall_low_val = val\n if overall_high_val == -INSANE_VALUE:\n raise ValueError (\"unable to find any counts\")\n\n\n # determine cats with a value and build group\n #\n cat_seen = dict()\n group_size = dict()\n group_size_with_blanks = dict()\n group_order = []\n group_order_with_blanks = []\n for cat in cats:\n cat_seen[cat] = False\n if params['namespace'] == 'custom':\n # get cats seen and group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = None\n if extra_target_fam_groups:\n if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = None\n if extra_target_fam_groups:\n 
if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n else:\n namespace = params['namespace']\n # get group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != None and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n\n\n # build report\n #\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n\n\n # build html report\n sp = '&nbsp;'\n text_color = \"#606060\"\n text_color_2 = \"#606060\"\n head_color_1 = \"#eeeeee\"\n head_color_2 = \"#eeeeee\"\n border_color = \"#cccccc\"\n border_cat_color = \"#ffccff\"\n #graph_color = \"lightblue\"\n #graph_width = 100\n #graph_char = \".\"\n graph_char = sp\n color_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e']\n max_color = len(color_list)-1\n cat_disp_trunc_len = 40\n cell_width = '10px'\n if len(genome_refs) > 20:\n graph_gen_fontsize = \"1\"\n elif len(genome_refs) > 10:\n graph_gen_fontsize = \"2\"\n else:\n graph_gen_fontsize = \"3\"\n if len(cats) > 20:\n graph_cat_fontsize = \"1\"\n elif len(cats) > 5:\n graph_cat_fontsize = \"2\"\n else:\n graph_cat_fontsize = \"3\"\n if int(graph_cat_fontsize) < int(graph_gen_fontsize):\n cell_fontsize = graph_gen_fontsize = graph_cat_fontsize\n else:\n cell_fontsize = graph_cat_fontsize = graph_gen_fontsize\n graph_padding = \"5\"\n graph_spacing = \"3\"\n #border = \"1\"\n border = \"0\"\n #row_spacing = \"-2\"\n num_rows = len(genome_refs)\n show_groups = False\n if len(group_order) > 0: show_groups = True\n\n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Functional Domain Profile</title>']\n html_report_lines += ['<style>']\n html_report_lines += [\".vertical-text {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 0.65em;\\n}\\n.vertical-text__inner {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.1;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += [\".vertical-text_title {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 1.0em;\\n}\\n.vertical-text__inner_title {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.0;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner_title:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += 
['</style>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n\n # genomes as rows\n if 'vertical' in params and params['vertical'] == \"1\":\n # table header\n html_report_lines += ['<table cellpadding='+graph_padding+' cellspacing='+graph_spacing+' border='+border+'>']\n corner_rowspan = \"1\"\n if show_groups: corner_rowspan = \"2\"\n label = ''\n if params['namespace'] != 'custom':\n label = params['namespace']\n if label == 'PF':\n label = 'PFAM'\n elif label == 'TIGR':\n label = 'TIGRFAM'\n html_report_lines += ['<tr><td valign=bottom align=right rowspan='+corner_rowspan+'><div class=\"vertical-text_title\"><div class=\"vertical-text__inner_title\"><font color=\"'+text_color+'\">'+label+'</font></div></div></td>']\n \n # group headers\n if show_groups:\n for cat_group in group_order:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n max_group_width = 3*group_size[cat_group]\n if len(cat_group) > max_group_width:\n new_cat_group_words = []\n sentence_len = 0\n for w_i,word in enumerate(cat_group_words):\n new_cat_group_words.append(word)\n sentence_len += len(word)\n if w_i < len(cat_group_words)-1:\n if sentence_len + 1 + len(cat_group_words[w_i+1]) > max_group_width:\n new_cat_group_words[w_i] += '<br>'\n sentence_len = 0\n cat_group_words = new_cat_group_words\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" \".join(cat_group_words)\n\n # DEBUG\n #if cat_group not in group_size:\n # self.log(console, \"CAT_GROUP: '\"+str(cat_group)+\"'\") # DEBUG\n # self.log(console, \"CAT_GROUP_DISP: '\"+str(cat_group_disp)+\"'\") # DEBUG\n # for cg in group_size:\n # self.log(console, \"CG: '\"+str(cg)+\"'\") # DEBUG\n\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white colspan='+str(group_size[cat_group])+'></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_1+'\"valign=middle align=center colspan='+str(group_size[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n html_report_lines += ['</tr><tr>']\n\n # column headers\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub (\"\\d*$\", \"\", cat)\n cell_title = domfam2name[namespace][cat].strip()\n cat_disp = cat\n cat_disp = re.sub ('^SEED', 'SEED:', cat_disp)\n else:\n cell_title = cat2name[params['namespace']][cat].strip()\n cat_disp = cat\n cat_disp = re.sub (\"TIGR_\", \"\", cat_disp)\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_2+'\"title=\"'+cell_title+'\" valign=bottom align=center>']\n if params['namespace'] != 'COG':\n html_report_lines += ['<div class=\"vertical-text\"><div class=\"vertical-text__inner\">']\n html_report_lines += ['<font color=\"'+text_color_2+'\" size='+graph_cat_fontsize+'><b>']\n #for c_i,c in enumerate(cat_disp):\n # if c_i < len(cat_disp)-1:\n # html_report_lines += [c+'<br>']\n # else:\n # html_report_lines += [c]\n html_report_lines += [cat_disp]\n html_report_lines += ['</b></font>']\n if 
params['namespace'] != 'COG':\n html_report_lines += ['</div></div>']\n html_report_lines += ['</td>']\n html_report_lines += ['</tr>']\n \n # rest of rows\n for genome_ref in genome_refs:\n genome_sci_name = genome_sci_name_by_ref[genome_ref]\n html_report_lines += ['<tr>']\n html_report_lines += ['<td align=right><font color=\"'+text_color+'\" size='+graph_gen_fontsize+'><b><nobr>'+genome_sci_name+'</nobr></b></font></td>']\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n val = table_data[genome_ref][cat]\n if val == 0:\n cell_color = 'white'\n else: \n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n cell_color_i = max_color - int(round(max_color * (val-overall_low_val) / float(overall_high_val-overall_low_val)))\n c = color_list[cell_color_i]\n cell_color = '#'+c+c+c+c+'FF'\n\n if params['count_category'].startswith('perc'):\n cell_val = str(\"%.3f\"%table_data[genome_ref][cat])\n cell_val += '%'\n else:\n cell_val = str(table_data[genome_ref][cat])\n\n if 'heatmap' in params and params['heatmap'] == '1':\n if table_data[genome_ref][cat] == 0:\n this_text_color = text_color\n #this_graph_char = \"0\"\n this_graph_char = sp\n else:\n this_text_color = cell_color\n this_graph_char = graph_char\n html_report_lines += ['<td align=center valign=middle title=\"'+cell_val+'\" style=\"width:'+cell_width+'\" bgcolor=\"'+cell_color+'\"><font color=\"'+this_text_color+'\" size='+cell_fontsize+'>'+this_graph_char+'</font></td>']\n else:\n html_report_lines += ['<td align=center valign=middle style=\"'+cell_width+'; border-right:solid 2px '+border_color+'; border-bottom:solid 2px '+border_color+'\"><font color=\"'+text_color+'\" size='+cell_fontsize+'>'+cell_val+'</font></td>']\n\n html_report_lines += ['</tr>']\n html_report_lines += ['</table>']\n\n # genomes as columns\n else:\n raise ValueError (\"Do not yet support Genomes as columns\")\n\n\n # key table\n html_report_lines += ['<p>']\n html_report_lines += ['<table cellpadding=3 cellspacing=2 border='+border+'>']\n html_report_lines += ['<tr><td valign=middle align=left colspan=3 style=\"border-bottom:solid 4px '+border_color+'\"><font color=\"'+text_color+'\"><b>KEY</b></font></td></tr>']\n\n if show_groups:\n group_cat_i = 0\n for cat_group in group_order_with_blanks:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \"&nbsp;<br>\".join(cat_group_words)\n cat_group_disp += sp\n\n html_report_lines += ['<tr>']\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white rowspan='+str(group_size_with_blanks[cat_group])+' style=\"border-right:solid 4px '+border_color+'\"></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 4px '+border_color+'\" valign=top align=right rowspan='+str(group_size_with_blanks[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n # add first cat for group\n first_cat = cats[group_cat_i]\n cell_color = 'white'\n #if not cat_seen[first_cat] and not show_blanks:\n if not cat_seen[first_cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = first_cat\n if first_cat.startswith('SEED'):\n namespace = 'SEED'\n 
else:\n namespace = re.sub ('\\d*$', '', first_cat)\n cat_disp = re.sub ('^SEED', 'SEED:', first_cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = first_cat\n desc = cat2name[namespace][first_cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n # add rest of cats in group\n for c_i in range(group_cat_i, group_cat_i+group_size_with_blanks[cat_group]-1):\n cat = cats[c_i]\n cell_color = 'white'\n #if not cat_seen[cat] and not show_blanks:\n if not cat_seen[cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n \n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n else:\n for cat in cats:\n cell_color = 'white'\n if not cat_seen[cat] and not show_blanks:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', domfam)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n\n html_report_lines += ['</table>']\n\n # close\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n html_file = os.path.join (output_dir, 'domain_profile_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n upload_ret = dfu.file_to_shock({'file_path': html_file,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging 
exception loading html_report to shock')\n\n reportObj['html_links'] = [{'shock_id': upload_ret['shock_id'],\n 'name': 'domain_profile_report.html',\n 'label': 'Functional Domain Profile report'}\n ]\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_fxn_profile\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_fxn_profile return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_fxn_profile_featureSet(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_fxn_profile_featureSet_Input\"\n (view_fxn_profile_featureSet() ** ** show a table/heatmap of\n general categories or custom gene families for a set of Genomes)\n -> structure: parameter \"workspace_name\" of type \"workspace_name\"\n (** Common types), parameter \"input_featureSet_ref\" of type\n \"data_obj_ref\", parameter \"namespace\" of String, parameter\n \"custom_target_fams\" of type \"CustomTargetFams\" (parameter groups)\n -> structure: parameter \"target_fams\" of list of String, parameter\n \"extra_target_fam_groups_COG\" of list of String, parameter\n \"extra_target_fam_groups_PFAM\" of list of String, parameter\n \"extra_target_fam_groups_TIGR\" of list of String, parameter\n \"extra_target_fam_groups_SEED\" of list of String, parameter\n \"count_category\" of String, parameter \"heatmap\" of type \"bool\",\n parameter \"vertical\" of type \"bool\", parameter \"top_hit\" of type\n \"bool\", parameter \"e_value\" of Double, parameter \"log_base\" of\n Double, parameter \"show_blanks\" of type \"bool\"\n :returns: instance of type \"view_fxn_profile_featureSet_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_fxn_profile_featureSet\n\n ### STEP 0: basic init\n console = []\n self.log(console, 'Running view_fxn_profile_featureSet(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_featureSet_ref',\n 'namespace'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n if params['namespace'] == 'custom':\n if ('custom_target_fams' not in params or not params['custom_target_fams']) \\\n or ( \\\n ('target_fams' not in params['custom_target_fams'] or not params['custom_target_fams']['target_fams']) \\\n and ('extra_target_fam_groups_COG' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_COG']) \\\n and ('extra_target_fam_groups_PFAM' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_PFAM']) \\\n and ('extra_target_fam_groups_TIGR' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_TIGR']) \\\n and 
('extra_target_fam_groups_SEED' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_SEED'])\n ):\n \n raise ValueError (\"Must define either param: 'target_fams' or 'extra_target_fam_groups' if using CUSTOM targets\")\n\n # base config\n namespace_classes = ['COG', 'PF', 'TIGR', 'SEED']\n show_blanks = False\n if 'show_blanks' in params and params['show_blanks'] == '1':\n show_blanks = True\n e_value_thresh = None\n if 'e_value' in params and params['e_value'] != None and params['e_value'] != '':\n e_value_thresh = float (params['e_value'])\n top_hit_flag = False\n if 'top_hit' in params and params['top_hit'] != None and params['top_hit'] != '' and params['top_hit'] != 0:\n top_hit_flag = True\n\n domain_desc_basepath = os.path.abspath('/kb/module/data/domain_desc')\n domain_to_cat_map_path = dict()\n domain_cat_names_path = dict()\n domain_fam_names_path = dict()\n domain_to_cat_map_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_cat_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014_funcat.tsv')\n domain_fam_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_to_cat_map_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_cat_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans_names.tsv')\n domain_fam_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_to_cat_map_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_cat_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrrole2go.txt')\n #domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrfams2go.txt')\n domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_to_cat_map_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_funcat.txt')\n #domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_fam_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n provenance[0]['input_ws_objects']=[str(params['input_featureSet_ref'])]\n\n\n # set the output path\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\n # configure categories\n #\n cats = []\n cat2name = dict()\n cat2group = dict()\n domfam2cat = dict()\n cat2domfams = dict()\n namespaces_reading = dict()\n\n # categories are high-level summations\n if params['namespace'] != 'custom':\n for namespace in ['COG','PF','TIGR','SEED']:\n if params['namespace'] == namespace:\n namespaces_reading[namespace] = True\n\n # read all mappings between groups and domfams\n for namespace in ['COG','PF','TIGR','SEED']:\n\n cat2name[namespace] = dict()\n cat2group[namespace] = dict()\n domfam2cat[namespace] = dict()\n cat2domfams[namespace] = dict()\n\n # get high-level cats\n tigrrole_id2cat = dict()\n with open (domain_cat_names_path[namespace], 'r', 0) as dom_cat_handle:\n for line in dom_cat_handle.readlines():\n line = line.strip()\n \n if namespace == 'COG':\n [cat, cat_group, cat_name] = line.split(\"\\t\")[0:3]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] 
= cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'PF':\n [cat, cat_name] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = None\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [cat, cat_id, cat_group, cat_name_plus_go_terms] = line.split(\"\\t\")[0:4]\n tigrrole_id2cat[cat_id] = cat\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_name = re.sub (' *\\> GO:.*$', '', cat_name_plus_go_terms)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'SEED':\n #[cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n [cat_group, cat] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_disp = re.sub ('_', ' ', cat)\n cat2name[namespace][cat] = cat_disp\n cat2group[namespace][cat] = cat_group\n\n # get domfam to cat map, and vice versa\n with open (domain_to_cat_map_path[namespace], 'r', 0) as dom2cat_map_handle:\n for line in dom2cat_map_handle.readlines():\n line = line.strip()\n\n if namespace == 'COG':\n [domfam, cat_str, cat_name] = line.split(\"\\t\")[0:3]\n cat = cat_str[0] # only use first cat\n\n elif namespace == 'PF':\n [domfam, cat, cat_name, dom_id, dom_name] = line.split(\"\\t\")[0:5]\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if cat_id != '' and int(cat_id) != 0 and cat_id in tigrrole_id2cat:\n cat = tigrrole_id2cat[cat_id]\n else:\n continue\n\n elif namespace == 'SEED':\n [cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n\n domfam2cat[namespace][domfam] = cat\n if cat not in cat2domfams[namespace]:\n cat2domfams[namespace][cat] = []\n cat2domfams[namespace][cat].append(domfam)\n\n # custom domains\n if params['namespace'] == 'custom':\n\n # add target fams\n target_fams = []\n if 'target_fams' in params['custom_target_fams'] and params['custom_target_fams']['target_fams']:\n for target_fam in params['custom_target_fams']['target_fams']:\n target_fam = target_fam.strip()\n if target_fam == '':\n continue\n\n target_fam = re.sub (\"^cog\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^pf\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^tigr\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^seed\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PFAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P-FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P_FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGRFAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n\n target_fam = re.sub (\"^COG:\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG-\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG_\", \"COG\", target_fam, 
flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG *\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF:\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF-\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF_\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF *\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR:\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR *\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED:\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED-\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED_\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED *\", \"SEED\", target_fam, flags=re.IGNORECASE)\n\n num_id_len = dict()\n num_id_len['COG'] = 4\n num_id_len['PF'] = 5\n num_id_len['TIGR'] = 5\n\n #self.log (console, \"TARGET_FAM A: '\"+target_fam+\"'\") # DEBUG\n \n if target_fam.startswith('SEED'):\n namespaces_reading['SEED'] = True\n target_fam = target_fam.strip()\n target_fam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' ', '_', target_fam)\n else:\n namespace_found = False\n for namespace_iter in ['COG','PF','TIGR']:\n if target_fam.startswith(namespace_iter):\n this_namespace = namespace_iter\n namespaces_reading[this_namespace] = True\n target_fam = re.sub(this_namespace, \"\", target_fam)\n namespace_found = True\n break\n if not namespace_found:\n raise ValueError (\"unrecognized custom domain family: '\"+str(target_fam)+\"'\")\n leading_zeros = ''\n for c_i in range(num_id_len[this_namespace] - len(str(target_fam))):\n leading_zeros += '0'\n target_fam = this_namespace + leading_zeros + target_fam\n\n #self.log (console, \"TARGET_FAM B: '\"+target_fam+\"'\") # DEBUG\n\n target_fams.append(target_fam)\n\n # add extra target fams\n extra_target_fams = []\n extra_target_fam_groups = []\n domfam2group = dict()\n for target_set in ['extra_target_fam_groups_COG', 'extra_target_fam_groups_PFAM', 'extra_target_fam_groups_TIGR', 'extra_target_fam_groups_SEED']:\n if target_set in params['custom_target_fams'] and params['custom_target_fams'][target_set]:\n extra_target_fam_groups.extend (params['custom_target_fams'][target_set])\n\n if extra_target_fam_groups:\n for target_group in extra_target_fam_groups:\n target_group = target_group.strip()\n if target_group == '':\n continue\n\n namespace = re.sub (\":.*$\", \"\", target_group)\n namespaces_reading[namespace] = True\n\n if namespace == 'COG':\n this_group = re.sub (\"COG: \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'PF':\n this_group = re.sub (\"PF: Clan \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'TIGR':\n this_group = re.sub (\"TIGR: role:\", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n this_group = 'TIGR_role:'+this_group\n elif namespace == 'SEED':\n this_group = re.sub (\"SEED: \", \"\", target_group)\n\n for domfam in cat2domfams[namespace][this_group]:\n extra_target_fams.append(domfam)\n domfam2group[domfam] = target_group\n\n # we have our targets\n cats = target_fams + 
extra_target_fams\n\n # store names of targets\n domfam2name = dict()\n for namespace in namespaces_reading.keys():\n domfam2name[namespace] = dict()\n\n if namespace == 'COG':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, cat_class, domfam_name] = line.split(\"\\t\")[0:3]\n domfam2name[namespace][domfam] = domfam_name\n\n elif namespace == 'PF':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, class_id, class_name, domfam_id, domfam_name] = line.split(\"\\t\")[0:5]\n if domfam_name.startswith(domfam_id):\n combo_name = domfam_name\n else:\n combo_name = domfam_id+': '+domfam_name\n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'TIGR':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if domfam_name != '':\n if domfam_desc.startswith(domfam_name):\n combo_name = domfam_desc\n else:\n combo_name = domfam_name+': '+domfam_desc\n else:\n if domfam_desc.startswith(domfam_id):\n combo_name = domfam_desc\n else:\n combo_name = domfam_id+': '+domfam_desc\n if ec_id != '':\n combo_name += ' (EC '+ec_id+')'\n \n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'SEED':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [level1, level2, level3, domfam] = line.split(\"\\t\")[0:4]\n\n domfam_desc = domfam\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n if domfam in domfam2name[namespace]:\n if len(domfam_desc) > len(domfam2name[namespace][domfam]):\n domfam2name[namespace][domfam] = domfam_desc\n else:\n domfam2name[namespace][domfam] = domfam_desc\n\n # just in case\n elif params['namespace'] != 'COG' \\\n and params['namespace'] != 'PF' \\\n and params['namespace'] != 'TIGR' \\\n and params['namespace'] != 'SEED':\n raise ValueError (\"Unknown namespace: '\"+str(params['namespace'])+\"'\")\n\n\n # get genome set from featureSet\n #\n input_ref = params['input_featureSet_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseCollections.FeatureSet\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n featureSet_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch featureSet: \"+input_ref)\n\n\n # get genome refs, object names, sci names, protein-coding gene counts, and SEED annot\n #\n #genome_ids = genomeSet_obj['elements'].keys() # note: genome_id may be meaningless\n genome_refs = []\n genome_ref_seen = dict()\n #for genome_id in genome_ids:\n # genome_refs.append (genomeSet_obj['elements'][genome_id]['ref'])\n for element_id in featureSet_obj['elements'].keys():\n genome_ref = featureSet_obj['elements'][element_id][0]\n if genome_ref not in genome_ref_seen:\n genome_ref_seen[genome_ref] = True\n genome_refs.append(genome_ref)\n\n genome_obj_name_by_ref = dict()\n genome_sci_name_by_ref = dict()\n genome_CDS_count_by_ref = dict()\n uniq_genome_ws_ids = dict()\n\n dom_hits = dict() # initialize dom_hits here because reading SEED within genome\n genes_with_hits_cnt = dict()\n\n for genome_ref in genome_refs:\n\n dom_hits[genome_ref] = dict()\n genes_with_hits_cnt[genome_ref] = dict()\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[genome_ref] = input_name\n\n try:\n genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n # sci name\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n \n # CDS cnt\n cds_cnt = 0\n for feature in genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n cds_cnt += 1\n genome_CDS_count_by_ref[genome_ref] = cds_cnt\n\n # SEED annotations\n #\n #f_cnt = 0 # DEBUG\n if 'SEED' in namespaces_reading:\n for feature in genome_obj['features']:\n\n # filter out genes that aren't in featureSet\n target_feature = False\n #featureSet_element_id = genome_ref+self.genome_feature_id_delim+feature['id']\n featureSet_element_id = feature['id']\n if featureSet_element_id in featureSet_obj['elements']:\n target_feature = True\n\n #if f_cnt % 100 == 0:\n # self.log (console, \"iterating features: \"+str(f_cnt)) # DEBUG\n\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n #if f_cnt % 100 == 0:\n # self.log (console, \"prot: \"+str(feature['protein_translation'])) # DEBUG\n\n if 'function' in feature and feature['function'] != None and feature['function'] != '':\n gene_name = feature['id']\n \n #if f_cnt % 100 == 0:\n # self.log (console, \"fxn: '\"+str(feature['function'])+\"'\") # DEBUG\n\n # store assignments for gene\n for namespace in ['SEED']:\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if not target_feature:\n continue\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n dom_hits[genome_ref][gene_name][namespace] = dict()\n\n domfam_list = []\n annot_set = feature['function'].strip().split(';')\n for annot in annot_set:\n annot_set_2 = annot.strip().split('@')\n for annot2 in annot_set_2:\n domfam = annot2.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n domfam_list.append(domfam)\n #if f_cnt % 100 == 0:\n # self.log (console, \"domfam: '\"+str(domfam)+\"'\") # DEBUG\n\n if top_hit_flag: # does SEED give more than one function?\n dom_hits[genome_ref][gene_name][namespace][domfam_list[0]] = True\n else:\n for domfam in domfam_list:\n dom_hits[genome_ref][gene_name][namespace][domfam] = True\n\n #f_cnt += 1 # DEBUG\n\n\n # capture domain hits to genes within each namespace\n #\n if params['namespace'] != 'SEED':\n dom_annot_found = dict()\n\n KBASE_DOMAINHIT_GENE_ID_I = 0\n KBASE_DOMAINHIT_GENE_BEG_I = 1 # not used\n KBASE_DOMAINHIT_GENE_END_I = 2 # not used\n KBASE_DOMAINHIT_GENE_STRAND_I = 3 # not used\n KBASE_DOMAINHIT_GENE_HITS_DICT_I = 4\n KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J = 0\n KBASE_DOMAINHIT_GENE_HITS_DICT_END_J = 1\n KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J = 2\n KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J = 3\n KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J = 4\n\n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT A: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n for ws_id in uniq_genome_ws_ids.keys():\n try:\n dom_annot_obj_info_list = 
wsClient.list_objects({'ids':[ws_id],'type':\"KBaseGeneFamilies.DomainAnnotation\"})\n except Exception as e:\n raise ValueError (\"Unable to list DomainAnnotation objects from workspace: \"+str(ws_id)+\" \"+str(e))\n\n for info in dom_annot_obj_info_list:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n \n dom_annot_ref = str(info[WSID_I])+'/'+str(info[OBJID_I])+'/'+str(info[VERSION_I])\n try:\n domain_data = wsClient.get_objects([{'ref':dom_annot_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch domain annotation: \"+dom_annot_ref)\n\n # read domain data object\n genome_ref = domain_data['genome_ref']\n if genome_ref not in genome_refs:\n continue\n dom_annot_found[genome_ref] = True\n\n if genome_ref not in dom_hits:\n dom_hits[genome_ref] = dict()\n\n if genome_ref not in genes_with_hits_cnt:\n genes_with_hits_cnt[genome_ref] = dict()\n\n for scaffold_id_iter in domain_data['data'].keys():\n for CDS_domain_list in domain_data['data'][scaffold_id_iter]:\n gene_ID = CDS_domain_list[KBASE_DOMAINHIT_GENE_ID_I]\n #gene_name = re.sub ('^'+genome_object_name+'.', '', gene_ID) \n gene_name = gene_ID\n #(contig_name, gene_name) = (gene_ID[0:gene_ID.index(\".\")], gene_ID[gene_ID.index(\".\")+1:])\n #print (\"DOMAIN_HIT: \"+contig_name+\" \"+gene_name) # DEBUG\n #print (\"DOMAIN_HIT for gene: \"+gene_name) # DEBUG\n #gene_beg = CDS_domain_list[KBASE_DOMAINHIT_GENE_BEG_I]\n #gene_end = CDS_domain_list[KBASE_DOMAINHIT_GENE_END_I]\n #gene_strand = CDS_domain_list[KBASE_DOMAINHIT_GENE_STRAND_I]\n gene_hits_dict = CDS_domain_list[KBASE_DOMAINHIT_GENE_HITS_DICT_I]\n\n dom_hits_by_namespace = dict()\n top_hit_evalue_by_namespace = dict()\n top_hit_dom_by_namespace = dict()\n\n for namespace in namespace_classes:\n dom_hits_by_namespace[namespace] = dict()\n top_hit_evalue_by_namespace[namespace] = 100\n top_hit_dom_by_namespace[namespace] = None\n\n for domfam in gene_hits_dict.keys():\n if domfam.startswith('PF'):\n domfam_clean = re.sub('\\.[^\\.]*$','',domfam)\n else:\n domfam_clean = domfam\n known_namespace = False\n for this_namespace in namespace_classes:\n if domfam.startswith(this_namespace):\n namespace = this_namespace\n known_namespace = True\n if not known_namespace:\n continue\n\n for hit in gene_hits_dict[domfam]:\n beg = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J])\n end = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_END_J])\n e_value = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J])\n bit_score = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J])\n aln_perc = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J])\n\n if e_value_thresh != None and e_value > e_value_thresh:\n continue\n if top_hit_flag:\n if top_hit_dom_by_namespace[namespace] == None \\\n or top_hit_evalue_by_namespace[namespace] > e_value:\n top_hit_dom_by_namespace[namespace] = domfam_clean\n top_hit_evalue_by_namespace[namespace] = e_value\n \n dom_hits_by_namespace[namespace][domfam_clean] = True\n\n # store assignments for gene\n for namespace in namespace_classes:\n if namespace == 'SEED':\n continue\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n if dom_hits_by_namespace[namespace]:\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n # filter out genes that aren't in featureSet\n #featureSet_element_id = genome_ref+self.genome_feature_id_delim+gene_ID\n featureSet_element_id = gene_ID\n if featureSet_element_id not in 
featureSet_obj['elements']:\n continue\n \n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n \n if top_hit_flag:\n dom_hits[genome_ref][gene_name][namespace] = { top_hit_dom_by_namespace[namespace]: True }\n else:\n dom_hits[genome_ref][gene_name][namespace] = dom_hits_by_namespace[namespace]\n\n # make sure we have domain annotations for all genomes\n missing_annot = []\n for genome_ref in genome_refs:\n if genome_ref not in dom_annot_found:\n missing_annot.append(\"\\t\"+'MISSING DOMAIN ANNOTATION FOR: '+genome_ref)\n if missing_annot:\n error_msg = \"ABORT: You must run the DomainAnnotation App first\\n\"\n error_msg += \"\\n\".join(missing_annot)\n raise ValueError (error_msg)\n \n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT B: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n \n # calculate table\n #\n table_data = dict()\n INSANE_VALUE = 10000000000000000\n overall_low_val = INSANE_VALUE\n overall_high_val = -INSANE_VALUE\n\n # count raw\n for genome_ref in genome_refs:\n if genome_ref not in table_data:\n table_data[genome_ref] = dict()\n for cat in cats:\n table_data[genome_ref][cat] = 0\n\n # custom\n if params['namespace'] == 'custom':\n for cat in cats:\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n if cat in dom_hits[genome_ref][gene_name][namespace]:\n table_data[genome_ref][cat] += 1\n\n # high level summation\n else:\n namespace = params['namespace']\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n for domfam in dom_hits[genome_ref][gene_name][namespace].keys():\n #self.log(console, \"DOMFAM: '\"+str(domfam)+\"'\") # DEBUG\n\n if domfam in domfam2cat[namespace]:\n cat = domfam2cat[namespace][domfam]\n #self.log(console, \"CAT: '\"+str(cat)+\"'\") # DEBUG\n if cat in cats:\n #self.log(console, \"CAT_FOUND: '\"+str(cat)+\"'\") # DEBUG\n table_data[genome_ref][cat] += 1\n \n # adjust to percs\n if params['count_category'].startswith('perc'):\n for genome_ref in genome_refs:\n for cat in cats:\n if params['count_category'] == 'perc_annot':\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n else:\n namespace = params['namespace']\n total_genes = genes_with_hits_cnt[genome_ref][namespace]\n else:\n total_genes = genome_CDS_count_by_ref[genome_ref]\n\n table_data[genome_ref][cat] /= float(total_genes)\n table_data[genome_ref][cat] *= 100.0\n\n # determine high and low val\n for genome_ref in genome_refs:\n for cat in cats:\n val = table_data[genome_ref][cat]\n if val == 0: continue\n #self.log (console, \"HIGH VAL SCAN CAT: '\"+cat+\"' VAL: '\"+str(val)+\"'\") # DEBUG\n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n if val > overall_high_val:\n overall_high_val = val\n if val < overall_low_val:\n overall_low_val = val\n if overall_high_val == -INSANE_VALUE:\n raise ValueError (\"unable to find any counts\")\n\n\n # determine cats with a value and build group\n #\n cat_seen = dict()\n group_size = dict()\n group_size_with_blanks = dict()\n group_order = []\n group_order_with_blanks = []\n for cat in cats:\n cat_seen[cat] = False\n if 
params['namespace'] == 'custom':\n # get cats seen and group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = None\n if extra_target_fam_groups:\n if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = None\n if extra_target_fam_groups:\n if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n else:\n namespace = params['namespace']\n # get group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != None and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n\n\n # build report\n #\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n\n\n # build html report\n sp = '&nbsp;'\n text_color = \"#606060\"\n text_color_2 = \"#606060\"\n head_color_1 = \"#eeeeee\"\n head_color_2 = \"#eeeeee\"\n border_color = \"#cccccc\"\n border_cat_color = \"#ffccff\"\n #graph_color = \"lightblue\"\n #graph_width = 100\n #graph_char = \".\"\n graph_char = sp\n color_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e']\n max_color = len(color_list)-1\n cat_disp_trunc_len = 40\n cell_width = '10px'\n if len(genome_refs) > 20:\n graph_gen_fontsize = \"1\"\n elif len(genome_refs) > 10:\n graph_gen_fontsize = \"2\"\n else:\n graph_gen_fontsize = \"3\"\n if len(cats) > 20:\n graph_cat_fontsize = \"1\"\n elif len(cats) > 5:\n graph_cat_fontsize = \"2\"\n else:\n graph_cat_fontsize = \"3\"\n if int(graph_cat_fontsize) < int(graph_gen_fontsize):\n cell_fontsize = graph_gen_fontsize = graph_cat_fontsize\n else:\n cell_fontsize = graph_cat_fontsize = graph_gen_fontsize\n graph_padding = \"5\"\n graph_spacing = \"3\"\n #border = \"1\"\n border = \"0\"\n #row_spacing = \"-2\"\n num_rows = len(genome_refs)\n show_groups = False\n if len(group_order) > 0: show_groups = True\n\n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Functional Domain Profile</title>']\n html_report_lines += ['<style>']\n html_report_lines += [\".vertical-text {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 0.65em;\\n}\\n.vertical-text__inner {\\ndisplay: inline-block;\\nwhite-space: 
nowrap;\\nline-height: 1.1;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += [\".vertical-text_title {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 1.0em;\\n}\\n.vertical-text__inner_title {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.0;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner_title:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += ['</style>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n\n # genomes as rows\n if 'vertical' in params and params['vertical'] == \"1\":\n # table header\n html_report_lines += ['<table cellpadding='+graph_padding+' cellspacing='+graph_spacing+' border='+border+'>']\n corner_rowspan = \"1\"\n if show_groups: corner_rowspan = \"2\"\n label = ''\n if params['namespace'] != 'custom':\n label = params['namespace']\n if label == 'PF':\n label = 'PFAM'\n elif label == 'TIGR':\n label = 'TIGRFAM'\n html_report_lines += ['<tr><td valign=bottom align=right rowspan='+corner_rowspan+'><div class=\"vertical-text_title\"><div class=\"vertical-text__inner_title\"><font color=\"'+text_color+'\">'+label+'</font></div></div></td>']\n \n # group headers\n if show_groups:\n for cat_group in group_order:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n max_group_width = 3*group_size[cat_group]\n if len(cat_group) > max_group_width:\n new_cat_group_words = []\n sentence_len = 0\n for w_i,word in enumerate(cat_group_words):\n new_cat_group_words.append(word)\n sentence_len += len(word)\n if w_i < len(cat_group_words)-1:\n if sentence_len + 1 + len(cat_group_words[w_i+1]) > max_group_width:\n new_cat_group_words[w_i] += '<br>'\n sentence_len = 0\n cat_group_words = new_cat_group_words\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" \".join(cat_group_words)\n\n # DEBUG\n #if cat_group not in group_size:\n # self.log(console, \"CAT_GROUP: '\"+str(cat_group)+\"'\") # DEBUG\n # self.log(console, \"CAT_GROUP_DISP: '\"+str(cat_group_disp)+\"'\") # DEBUG\n # for cg in group_size:\n # self.log(console, \"CG: '\"+str(cg)+\"'\") # DEBUG\n\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white colspan='+str(group_size[cat_group])+'></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_1+'\"valign=middle align=center colspan='+str(group_size[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n html_report_lines += ['</tr><tr>']\n\n # column headers\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub (\"\\d*$\", \"\", cat)\n cell_title = domfam2name[namespace][cat].strip()\n cat_disp = cat\n cat_disp = re.sub ('^SEED', 'SEED:', cat_disp)\n else:\n cell_title = cat2name[params['namespace']][cat].strip()\n cat_disp = cat\n cat_disp = re.sub (\"TIGR_\", \"\", cat_disp)\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<td 
style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_2+'\"title=\"'+cell_title+'\" valign=bottom align=center>']\n if params['namespace'] != 'COG':\n html_report_lines += ['<div class=\"vertical-text\"><div class=\"vertical-text__inner\">']\n html_report_lines += ['<font color=\"'+text_color_2+'\" size='+graph_cat_fontsize+'><b>']\n #for c_i,c in enumerate(cat_disp):\n # if c_i < len(cat_disp)-1:\n # html_report_lines += [c+'<br>']\n # else:\n # html_report_lines += [c]\n html_report_lines += [cat_disp]\n html_report_lines += ['</b></font>']\n if params['namespace'] != 'COG':\n html_report_lines += ['</div></div>']\n html_report_lines += ['</td>']\n html_report_lines += ['</tr>']\n \n # rest of rows\n for genome_ref in genome_refs:\n genome_sci_name = genome_sci_name_by_ref[genome_ref]\n html_report_lines += ['<tr>']\n html_report_lines += ['<td align=right><font color=\"'+text_color+'\" size='+graph_gen_fontsize+'><b><nobr>'+genome_sci_name+'</nobr></b></font></td>']\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n val = table_data[genome_ref][cat]\n if val == 0:\n cell_color = 'white'\n else: \n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n cell_color_i = max_color - int(round(max_color * (val-overall_low_val) / float(overall_high_val-overall_low_val)))\n c = color_list[cell_color_i]\n cell_color = '#'+c+c+c+c+'FF'\n\n if params['count_category'].startswith('perc'):\n cell_val = str(\"%.3f\"%table_data[genome_ref][cat])\n cell_val += '%'\n else:\n cell_val = str(table_data[genome_ref][cat])\n\n if 'heatmap' in params and params['heatmap'] == '1':\n if table_data[genome_ref][cat] == 0:\n this_text_color = text_color\n #this_graph_char = \"0\"\n this_graph_char = sp\n else:\n this_text_color = cell_color\n this_graph_char = graph_char\n html_report_lines += ['<td align=center valign=middle title=\"'+cell_val+'\" style=\"width:'+cell_width+'\" bgcolor=\"'+cell_color+'\"><font color=\"'+this_text_color+'\" size='+cell_fontsize+'>'+this_graph_char+'</font></td>']\n else:\n html_report_lines += ['<td align=center valign=middle style=\"'+cell_width+'; border-right:solid 2px '+border_color+'; border-bottom:solid 2px '+border_color+'\"><font color=\"'+text_color+'\" size='+cell_fontsize+'>'+cell_val+'</font></td>']\n\n html_report_lines += ['</tr>']\n html_report_lines += ['</table>']\n\n # genomes as columns\n else:\n raise ValueError (\"Do not yet support Genomes as columns\")\n\n\n # key table\n html_report_lines += ['<p>']\n html_report_lines += ['<table cellpadding=3 cellspacing=2 border='+border+'>']\n html_report_lines += ['<tr><td valign=middle align=left colspan=3 style=\"border-bottom:solid 4px '+border_color+'\"><font color=\"'+text_color+'\"><b>KEY</b></font></td></tr>']\n\n if show_groups:\n group_cat_i = 0\n for cat_group in group_order_with_blanks:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \"&nbsp;<br>\".join(cat_group_words)\n cat_group_disp += sp\n\n html_report_lines += ['<tr>']\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white rowspan='+str(group_size_with_blanks[cat_group])+' 
style=\"border-right:solid 4px '+border_color+'\"></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 4px '+border_color+'\" valign=top align=right rowspan='+str(group_size_with_blanks[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n # add first cat for group\n first_cat = cats[group_cat_i]\n cell_color = 'white'\n #if not cat_seen[first_cat] and not show_blanks:\n if not cat_seen[first_cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = first_cat\n if first_cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', first_cat)\n cat_disp = re.sub ('^SEED', 'SEED:', first_cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = first_cat\n desc = cat2name[namespace][first_cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n # add rest of cats in group\n for c_i in range(group_cat_i, group_cat_i+group_size_with_blanks[cat_group]-1):\n cat = cats[c_i]\n cell_color = 'white'\n #if not cat_seen[cat] and not show_blanks:\n if not cat_seen[cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n \n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n else:\n for cat in cats:\n cell_color = 'white'\n if not cat_seen[cat] and not show_blanks:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', domfam)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" 
size='+graph_cat_fontsize+'>'+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n\n html_report_lines += ['</table>']\n\n # close\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n html_file = os.path.join (output_dir, 'domain_profile_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n upload_ret = dfu.file_to_shock({'file_path': html_file,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading html_report to shock')\n\n reportObj['html_links'] = [{'shock_id': upload_ret['shock_id'],\n 'name': 'domain_profile_report.html',\n 'label': 'Functional Domain Profile report'}\n ]\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_fxn_profile_featureSet\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_fxn_profile_featureSet return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_fxn_profile_phylo(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_fxn_profile_phylo_Input\"\n (view_fxn_profile_phylo() ** ** show a table/heatmap of general\n categories or custom gene families for a set of Genomes using the\n species tree) -> structure: parameter \"workspace_name\" of type\n \"workspace_name\" (** Common types), parameter\n \"input_speciesTree_ref\" of type \"data_obj_ref\", parameter\n \"namespace\" of String, parameter \"custom_target_fams\" of type\n \"CustomTargetFams\" (parameter groups) -> structure: parameter\n \"target_fams\" of list of String, parameter\n \"extra_target_fam_groups_COG\" of list of String, parameter\n \"extra_target_fam_groups_PFAM\" of list of String, parameter\n \"extra_target_fam_groups_TIGR\" of list of String, parameter\n \"extra_target_fam_groups_SEED\" of list of String, parameter\n \"count_category\" of String, parameter \"heatmap\" of type \"bool\",\n parameter \"vertical\" of type \"bool\", parameter \"top_hit\" of type\n \"bool\", parameter \"e_value\" of Double, parameter \"log_base\" of\n Double, parameter \"show_blanks\" of type \"bool\"\n :returns: instance of type \"view_fxn_profile_phylo_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_fxn_profile_phylo\n\n ### STEP 0: basic init\n console = []\n self.log(console, 'Running view_fxn_profile_phylo(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_speciesTree_ref',\n 'namespace'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: 
'\"+arg+\"'\")\n\n if params['namespace'] == 'custom':\n if ('custom_target_fams' not in params or not params['custom_target_fams']) \\\n or ( \\\n ('target_fams' not in params['custom_target_fams'] or not params['custom_target_fams']['target_fams']) \\\n and ('extra_target_fam_groups_COG' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_COG']) \\\n and ('extra_target_fam_groups_PFAM' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_PFAM']) \\\n and ('extra_target_fam_groups_TIGR' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_TIGR']) \\\n and ('extra_target_fam_groups_SEED' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_SEED'])\n ):\n \n raise ValueError (\"Must define either param: 'target_fams' or 'extra_target_fam_groups' if using CUSTOM targets\")\n\n # base config\n namespace_classes = ['COG', 'PF', 'TIGR', 'SEED']\n show_blanks = False\n if 'show_blanks' in params and params['show_blanks'] == '1':\n show_blanks = True\n e_value_thresh = None\n if 'e_value' in params and params['e_value'] != None and params['e_value'] != '':\n e_value_thresh = float (params['e_value'])\n top_hit_flag = False\n if 'top_hit' in params and params['top_hit'] != None and params['top_hit'] != '' and params['top_hit'] != 0:\n top_hit_flag = True\n\n domain_desc_basepath = os.path.abspath('/kb/module/data/domain_desc')\n domain_to_cat_map_path = dict()\n domain_cat_names_path = dict()\n domain_fam_names_path = dict()\n domain_to_cat_map_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_cat_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014_funcat.tsv')\n domain_fam_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_to_cat_map_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_cat_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans_names.tsv')\n domain_fam_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_to_cat_map_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_cat_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrrole2go.txt')\n #domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrfams2go.txt')\n domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_to_cat_map_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_funcat.txt')\n #domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_fam_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n provenance[0]['input_ws_objects']=[str(params['input_speciesTree_ref'])]\n\n\n # set the output paths\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n html_output_dir = os.path.join(output_dir,'html_output')\n if not os.path.exists(html_output_dir):\n os.makedirs(html_output_dir)\n\n\n # configure categories\n #\n cats = []\n cat2name = dict()\n cat2group = dict()\n domfam2cat = dict()\n cat2domfams = dict()\n 
namespaces_reading = dict()\n\n # categories are high-level summations\n if params['namespace'] != 'custom':\n for namespace in ['COG','PF','TIGR','SEED']:\n if params['namespace'] == namespace:\n namespaces_reading[namespace] = True\n\n # read all mappings between groups and domfams\n for namespace in ['COG','PF','TIGR','SEED']:\n\n cat2name[namespace] = dict()\n cat2group[namespace] = dict()\n domfam2cat[namespace] = dict()\n cat2domfams[namespace] = dict()\n\n # get high-level cats\n tigrrole_id2cat = dict()\n with open (domain_cat_names_path[namespace], 'r', 0) as dom_cat_handle:\n for line in dom_cat_handle.readlines():\n line = line.strip()\n \n if namespace == 'COG':\n [cat, cat_group, cat_name] = line.split(\"\\t\")[0:3]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'PF':\n [cat, cat_name] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = None\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [cat, cat_id, cat_group, cat_name_plus_go_terms] = line.split(\"\\t\")[0:4]\n tigrrole_id2cat[cat_id] = cat\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_name = re.sub (' *\\> GO:.*$', '', cat_name_plus_go_terms)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n # DEBUG\n #self.log(console, \"CAT: '\"+str(cat)+\"' NAME: '\"+str(cat_name)+\"' GROUP: '\"+str(cat_group)+\"'\")\n\n elif namespace == 'SEED':\n #[cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n [cat_group, cat] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_disp = re.sub ('_', ' ', cat)\n cat2name[namespace][cat] = cat_disp\n cat2group[namespace][cat] = cat_group\n\n # get domfam to cat map, and vice versa\n with open (domain_to_cat_map_path[namespace], 'r', 0) as dom2cat_map_handle:\n for line in dom2cat_map_handle.readlines():\n line = line.strip()\n\n if namespace == 'COG':\n [domfam, cat_str, cat_name] = line.split(\"\\t\")[0:3]\n cat = cat_str[0] # only use first cat\n\n elif namespace == 'PF':\n [domfam, cat, cat_name, dom_id, dom_name] = line.split(\"\\t\")[0:5]\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if cat_id != '' and int(cat_id) != 0 and cat_id in tigrrole_id2cat:\n cat = tigrrole_id2cat[cat_id]\n else:\n continue\n\n elif namespace == 'SEED':\n [cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n\n domfam2cat[namespace][domfam] = cat\n if cat not in cat2domfams[namespace]:\n cat2domfams[namespace][cat] = []\n cat2domfams[namespace][cat].append(domfam)\n\n # custom domains\n if params['namespace'] == 'custom':\n\n # add target fams\n target_fams = []\n if 'target_fams' in params['custom_target_fams'] and params['custom_target_fams']['target_fams']:\n for target_fam in params['custom_target_fams']['target_fams']:\n target_fam = target_fam.strip()\n if target_fam == '':\n continue\n\n target_fam = re.sub 
(\"^cog\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^pf\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^tigr\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^seed\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PFAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P-FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P_FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGRFAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n\n target_fam = re.sub (\"^COG:\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG-\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG_\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG *\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF:\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF-\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF_\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF *\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR:\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR *\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED:\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED-\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED_\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED *\", \"SEED\", target_fam, flags=re.IGNORECASE)\n\n num_id_len = dict()\n num_id_len['COG'] = 4\n num_id_len['PF'] = 5\n num_id_len['TIGR'] = 5\n\n #self.log (console, \"TARGET_FAM A: '\"+target_fam+\"'\") # DEBUG\n \n if target_fam.startswith('SEED'):\n namespaces_reading['SEED'] = True\n target_fam = target_fam.strip()\n target_fam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' ', '_', target_fam)\n else:\n namespace_found = False\n for namespace_iter in ['COG','PF','TIGR']:\n if target_fam.startswith(namespace_iter):\n this_namespace = namespace_iter\n namespaces_reading[this_namespace] = True\n target_fam = re.sub(this_namespace, \"\", target_fam)\n namespace_found = True\n break\n if not namespace_found:\n raise ValueError (\"unrecognized custom domain family: '\"+str(target_fam)+\"'\")\n leading_zeros = ''\n for c_i in range(num_id_len[this_namespace] - len(str(target_fam))):\n leading_zeros += '0'\n target_fam = this_namespace + leading_zeros + target_fam\n\n #self.log (console, \"TARGET_FAM B: '\"+target_fam+\"'\") # DEBUG\n\n target_fams.append(target_fam)\n\n # add extra target fams\n extra_target_fams = []\n extra_target_fam_groups = []\n domfam2group = dict()\n for target_set in ['extra_target_fam_groups_COG', 'extra_target_fam_groups_PFAM', 'extra_target_fam_groups_TIGR', 'extra_target_fam_groups_SEED']:\n if target_set in params['custom_target_fams'] and params['custom_target_fams'][target_set]:\n extra_target_fam_groups.extend (params['custom_target_fams'][target_set])\n\n 
if extra_target_fam_groups:\n for target_group in extra_target_fam_groups:\n target_group = target_group.strip()\n if target_group == '':\n continue\n\n namespace = re.sub (\":.*$\", \"\", target_group)\n namespaces_reading[namespace] = True\n\n if namespace == 'COG':\n this_group = re.sub (\"COG: \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'PF':\n this_group = re.sub (\"PF: Clan \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'TIGR':\n this_group = re.sub (\"TIGR: role:\", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n this_group = 'TIGR_role:'+this_group\n elif namespace == 'SEED':\n this_group = re.sub (\"SEED: \", \"\", target_group)\n\n for domfam in cat2domfams[namespace][this_group]:\n extra_target_fams.append(domfam)\n domfam2group[domfam] = target_group\n\n # we have our targets\n cats = target_fams + extra_target_fams\n\n # store names of targets\n domfam2name = dict()\n for namespace in namespaces_reading.keys():\n domfam2name[namespace] = dict()\n\n if namespace == 'COG':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, cat_class, domfam_name] = line.split(\"\\t\")[0:3]\n domfam2name[namespace][domfam] = domfam_name\n\n elif namespace == 'PF':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, class_id, class_name, domfam_id, domfam_name] = line.split(\"\\t\")[0:5]\n if domfam_name.startswith(domfam_id):\n combo_name = domfam_name\n else:\n combo_name = domfam_id+': '+domfam_name\n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'TIGR':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if domfam_name != '':\n if domfam_desc.startswith(domfam_name):\n combo_name = domfam_desc\n else:\n combo_name = domfam_name+': '+domfam_desc\n else:\n if domfam_desc.startswith(domfam_id):\n combo_name = domfam_desc\n else:\n combo_name = domfam_id+': '+domfam_desc\n if ec_id != '':\n combo_name += ' (EC '+ec_id+')'\n \n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'SEED':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [level1, level2, level3, domfam] = line.split(\"\\t\")[0:4]\n\n domfam_desc = domfam\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n if domfam in domfam2name[namespace]:\n if len(domfam_desc) > len(domfam2name[namespace][domfam]):\n domfam2name[namespace][domfam] = domfam_desc\n else:\n domfam2name[namespace][domfam] = domfam_desc\n\n # just in case\n elif params['namespace'] != 'COG' \\\n and params['namespace'] != 'PF' \\\n and params['namespace'] != 'TIGR' \\\n and params['namespace'] != 'SEED':\n raise ValueError (\"Unknown namespace: '\"+str(params['namespace'])+\"'\")\n\n\n # get speciesTree\n #\n input_ref = params['input_speciesTree_ref']\n speciesTree_name = None\n try:\n [OBJID_I, NAME_I, 
TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n speciesTree_name = input_obj_info[NAME_I]\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseTrees.Tree\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n speciesTree_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch speciesTree: \"+input_ref)\n\n\n # get genome_refs from speciesTree and instantiate ETE3 tree and order\n #\n genome_refs = []\n genome_id_by_ref = dict()\n genome_ref_by_id = dict()\n for genome_id in speciesTree_obj['default_node_labels'].keys():\n genome_ref = speciesTree_obj['ws_refs'][genome_id]['g'][0]\n genome_id_by_ref[genome_ref] = genome_id\n genome_ref_by_id[genome_id] = genome_ref\n\n species_tree = ete3.Tree(speciesTree_obj['tree'])\n species_tree.ladderize()\n for genome_id in species_tree.get_leaf_names():\n genome_refs.append(genome_ref_by_id[genome_id])\n\n\n # get object names, sci names, protein-coding gene counts, and SEED annot\n #\n genome_obj_name_by_ref = dict()\n genome_sci_name_by_ref = dict()\n genome_sci_name_by_id = dict()\n genome_CDS_count_by_ref = dict()\n uniq_genome_ws_ids = dict()\n\n dom_hits = dict() # initialize dom_hits here because reading SEED within genome\n genes_with_hits_cnt = dict()\n\n for genome_ref in genome_refs:\n\n dom_hits[genome_ref] = dict()\n genes_with_hits_cnt[genome_ref] = dict()\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[genome_ref] = input_name\n\n try:\n genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n # sci name\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n genome_sci_name_by_id[genome_id_by_ref[genome_ref]] = genome_obj['scientific_name']\n \n # CDS cnt\n cds_cnt = 0\n for feature in genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n cds_cnt += 1\n genome_CDS_count_by_ref[genome_ref] = cds_cnt\n\n # SEED annotations\n #\n #f_cnt = 0 # DEBUG\n if 'SEED' in namespaces_reading:\n for feature in genome_obj['features']:\n #if f_cnt % 100 == 0:\n # self.log (console, \"iterating features: \"+str(f_cnt)) # DEBUG\n\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n #if f_cnt % 100 == 0:\n # self.log (console, \"prot: \"+str(feature['protein_translation'])) # DEBUG\n\n if 'function' in feature and feature['function'] != None and feature['function'] != '':\n gene_name = feature['id']\n \n #if f_cnt % 100 == 0:\n # self.log (console, \"fxn: '\"+str(feature['function'])+\"'\") # DEBUG\n\n # store assignments for gene\n for namespace in ['SEED']:\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n dom_hits[genome_ref][gene_name][namespace] = dict()\n\n domfam_list = []\n annot_set = feature['function'].strip().split(';')\n for annot in annot_set:\n annot_set_2 = annot.strip().split('@')\n for annot2 in annot_set_2:\n domfam = annot2.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n domfam_list.append(domfam)\n #if f_cnt % 100 == 0:\n # self.log (console, \"domfam: '\"+str(domfam)+\"'\") # DEBUG\n\n if top_hit_flag: # does SEED give more than one function?\n dom_hits[genome_ref][gene_name][namespace][domfam_list[0]] = True\n else:\n for domfam in domfam_list:\n dom_hits[genome_ref][gene_name][namespace][domfam] = True\n\n #f_cnt += 1 # DEBUG\n\n\n # capture domain hits to genes within each namespace\n #\n if params['namespace'] != 'SEED':\n dom_annot_found = dict()\n\n KBASE_DOMAINHIT_GENE_ID_I = 0\n KBASE_DOMAINHIT_GENE_BEG_I = 1 # not used\n KBASE_DOMAINHIT_GENE_END_I = 2 # not used\n KBASE_DOMAINHIT_GENE_STRAND_I = 3 # not used\n KBASE_DOMAINHIT_GENE_HITS_DICT_I = 4\n KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J = 0\n KBASE_DOMAINHIT_GENE_HITS_DICT_END_J = 1\n KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J = 2\n KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J = 3\n KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J = 4\n\n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT A: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n for ws_id in uniq_genome_ws_ids.keys():\n try:\n dom_annot_obj_info_list = wsClient.list_objects({'ids':[ws_id],'type':\"KBaseGeneFamilies.DomainAnnotation\"})\n except Exception as e:\n raise ValueError (\"Unable to list DomainAnnotation objects from workspace: \"+str(ws_id)+\" \"+str(e))\n\n for info in dom_annot_obj_info_list:\n [OBJID_I, 
NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n \n dom_annot_ref = str(info[WSID_I])+'/'+str(info[OBJID_I])+'/'+str(info[VERSION_I])\n try:\n domain_data = wsClient.get_objects([{'ref':dom_annot_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch domain annotation: \"+dom_annot_ref)\n\n # read domain data object\n genome_ref = domain_data['genome_ref']\n if genome_ref not in genome_refs:\n continue\n dom_annot_found[genome_ref] = True\n\n if genome_ref not in dom_hits:\n dom_hits[genome_ref] = dict()\n\n if genome_ref not in genes_with_hits_cnt:\n genes_with_hits_cnt[genome_ref] = dict()\n\n for scaffold_id_iter in domain_data['data'].keys():\n for CDS_domain_list in domain_data['data'][scaffold_id_iter]:\n gene_ID = CDS_domain_list[KBASE_DOMAINHIT_GENE_ID_I]\n #gene_name = re.sub ('^'+genome_object_name+'.', '', gene_ID) \n gene_name = gene_ID\n #(contig_name, gene_name) = (gene_ID[0:gene_ID.index(\".\")], gene_ID[gene_ID.index(\".\")+1:])\n #print (\"DOMAIN_HIT: \"+contig_name+\" \"+gene_name) # DEBUG\n #print (\"DOMAIN_HIT for gene: \"+gene_name) # DEBUG\n #gene_beg = CDS_domain_list[KBASE_DOMAINHIT_GENE_BEG_I]\n #gene_end = CDS_domain_list[KBASE_DOMAINHIT_GENE_END_I]\n #gene_strand = CDS_domain_list[KBASE_DOMAINHIT_GENE_STRAND_I]\n gene_hits_dict = CDS_domain_list[KBASE_DOMAINHIT_GENE_HITS_DICT_I]\n\n dom_hits_by_namespace = dict()\n top_hit_evalue_by_namespace = dict()\n top_hit_dom_by_namespace = dict()\n\n for namespace in namespace_classes:\n dom_hits_by_namespace[namespace] = dict()\n top_hit_evalue_by_namespace[namespace] = 100\n top_hit_dom_by_namespace[namespace] = None\n\n for domfam in gene_hits_dict.keys():\n if domfam.startswith('PF'):\n domfam_clean = re.sub('\\.[^\\.]*$','',domfam)\n else:\n domfam_clean = domfam\n known_namespace = False\n for this_namespace in namespace_classes:\n if domfam.startswith(this_namespace):\n namespace = this_namespace\n known_namespace = True\n if not known_namespace:\n continue\n\n for hit in gene_hits_dict[domfam]:\n beg = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J])\n end = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_END_J])\n e_value = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J])\n bit_score = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J])\n aln_perc = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J])\n\n if e_value_thresh != None and e_value > e_value_thresh:\n continue\n if top_hit_flag:\n if top_hit_dom_by_namespace[namespace] == None \\\n or top_hit_evalue_by_namespace[namespace] > e_value:\n top_hit_dom_by_namespace[namespace] = domfam_clean\n top_hit_evalue_by_namespace[namespace] = e_value\n \n dom_hits_by_namespace[namespace][domfam_clean] = True\n\n # store assignments for gene\n for namespace in namespace_classes:\n if namespace == 'SEED':\n continue\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n if dom_hits_by_namespace[namespace]:\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n \n if top_hit_flag:\n dom_hits[genome_ref][gene_name][namespace] = { top_hit_dom_by_namespace[namespace]: True }\n else:\n dom_hits[genome_ref][gene_name][namespace] = dom_hits_by_namespace[namespace]\n \n # make sure we have domain annotations for all genomes\n missing_annot = []\n for genome_ref in genome_refs:\n if genome_ref not in dom_annot_found:\n missing_annot.append(\"\\t\"+'MISSING 
DOMAIN ANNOTATION FOR: '+genome_ref)\n if missing_annot:\n error_msg = \"ABORT: You must run the DomainAnnotation App first\\n\"\n error_msg += \"\\n\".join(missing_annot)\n raise ValueError (error_msg)\n \n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT B: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n \n # calculate table\n #\n table_data = dict()\n INSANE_VALUE = 10000000000000000\n overall_low_val = INSANE_VALUE\n overall_high_val = -INSANE_VALUE\n\n # count raw\n for genome_ref in genome_refs:\n if genome_ref not in table_data:\n table_data[genome_ref] = dict()\n for cat in cats:\n table_data[genome_ref][cat] = 0\n\n # custom\n if params['namespace'] == 'custom':\n for cat in cats:\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n if cat in dom_hits[genome_ref][gene_name][namespace]:\n table_data[genome_ref][cat] += 1\n\n # high level summation\n else:\n namespace = params['namespace']\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n for domfam in dom_hits[genome_ref][gene_name][namespace].keys():\n #self.log(console, \"DOMFAM: '\"+str(domfam)+\"'\") # DEBUG\n\n if domfam in domfam2cat[namespace]:\n cat = domfam2cat[namespace][domfam]\n #self.log(console, \"CAT: '\"+str(cat)+\"'\") # DEBUG\n if cat in cats:\n #self.log(console, \"CAT_FOUND: '\"+str(cat)+\"'\") # DEBUG\n table_data[genome_ref][cat] += 1\n \n # adjust to percs\n if params['count_category'].startswith('perc'):\n for genome_ref in genome_refs:\n\n # DEBUG\n #sci_name = genome_sci_name_by_ref[genome_ref]\n #try:\n # total_genes = genes_with_hits_cnt[genome_ref]['COG']\n # print (sci_name +\" (\"+genome_ref+\"): COG OK\")\n #except:\n # print (sci_name +\" (\"+genome_ref+\"): COG MISSING\")\n\n for cat in cats:\n if params['count_category'] == 'perc_annot':\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n else:\n namespace = params['namespace']\n total_genes = genes_with_hits_cnt[genome_ref][namespace]\n else:\n total_genes = genome_CDS_count_by_ref[genome_ref]\n\n table_data[genome_ref][cat] /= float(total_genes)\n table_data[genome_ref][cat] *= 100.0\n\n # determine high and low val\n for genome_ref in genome_refs:\n for cat in cats:\n val = table_data[genome_ref][cat]\n if val == 0: continue\n #self.log (console, \"HIGH VAL SCAN CAT: '\"+cat+\"' VAL: '\"+str(val)+\"'\") # DEBUG\n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n if val > overall_high_val:\n overall_high_val = val\n if val < overall_low_val:\n overall_low_val = val\n if overall_high_val == -INSANE_VALUE:\n raise ValueError (\"unable to find any counts\")\n\n\n # determine cats with a value and build group\n #\n cat_seen = dict()\n group_size = dict()\n group_size_with_blanks = dict()\n group_order = []\n group_order_with_blanks = []\n for cat in cats:\n cat_seen[cat] = False\n if params['namespace'] == 'custom':\n # get group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = None\n if extra_target_fam_groups:\n if cat in 
domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = None\n if extra_target_fam_groups:\n if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n else:\n namespace = params['namespace']\n # get group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != None and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n\n\n # Draw tree (we already instantiated Tree above)\n #\n png_file = speciesTree_name+'.png'\n pdf_file = speciesTree_name+'.pdf'\n output_png_file_path = os.path.join(html_output_dir, png_file);\n output_pdf_file_path = os.path.join(html_output_dir, pdf_file);\n\n # init ETE3 accessory objects\n ts = ete3.TreeStyle()\n\n # customize\n ts.show_leaf_name = True\n ts.show_branch_length = False\n ts.show_branch_support = True\n #ts.scale = 50 # 50 pixels per branch length unit\n ts.branch_vertical_margin = 5 # pixels between adjacent branches\n #ts.title.add_face(ete3.TextFace(params['output_name']+\": \"+params['desc'], fsize=10), column=0)\n\n node_style = ete3.NodeStyle()\n node_style[\"fgcolor\"] = \"#606060\" # for node balls\n node_style[\"size\"] = 10 # for node balls (gets reset based on support)\n node_style[\"vt_line_color\"] = \"#606060\"\n node_style[\"hz_line_color\"] = \"#606060\"\n node_style[\"vt_line_width\"] = 2\n node_style[\"hz_line_width\"] = 2\n node_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n node_style[\"hz_line_type\"] = 0\n\n leaf_style = ete3.NodeStyle()\n leaf_style[\"fgcolor\"] = \"#ffffff\" # for node balls\n leaf_style[\"size\"] = 2 # for node balls (we're using it to add space)\n leaf_style[\"vt_line_color\"] = \"#606060\" # unecessary\n leaf_style[\"hz_line_color\"] = \"#606060\"\n leaf_style[\"vt_line_width\"] = 2\n leaf_style[\"hz_line_width\"] = 2\n leaf_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n leaf_style[\"hz_line_type\"] = 0\n\n for n in species_tree.traverse():\n if n.is_leaf():\n style = leaf_style\n genome_id = n.name\n #n.name = genome_sci_name_by_id[genome_id]\n n.name = None\n leaf_name_disp = genome_sci_name_by_id[genome_id]\n n.add_face(ete3.TextFace(leaf_name_disp, fsize=10), column=0, position=\"branch-right\")\n else:\n style = ete3.NodeStyle()\n for k in node_style.keys():\n style[k] = node_style[k]\n\n if n.support > 0.95:\n style[\"size\"] = 6\n elif n.support > 0.90:\n style[\"size\"] = 5\n elif n.support > 0.80:\n style[\"size\"] = 4\n else:\n style[\"size\"] = 2\n\n n.set_style(style)\n\n # save images\n dpi = 300\n img_units = \"in\"\n img_pix_width = 1200\n img_in_width = 
round(float(img_pix_width)/float(dpi), 1)\n img_html_width = img_pix_width // 2\n species_tree.render(output_png_file_path, w=img_in_width, units=img_units, dpi=dpi, tree_style=ts)\n species_tree.render(output_pdf_file_path, w=img_in_width, units=img_units, tree_style=ts) # dpi irrelevant\n\n\n # build report\n #\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n\n\n # build html report\n sp = '&nbsp;'\n text_color = \"#606060\"\n text_color_2 = \"#606060\"\n head_color_1 = \"#eeeeee\"\n head_color_2 = \"#eeeeee\"\n border_color = \"#cccccc\"\n border_cat_color = \"#ffccff\"\n #graph_color = \"lightblue\"\n #graph_width = 100\n #graph_char = \".\"\n graph_char = sp\n color_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e']\n max_color = len(color_list)-1\n cat_disp_trunc_len = 40\n cell_width = '10px'\n tree_scale_factor = 22.625\n tree_img_height = int(tree_scale_factor*len(genome_refs))\n extra_tree_rows = 3\n if len(genome_refs) > 20:\n graph_gen_fontsize = \"1\"\n elif len(genome_refs) > 10:\n graph_gen_fontsize = \"2\"\n else:\n graph_gen_fontsize = \"3\"\n if len(cats) > 20:\n graph_cat_fontsize = \"1\"\n elif len(cats) > 5:\n graph_cat_fontsize = \"2\"\n else:\n graph_cat_fontsize = \"3\"\n if int(graph_cat_fontsize) < int(graph_gen_fontsize):\n cell_fontsize = graph_gen_fontsize = graph_cat_fontsize\n else:\n cell_fontsize = graph_cat_fontsize = graph_gen_fontsize\n #graph_padding = \"5\"\n graph_padding = \"2\"\n graph_spacing = \"3\"\n #border = \"1\"\n border = \"0\"\n #row_spacing = \"-2\"\n num_rows = len(genome_refs)\n show_groups = False\n if len(group_order) > 0: show_groups = True\n\n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Functional Domain Profile with Species Tree</title>']\n html_report_lines += ['<style>']\n html_report_lines += [\".vertical-text {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 0.65em;\\n}\\n.vertical-text__inner {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.1;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += [\".vertical-text_title {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 1.0em;\\n}\\n.vertical-text__inner_title {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.0;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner_title:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += ['</style>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n\n # genomes as rows\n if 'vertical' in params and params['vertical'] == \"1\":\n # table header\n html_report_lines += ['<table cellpadding='+graph_padding+' cellspacing='+graph_spacing+' border='+border+'>']\n corner_rowspan = \"1\"\n if show_groups: corner_rowspan = \"2\"\n label = ''\n if params['namespace'] != 'custom':\n label = params['namespace']\n if label == 'PF':\n label = 'PFAM'\n elif label == 'TIGR':\n label = 'TIGRFAM'\n html_report_lines += 
['<tr><td valign=bottom align=right rowspan='+corner_rowspan+'><div class=\"vertical-text_title\"><div class=\"vertical-text__inner_title\"><font color=\"'+text_color+'\">'+label+'</font></div></div></td>']\n \n # group headers\n if show_groups:\n for cat_group in group_order:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n max_group_width = 3*group_size[cat_group]\n if len(cat_group) > max_group_width:\n new_cat_group_words = []\n sentence_len = 0\n for w_i,word in enumerate(cat_group_words):\n new_cat_group_words.append(word)\n sentence_len += len(word)\n if w_i < len(cat_group_words)-1:\n if sentence_len + 1 + len(cat_group_words[w_i+1]) > max_group_width:\n new_cat_group_words[w_i] += '<br>'\n sentence_len = 0\n cat_group_words = new_cat_group_words\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" \".join(cat_group_words)\n\n # DEBUG\n #if cat_group not in group_size:\n # self.log(console, \"CAT_GROUP: '\"+str(cat_group)+\"'\") # DEBUG\n # self.log(console, \"CAT_GROUP_DISP: '\"+str(cat_group_disp)+\"'\") # DEBUG\n # for cg in group_size:\n # self.log(console, \"CG: '\"+str(cg)+\"'\") # DEBUG\n\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white colspan='+str(group_size[cat_group])+'></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_1+'\"valign=middle align=center colspan='+str(group_size[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n html_report_lines += ['</tr><tr>']\n\n # column headers\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub (\"\\d*$\", \"\", cat)\n cell_title = domfam2name[namespace][cat].strip()\n cat_disp = cat\n cat_disp = re.sub ('^SEED', 'SEED:', cat_disp)\n else:\n cell_title = cat2name[params['namespace']][cat].strip()\n cat_disp = cat\n cat_disp = re.sub (\"TIGR_\", \"\", cat_disp)\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_2+'\"title=\"'+cell_title+'\" valign=bottom align=center>']\n if params['namespace'] != 'COG':\n html_report_lines += ['<div class=\"vertical-text\"><div class=\"vertical-text__inner\">']\n html_report_lines += ['<font color=\"'+text_color_2+'\" size='+graph_cat_fontsize+'><b>']\n #for c_i,c in enumerate(cat_disp):\n # if c_i < len(cat_disp)-1:\n # html_report_lines += [c+'<br>']\n # else:\n # html_report_lines += [c]\n html_report_lines += [cat_disp]\n html_report_lines += ['</b></font>']\n if params['namespace'] != 'COG':\n html_report_lines += ['</div></div>']\n html_report_lines += ['</td>']\n html_report_lines += ['</tr>']\n \n # add tree image\n html_report_lines += ['<tr><td align=\"left\" valign=\"top\" rowspan='+str(len(genome_refs)+extra_tree_rows)+'><img src=\"'+png_file+'\" height='+str(tree_img_height)+'></td>']\n\n # rest of rows\n for row_i,genome_ref in enumerate(genome_refs):\n genome_sci_name = genome_sci_name_by_ref[genome_ref]\n if row_i > 0:\n html_report_lines += ['<tr>']\n #html_report_lines += ['<td align=right><font 
color=\"'+text_color+'\" size='+graph_gen_fontsize+'><b><nobr>'+genome_sci_name+'</nobr></b></font></td>']\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n val = table_data[genome_ref][cat]\n if val == 0:\n cell_color = 'white'\n else: \n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n cell_color_i = max_color - int(round(max_color * (val-overall_low_val) / float(overall_high_val-overall_low_val)))\n c = color_list[cell_color_i]\n cell_color = '#'+c+c+c+c+'FF'\n\n if params['count_category'].startswith('perc'):\n cell_val = str(\"%.3f\"%table_data[genome_ref][cat])\n cell_val += '%'\n else:\n cell_val = str(table_data[genome_ref][cat])\n\n if 'heatmap' in params and params['heatmap'] == '1':\n if table_data[genome_ref][cat] == 0:\n this_text_color = text_color\n #this_graph_char = \"0\"\n this_graph_char = sp\n else:\n this_text_color = cell_color\n this_graph_char = graph_char\n html_report_lines += ['<td align=center valign=middle title=\"'+cell_val+'\" style=\"width:'+cell_width+'\" bgcolor=\"'+cell_color+'\"><font color=\"'+this_text_color+'\" size='+cell_fontsize+'>'+this_graph_char+'</font></td>']\n else:\n html_report_lines += ['<td align=center valign=middle style=\"'+cell_width+'; border-right:solid 2px '+border_color+'; border-bottom:solid 2px '+border_color+'\"><font color=\"'+text_color+'\" size='+cell_fontsize+'>'+cell_val+'</font></td>']\n\n html_report_lines += ['</tr>']\n # add extra blank rows to extend tree rule below grid\n for row_i in range(extra_tree_rows):\n html_report_lines += ['<tr><td bgcolor=\"white\" style=\"width:10px\"><font color=\"white\" size='+cell_fontsize+'>'+sp+'</font></td></tr>']\n\n html_report_lines += ['</table>']\n\n # genomes as columns\n else:\n raise ValueError (\"Do not yet support Genomes as columns\")\n\n\n # key table\n html_report_lines += ['<p>']\n html_report_lines += ['<table cellpadding=3 cellspacing=2 border='+border+'>']\n html_report_lines += ['<tr><td valign=middle align=left colspan=3 style=\"border-bottom:solid 4px '+border_color+'\"><font color=\"'+text_color+'\"><b>KEY</b></font></td></tr>']\n\n if show_groups:\n group_cat_i = 0\n for cat_group in group_order_with_blanks:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \"&nbsp;<br>\".join(cat_group_words)\n cat_group_disp += sp\n\n html_report_lines += ['<tr>']\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white rowspan='+str(group_size_with_blanks[cat_group])+' style=\"border-right:solid 4px '+border_color+'\"></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 4px '+border_color+'\" valign=top align=right rowspan='+str(group_size_with_blanks[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n\n # DEBUG\n #self.log (console, \"CAT GROUP: '\"+cat_group+\"' SIZE: '\"+str(group_size_with_blanks[cat_group])+\"'\")\n\n # add first cat for group\n first_cat = cats[group_cat_i]\n cell_color = 'white'\n #if not cat_seen[first_cat] and not show_blanks:\n if not cat_seen[first_cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = first_cat\n if 
first_cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', first_cat)\n cat_disp = re.sub ('^SEED', 'SEED:', first_cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = first_cat\n desc = cat2name[namespace][first_cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n # add rest of cats in group\n for c_i in range(group_cat_i, group_cat_i+group_size_with_blanks[cat_group]-1):\n cat = cats[c_i]\n cell_color = 'white'\n #if not cat_seen[cat] and not show_blanks:\n if not cat_seen[cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n \n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n else:\n for cat in cats:\n cell_color = 'white'\n if not cat_seen[cat] and not show_blanks:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', domfam)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n\n html_report_lines += ['</table>']\n\n # close\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n html_file = os.path.join (html_output_dir, 'domain_profile_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n #upload_ret = dfu.file_to_shock({'file_path': html_file,\n upload_ret = 
dfu.file_to_shock({'file_path': html_output_dir,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading html_report to shock')\n\n reportObj['html_links'] = [{'shock_id': upload_ret['shock_id'],\n 'name': 'domain_profile_report.html',\n 'label': 'Functional Domain Profile report'}\n ]\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_fxn_profile_phylo\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_fxn_profile_phylo return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_genome_circle_plot(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_genome_circle_plot_Input\"\n (view_genome_circle_plot() ** ** build a circle plot of a\n microbial genome) -> structure: parameter \"workspace_name\" of type\n \"workspace_name\" (** Common types), parameter \"input_genome_ref\"\n of type \"data_obj_ref\"\n :returns: instance of type \"view_genome_circle_plot_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_genome_circle_plot\n #END view_genome_circle_plot\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_genome_circle_plot return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_pan_circle_plot(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_circle_plot_Input\"\n (view_pan_circle_plot() ** ** build a circle plot of a microbial\n genome with its pangenome members) -> structure: parameter\n \"workspace_name\" of type \"workspace_name\" (** Common types),\n parameter \"input_genome_ref\" of type \"data_obj_ref\", parameter\n \"input_pangenome_ref\" of type \"data_obj_ref\", parameter\n \"input_compare_genome_refs\" of type \"data_obj_ref\", parameter\n \"input_outgroup_genome_refs\" of type \"data_obj_ref\", parameter\n \"save_featuresets\" of type \"bool\"\n :returns: instance of type \"view_pan_circle_plot_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_pan_circle_plot\n\n ### STEP 0: basic init\n console = []\n invalid_msgs = []\n self.log(console, 'Running view_pan_circle_plot(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_genome_ref',\n 'input_pangenome_ref'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n 
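The provenance record above is extended with the input refs immediately below, and the same Workspace lookup pattern then repeats for the base genome and the pangenome: unpack the eleven object_info indices, call get_object_info_new, strip the version suffix from the type string, and reject unexpected types. A minimal sketch of that pattern as a reusable helper, assuming only the wsClient API already used in this file (the helper name and signature are illustrative, not part of the module):

import re

# Hypothetical helper (not part of this module) capturing the object_info
# pattern repeated in this method: fetch the info tuple, strip the trailing
# type version, and validate the type against a whitelist.  ws_client stands
# in for the wsClient Workspace client created earlier.
def get_obj_info_checked(ws_client, obj_ref, accepted_types):
    # object_info tuple indices, as unpacked throughout this module
    (OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I,
     WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I) = range(11)
    try:
        info = ws_client.get_object_info_new({'objects': [{'ref': obj_ref}]})[0]
    except Exception as e:
        raise ValueError('Unable to get object from workspace: (' + obj_ref + ') ' + str(e))
    obj_type = re.sub(r'-[0-9]+\.[0-9]+$', '', info[TYPE_I])  # drop trailing version
    if obj_type not in accepted_types:
        raise ValueError("Input object of type '" + obj_type + "' not accepted. "
                         "Must be one of " + ", ".join(accepted_types))
    return info, obj_type

The inline version used below keeps the range(11) unpacking local to each try block, which is why the same unpacking recurs verbatim several times in this method.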
provenance[0]['input_ws_objects']=[str(params['input_genome_ref']),\n str(params['input_pangenome_ref'])\n ]\n\n\n # set the output paths\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n html_output_dir = os.path.join(output_dir,'html_output')\n if not os.path.exists(html_output_dir):\n os.makedirs(html_output_dir)\n\n\n # get base genome\n #\n self.log(console, \"GETTING BASE GENOME OBJECT\")\n genome_sci_name_by_ref = dict()\n base_genome_ref = input_ref = params['input_genome_ref']\n base_genome_obj_name = None\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n base_genome_obj_name = input_obj_info[NAME_I]\n base_genome_obj_name = base_genome_obj_name.replace(\" \",\"_\")\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n try:\n base_genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n genome_sci_name_by_ref[base_genome_ref] = base_genome_obj['scientific_name']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n\n # get pangenome\n #\n self.log(console, \"GETTING PANGENOME OBJECT\")\n input_ref = params['input_pangenome_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n pg_obj_name = input_obj_info[NAME_I]\n pg_obj_name = pg_obj_name.replace(\" \", \"_\")\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Pangenome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n try:\n pg_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n\n # get genome_refs from pangenome and make sure requested genomes are found\n #\n self.log(console, \"READING GENOME REFS IN PANGENOME\")\n pg_genome_refs = pg_obj['genome_refs']\n compare_genome_refs = []\n compare_genomes_cnt = 0\n if 'input_compare_genome_refs' not in params or not params['input_compare_genome_refs']:\n for g_ref in pg_genome_refs:\n if g_ref == base_genome_ref:\n continue\n compare_genome_refs.append(g_ref)\n compare_genomes_cnt += 1\n else:\n for g_ref in params['input_compare_genome_refs']:\n if g_ref == base_genome_ref:\n continue\n compare_genome_refs.append(g_ref)\n compare_genomes_cnt += 1\n\n\n # get outgroup genomes and remove from compare_genomes\n #\n self.log(console, \"REMOVING OUTGROUP GENOME(s) FROM TARGETS\")\n outgroup_genome_refs = []\n outgroup_genome_refs_cnt = 0\n if 'input_outgroup_genome_refs' in params and params['input_outgroup_genome_refs']:\n for genome_ref in params['input_outgroup_genome_refs']:\n outgroup_genome_refs.append(genome_ref)\n outgroup_genome_refs_cnt += 1\n new_compare_genome_refs = []\n compare_genomes_cnt = 0\n for genome_ref in compare_genome_refs:\n if genome_ref not in outgroup_genome_refs:\n new_compare_genome_refs.append(genome_ref)\n compare_genomes_cnt += 1\n compare_genome_refs = new_compare_genome_refs\n\n\n # Make sure all requested genomes are in pangenome\n #\n self.log(console, \"CHECKING FOR REQUESTED GENOMES IN PANGENOME\")\n missing_genomes = []\n for genome_ref in [base_genome_ref] + compare_genome_refs + outgroup_genome_refs:\n if genome_ref not in pg_genome_refs:\n missing_genomes.append(genome_ref)\n if missing_genomes:\n msg = ''\n for genome_ref in missing_genomes:\n msg += \"genome \"+genome_ref+\" not found in pangenome\\n\"\n raise ValueError (msg)\n\n\n # Reorder compare genomes by fractional overlap to base by pangenome\n #\n self.log(console, \"ORDERING TARGET GENOMES BY OVERLAP WITH BASE\")\n compare_genome_cluster_overlap_cnt = dict()\n for genome_ref in compare_genome_refs:\n compare_genome_cluster_overlap_cnt[genome_ref] = 0\n\n for cluster in pg_obj['orthologs']:\n genomes_seen = dict()\n for cluster_member in cluster['orthologs']:\n feature_id = cluster_member[0]\n feature_len_maybe = cluster_member[1]\n genome_ref = cluster_member[2]\n genomes_seen[genome_ref] = True\n if base_genome_ref in genomes_seen:\n for genome_ref in compare_genome_refs:\n if genome_ref in genomes_seen:\n compare_genome_cluster_overlap_cnt[genome_ref] += 1\n\n sorted_compare_genome_refs = sorted(compare_genome_cluster_overlap_cnt, key=compare_genome_cluster_overlap_cnt.__getitem__, reverse=True)\n compare_genome_refs = sorted_compare_genome_refs\n\n\n # Get genome sci names\n #\n self.log(console, \"GETTING GENOME SCIENTIFIC NAMES\")\n for genome_ref in compare_genome_refs + outgroup_genome_refs:\n try:\n genome_obj = wsClient.get_objects([{'ref':genome_ref}])[0]['data']\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n except:\n raise ValueError (\"unable to fetch genome: \"+genome_ref)\n\n\n # Determine singleton, clade-core, universal, and partial pangenome\n # feature sets for base+compare genome set\n # (but not including outgroup genome features)\n #\n self.log(console, \"DETERMINING PANGENOME CATEGORIES OF FEATURES\")\n singleton_featureSet_elements = dict()\n partial_featureSet_elements = 
dict()\n core_featureSet_elements = dict()\n univ_featureSet_elements = dict()\n for cluster in pg_obj['orthologs']:\n genomes_seen = dict()\n fids_by_genome_ref = dict()\n for cluster_member in cluster['orthologs']:\n feature_id = cluster_member[0]\n feature_len_maybe = cluster_member[1]\n genome_ref = cluster_member[2]\n genomes_seen[genome_ref] = True\n try:\n fid_list = fids_by_genome_ref[genome_ref]\n except:\n fids_by_genome_ref[genome_ref] = []\n fids_by_genome_ref[genome_ref].append(feature_id)\n\n # determine categorization\n hit_cnt = 0\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n if genome_ref in genomes_seen:\n hit_cnt += 1\n if hit_cnt == 0: # nothing within requested genome set\n continue\n elif hit_cnt == 1: # singleton\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n if genome_ref in genomes_seen:\n for fid in fids_by_genome_ref[genome_ref]:\n #featureSet_element_id = genome_ref + self.genome_feature_id_delim + fid\n #singleton_featureSet_elements[featureSet_element_id] = [genome_ref]\n if fid in singleton_featureSet_elements:\n singleton_featureSet_elements[fid].append(genome_ref)\n else:\n singleton_featureSet_elements[fid] = [genome_ref]\n elif hit_cnt < compare_genomes_cnt + 1: # +1: include base genome\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n if genome_ref in genomes_seen:\n for fid in fids_by_genome_ref[genome_ref]:\n #featureSet_element_id = genome_ref + self.genome_feature_id_delim + fid\n #partial_featureSet_elements[featureSet_element_id] = [genome_ref]\n if fid in partial_featureSet_elements:\n partial_featureSet_elements[fid].append(genome_ref)\n else:\n partial_featureSet_elements[fid] = [genome_ref]\n else: # core\n outgroup_hit = False\n for genome_ref in outgroup_genome_refs:\n if genome_ref in genomes_seen:\n outgroup_hit = True\n break\n if outgroup_hit: # universal core\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n #if genome_ref in genomes_seen: # implicit\n for fid in fids_by_genome_ref[genome_ref]:\n #featureSet_element_id = genome_ref + self.genome_feature_id_delim + fid\n #univ_featureSet_elements[featureSet_element_id] = [genome_ref]\n if fid in univ_featureSet_elements:\n univ_featureSet_elements[fid].append(genome_ref)\n else:\n univ_featureSet_elements[fid] = [genome_ref]\n else: # clade-specific core\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n #if genome_ref in genomes_seen: # implicit\n for fid in fids_by_genome_ref[genome_ref]:\n #featureSet_element_id = genome_ref + self.genome_feature_id_delim + fid\n #core_featureSet_elements[featureSet_element_id] = [genome_ref]\n if fid in core_featureSet_elements:\n core_featureSet_elements[fid].append(genome_ref)\n else:\n core_featureSet_elements[fid] = [genome_ref]\n\n\n # Create and save featureSets\n #\n objects_created = []\n if 'save_featuresets' not in params or params['save_featuresets'] == None or params['save_featuresets'] == '' or int(params['save_featuresets']) != 1:\n self.log(console, \"SKIPPING FEATURESETS\")\n else:\n self.log(console, \"SAVING FEATURESETS\")\n\n if singleton_featureSet_elements:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".singleton_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" singleton pangenome features\"\n singleton_obj = { 'description': fs_desc,\n 'elements': singleton_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 
'KBaseCollections.FeatureSet',\n 'data': singleton_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n singleton_featureSet_elements = {} # free memory\n singleton_obj = {} # free memory\n \n if partial_featureSet_elements:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".non-core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" non-core pangenome features\"\n partial_obj = { 'description': fs_desc,\n 'elements': partial_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': partial_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n partial_featureSet_elements = {} # free memory\n partial_obj = {} # free memory\n\n if core_featureSet_elements:\n if outgroup_genome_refs_cnt == 0:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" core pangenome features\"\n else:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".clade-specific_core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" clade-specific core pangenome features\"\n core_obj = { 'description': fs_desc,\n 'elements': core_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': core_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n core_featureSet_elements = {} # free memory\n core_obj = {} # free memory\n\n if univ_featureSet_elements:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".non-specific_core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" non-specific core pangenome features\"\n univ_obj = { 'description': fs_desc,\n 'elements': univ_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': univ_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n univ_featureSet_elements = {} # free memory\n univ_obj = {} # free memory\n\n\n # Get mapping of base genes to pangenome\n #\n self.log(console, \"DETERMINING MAPPING OF BASE GENES TO PANGENOME\")\n base_to_compare_redundant_map = dict()\n base_singletons = dict()\n base_cores = dict()\n base_universals = dict()\n for cluster in pg_obj['orthologs']:\n genomes_seen = dict()\n base_fids = []\n compare_genomes_seen = []\n outgroup_genomes_seen = []\n for cluster_member in cluster['orthologs']:\n feature_id = cluster_member[0]\n feature_len_maybe = cluster_member[1]\n genome_ref = cluster_member[2]\n genomes_seen[genome_ref] = True\n if genome_ref == base_genome_ref:\n base_fids.append(feature_id)\n if base_genome_ref in genomes_seen:\n universal = 
True\n core = True\n singleton = True\n for genome_ref in compare_genome_refs:\n if genome_ref in genomes_seen:\n singleton = False\n compare_genomes_seen.append(True)\n else:\n universal = False\n core = False\n compare_genomes_seen.append(False)\n for genome_ref in outgroup_genome_refs:\n if genome_ref in genomes_seen:\n singleton = False\n core = False\n else:\n universal = False\n for base_fid in base_fids:\n base_to_compare_redundant_map[base_fid] = compare_genomes_seen\n if universal:\n base_universals[base_fid] = True\n elif core:\n base_cores[base_fid] = True\n elif singleton:\n base_singletons[base_fid] = True\n \n\n # Get positions of genes in base genome\n #\n self.log(console, \"READING BASE GENOME COORDS\")\n sorted_base_contig_ids = []\n sorted_base_contig_lens = []\n unsorted_contig_lens = dict()\n sorted_contig_order = dict()\n feature_contig_id = dict()\n feature_pos_in_contig = dict()\n feature_order = []\n sum_contig_lens = 0\n\n # hopefully info sitting in Genome obj\n if 'contig_ids' in base_genome_obj and base_genome_obj['contig_ids'] != None:\n for contig_i,contig_id in enumerate(base_genome_obj['contig_ids']):\n contig_len = base_genome_obj['contig_lengths'][contig_i]\n unsorted_contig_lens[contig_id] = contig_len\n sum_contig_lens += contig_len\n\n # otherwise have to get contig ids from Assembly or ContigSet obj\n else: \n # Get genome_assembly_refs\n base_genome_assemby_ref = None\n base_genome_assembly_type = None\n if ('contigset_ref' not in base_genome_obj or base_genome_obj['contigset_ref'] == None) \\\n and ('assembly_ref' not in base_genome_obj or base_genome_obj['assembly_ref'] == None):\n msg = \"Genome \"+base_genome_obj_name+\" (ref:\"+base_genome_ref+\") \"+genome_sci_name_by_ref[base_genome_ref]+\" MISSING BOTH contigset_ref AND assembly_ref. Cannot process. 
Exiting.\"\n self.log(console, msg)\n #self.log(invalid_msgs, msg)\n #continue\n raise ValueError (msg)\n elif 'assembly_ref' in base_genome_obj and base_genome_obj['assembly_ref'] != None:\n msg = \"Genome \"+base_genome_obj_name+\" (ref:\"+base_genome_ref+\") \"+genome_sci_name_by_ref[base_genome_ref]+\" USING assembly_ref: \"+str(base_genome_obj['assembly_ref'])\n self.log (console, msg)\n base_genome_assembly_ref = base_genome_obj['assembly_ref']\n base_genome_assembly_type = 'assembly'\n elif 'contigset_ref' in base_genome_obj and base_genome_obj['contigset_ref'] != None:\n msg = \"Genome \"+base_genome_obj_name+\" (ref:\"+base_genome_ref+\") \"+genome_sci_name_by_ref[base_genome_ref]+\" USING contigset_ref: \"+str(base_genome_obj['contigset_ref'])\n self.log (console, msg)\n base_genome_assembly_ref = base_genome_obj['contigset_ref']\n base_genome_assembly_type = 'contigset'\n\n # get assembly obj and read contig ids and lengths (both contigset obj and assembly obj have list of contigs that \n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n #objects_list = wsClient.get_objects2({'objects':[{'ref':input_ref}]})['data']\n ass_obj = wsClient.get_objects([{'ref':base_genome_assembly_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch assembly: \"+base_genome_assembly_ref)\n\n if base_genome_assembly_type == 'assembly':\n for contig_key in ass_obj['contigs'].keys():\n contig_id = ass_obj['contigs'][contig_key]['contig_id']\n contig_len = ass_obj['contigs'][contig_key]['length']\n #print (\"CONTIG_ID: '\"+str(contig_id)+\"' CONTIG_LEN: '\"+str(contig_len)+\"'\\n\") # DEBUG\n unsorted_contig_lens[contig_id] = contig_len\n sum_contig_lens += contig_len\n else: # contigset obj\n for contig in ass_obj['contigs']:\n contig_id = contig['id']\n contig_len = contig['length']\n unsorted_contig_lens[contig_id] = contig_len\n sum_contig_lens += contig_len\n \n # order contigs by length and store by contig_id\n for order_i,contig_id in enumerate(sorted(unsorted_contig_lens, key=unsorted_contig_lens.__getitem__, reverse=True)):\n #print (\"STORING CONTIG ORDER: '\"+str(order_i)+\"' for CONTIG_ID: '\"+str(contig_id)+\"'\\n\") # DEBUG\n sorted_contig_order[contig_id] = order_i\n sorted_base_contig_ids.append(contig_id)\n sorted_base_contig_lens.append(unsorted_contig_lens[contig_id])\n feature_order.append([])\n\n for feature in base_genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n fid = feature['id']\n #print (\"FEATURE_ID: '\"+str(fid)+\"'\\n\") # DEBUG\n feature_contig_id[fid] = feature['location'][0][0]\n beg = feature['location'][0][1]\n strand = feature['location'][0][2]\n len = feature['location'][0][3]\n if strand == '-':\n feature_pos_in_contig[fid] = beg - int(len/2)\n else:\n feature_pos_in_contig[fid] = beg + int(len/2)\n contig_i = sorted_contig_order[feature_contig_id[fid]]\n feature_order[contig_i].append(fid)\n\n\n # Draw Circle Plot with matplotlib\n #\n self.log(console, \"CREATING CIRCLE PLOT\")\n img_dpi = 200\n img_units = \"in\"\n img_pix_width = 2000\n img_in_width = round(float(img_pix_width) / float(img_dpi), 2)\n img_html_width = img_pix_width // 4\n\n #genome_ring_scale_factor = 0.8\n genome_ring_scale_factor = 1.0 / compare_genomes_cnt\n #img_pix_width = img_dpi * compare_genomes_cnt * genome_ring_scale_factor\n\n origin_gap_angle = 20\n mark_width = 0.1\n 
ellipse_to_circle_scaling = 1.0\n ellipse_center_x = 0.50\n ellipse_center_y = 0.50\n ellipse_center = (ellipse_center_x, ellipse_center_y)\n lw_to_coord_scale = 0.005\n max_unscaled_rings = 4\n unscaled_ring_lw = 30\n outer_ring_radius = 0.8\n min_inner_radius = 0.3\n\n if compare_genomes_cnt <= max_unscaled_rings:\n gene_bar_lw = unscaled_ring_lw\n inner_radius = outer_ring_radius - lw_to_coord_scale * compare_genomes_cnt * gene_bar_lw\n else:\n inner_radius = min_inner_radius\n gene_bar_lw = genome_ring_scale_factor * (outer_ring_radius - min_inner_radius) / lw_to_coord_scale\n #genome_ring_spacing = 0.05 * gene_bar_lw\n genome_ring_spacing = 0.0\n gene_bar_lw -= genome_ring_spacing\n #self.log(console, \"gene_bar_lw: \"+str(gene_bar_lw)) # DEBUG\n #self.log(console, \"genome_ring_spacing: \"+str(genome_ring_spacing)) # DEBUG\n #self.log(console, \"inner_radius: \"+str(inner_radius)) # DEBUG\n #genome_ring_spacing = 0.05 * gene_bar_lw\n #genome_ring_spacing = 0.3 * gene_bar_lw\n #lw_to_coord_scale = 0.1\n base_singleton_color = \"red\"\n base_core_color = \"magenta\"\n #hit_core_color = \"darkmagenta\"\n hit_core_color = \"magenta\"\n #base_univ_color = \"blue\"\n base_univ_color = \"darkblue\"\n hit_univ_color = \"darkblue\"\n #base_nonspecific_core_color = \"purple\"\n #hit_nonspecific_core_color = \"purple\"\n base_nonspecific_core_color = \"darkblue\"\n hit_nonspecific_core_color = \"darkblue\"\n #base_partial_color = \"cyan\"\n #hit_partial_color = \"deepskyblue\"\n base_partial_color = \"deepskyblue\"\n #hit_partial_color = \"gray\" # too dark\n hit_partial_color = \"lightgray\"\n\n # Build image\n fig = pyplot.figure()\n fig.set_size_inches(img_in_width, img_in_width)\n ax = pyplot.subplot2grid( (1,1), (0,0), rowspan=1, colspan=1 )\n\n # Let's turn off visibility of all tic labels and boxes here\n for ax in fig.axes:\n ax.xaxis.set_visible(False) # remove axis labels and tics\n ax.yaxis.set_visible(False)\n for t in ax.get_xticklabels()+ax.get_yticklabels(): # remove tics\n t.set_visible(False)\n ax.spines['top'].set_visible(False) # Get rid of top axis line\n ax.spines['bottom'].set_visible(False) # bottom axis line\n ax.spines['left'].set_visible(False) # left axis line\n ax.spines['right'].set_visible(False) # right axis line\n\n # Add marks for genomes\n ax = fig.axes[0]\n base_contig_pos = 0\n for contig_i,contig_feature_order in enumerate(feature_order):\n if contig_i > 0:\n base_contig_pos += sorted_base_contig_lens[contig_i-1]\n\n # use base genome for angle\n #\n for fid in contig_feature_order:\n\n # base genome ring color\n if fid in base_singletons:\n gene_color = base_singleton_color\n this_mark_width = 2* mark_width\n z_level = 4\n elif fid in base_cores:\n gene_color = base_core_color\n hit_gene_color = hit_core_color\n this_mark_width = mark_width\n z_level = 3\n elif fid in base_universals:\n if outgroup_genome_refs_cnt == 0:\n gene_color = base_nonspecific_core_color\n hit_gene_color = hit_nonspecific_core_color\n else:\n gene_color = base_univ_color\n hit_gene_color = hit_univ_color\n this_mark_width = mark_width\n z_level = 2\n else:\n gene_color = base_partial_color\n hit_gene_color = hit_partial_color\n this_mark_width = mark_width\n z_level = 1\n gene_pos = base_contig_pos + feature_pos_in_contig[fid]\n \n arc_beg = 90 - origin_gap_angle/2.0 - (360-origin_gap_angle) * (float(gene_pos) / float(sum_contig_lens)) - this_mark_width\n arc_end = 90 - origin_gap_angle/2.0 - (360-origin_gap_angle) * (float(gene_pos) / float(sum_contig_lens)) + this_mark_width\n\n\n 
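Each (arc_beg, arc_end) pair computed above is rendered as a thin matplotlib Arc patch, one wedge per gene per ring, with zorder separating the color classes. The following self-contained sketch shows the same drawing primitive in isolation (figure size, colors, angles and the output filename are all illustrative). Note that Arc takes the full width and height of the ellipse rather than radii, so a radius-like value passed there draws at half the expected size, which appears to be why the label placement further down multiplies its radius by 0.5.

# Minimal, standalone illustration of concentric gene rings built from
# matplotlib.patches.Arc, the primitive used in the plotting code below.
import matplotlib
matplotlib.use('Agg')                      # render off-screen
import matplotlib.pyplot as pyplot
from matplotlib.patches import Arc

fig = pyplot.figure()
fig.set_size_inches(5, 5)
ax = pyplot.subplot2grid((1, 1), (0, 0))
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
ax.set_axis_off()

center = (0.5, 0.5)
for ring_i, color in enumerate(["red", "magenta", "darkblue", "deepskyblue"]):
    diameter = 0.8 - 0.15 * ring_i         # Arc wants full axis lengths, not radii
    # theta1/theta2 are degrees measured counter-clockwise from 3 o'clock,
    # so 90 is 12 o'clock and a 0.2-degree span gives a thin gene-sized tick
    ax.add_patch(Arc(center, diameter, diameter,
                     theta1=80.0, theta2=80.2,
                     edgecolor=color, lw=20, zorder=ring_i))

fig.savefig('ring_sketch.png', dpi=150)    # written to the current directory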
# draw base genome gene\n #gene_bar_radius = inner_radius + 0.5*gene_bar_lw*lw_to_coord_scale\n\n # old (with base in center)\n #gene_bar_radius = inner_radius\n # new (with base on outside)\n #gene_bar_radius = inner_radius + 0.5*(compare_genomes_cnt)*(gene_bar_lw+genome_ring_spacing)*lw_to_coord_scale\n #gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt + 0.5) * (gene_bar_lw+genome_ring_spacing)\n #gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-1) * (gene_bar_lw+genome_ring_spacing) + lw_to_coord_scale * (this_gene_bar_lw+genome_ring_spacing)\n this_gene_bar_lw = unscaled_ring_lw\n if gene_bar_lw == unscaled_ring_lw:\n gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt) * (gene_bar_lw+genome_ring_spacing)\n else:\n gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-1) * (gene_bar_lw+genome_ring_spacing) + 0.5 * lw_to_coord_scale * (this_gene_bar_lw+genome_ring_spacing)\n\n #self.log(console, str('BASE')+\" gene_bar_radius: \"+str(gene_bar_radius)) # DEBUG\n gene_x_radius = 1.0 * gene_bar_radius\n gene_y_radius = ellipse_to_circle_scaling * gene_bar_radius\n \n gene_arc = Arc (ellipse_center, gene_x_radius, gene_y_radius, \\\n theta1=arc_beg, theta2=arc_end, \\\n edgecolor=gene_color, lw=this_gene_bar_lw, alpha=1.0, zorder=z_level) # facecolor does nothing (no fill for Arc)\n ax.add_patch (gene_arc) \n\n # add homolog rings\n for genome_i,hit_flag in enumerate(base_to_compare_redundant_map[fid]):\n if not hit_flag:\n continue\n# if fid in base_cores:\n# #gene_color = \"darkmagenta\"\n# gene_color = \"magenta\"\n# z_level = 3\n# elif fid in base_universals:\n# gene_color = \"darkblue\"\n# z_level = 2\n# else:\n# gene_color = \"deepskyblue\"\n# z_level = 1\n #gene_bar_radius = inner_radius + 0.5*(compare_genomes_cnt-(genome_i+1))*(gene_bar_lw+genome_ring_spacing)*lw_to_coord_scale\n #gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt - (genome_i+1) + 0.5) * (gene_bar_lw+genome_ring_spacing)\n gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt - (genome_i+1)) * (gene_bar_lw+genome_ring_spacing)\n #self.log(console, str(genome_i)+\" gene_bar_radius: \"+str(gene_bar_radius)) # DEBUG\n gene_x_radius = 1.0 * gene_bar_radius\n gene_y_radius = ellipse_to_circle_scaling * gene_bar_radius\n gene_arc = Arc (ellipse_center, gene_x_radius, gene_y_radius, \\\n theta1=arc_beg, theta2=arc_end, \\\n edgecolor=hit_gene_color, lw=gene_bar_lw, alpha=1.0, zorder=z_level) # facecolor does nothing (no fill for Arc)\n ax.add_patch (gene_arc) \n\n # Add labels\n base_text_fontsize = 10\n if gene_bar_lw < unscaled_ring_lw:\n text_fontsize = int(max_unscaled_rings * base_text_fontsize * gene_bar_lw / unscaled_ring_lw)\n if text_fontsize > base_text_fontsize:\n text_fontsize = base_text_fontsize\n else:\n text_fontsize = base_text_fontsize\n text_color = \"#606060\"\n label_margin = 0.005\n y_downshift = 0.0075 * ellipse_to_circle_scaling\n #text_y_delta = 0.25\n #label_margin = 0.0\n #y_downshift = 0.0\n #text_y_delta = 0.0\n\n label_angle = (math.pi/180) * (90 - origin_gap_angle/2.0 - (360-origin_gap_angle))\n #label_radius = inner_radius + 0.5*gene_bar_lw*lw_to_coord_scale\n #label_radius = 0.5*inner_radius\n #label_radius = 0.5*inner_radius + text_y_delta*compare_genomes_cnt*(gene_bar_lw+genome_ring_spacing)*lw_to_coord_scale\n #label_radius = inner_radius + text_y_delta * lw_to_coord_scale * (compare_genomes_cnt + 0.5) * (gene_bar_lw+genome_ring_spacing)\n #label_radius = 
inner_radius + lw_to_coord_scale * (compare_genomes_cnt + 0.5) * (gene_bar_lw+genome_ring_spacing)\n this_gene_bar_lw = unscaled_ring_lw\n if gene_bar_lw == unscaled_ring_lw:\n label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt) * (gene_bar_lw+genome_ring_spacing)\n else:\n label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-1)*(gene_bar_lw+genome_ring_spacing) + 0.5*lw_to_coord_scale * (this_gene_bar_lw+genome_ring_spacing)\n label_radius *= 0.5 # why is this necessary?\n x_shift = label_radius * math.cos(label_angle)\n y_shift = label_radius * math.sin(label_angle)\n label_x_pos = ellipse_center_x + x_shift + label_margin\n label_y_pos = ellipse_center_y + y_shift - y_downshift\n label = str(0)\n ax.text (label_x_pos, label_y_pos, label, verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n for genome_i,genome_ref in enumerate(compare_genome_refs):\n #label_radius = 0.5*inner_radius + text_y_delta*(compare_genomes_cnt-(genome_i+1))*(gene_bar_lw+genome_ring_spacing)*lw_to_coord_scale\n #label_radius = inner_radius + text_y_delta * lw_to_coord_scale * (compare_genomes_cnt - (genome_i+1) + 0.5) * (gene_bar_lw+genome_ring_spacing)\n #label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt - (genome_i+1) + 0.5) * (gene_bar_lw+genome_ring_spacing)\n #label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-(genome_i+1+1))*(gene_bar_lw+genome_ring_spacing) + 0.5*lw_to_coord_scale * (gene_bar_lw+genome_ring_spacing)\n label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-(genome_i+1))*(gene_bar_lw+genome_ring_spacing)\n label_radius *= 0.5 # why is this necessary?\n x_shift = label_radius * math.cos(label_angle)\n y_shift = label_radius * math.sin(label_angle)\n label_x_pos = ellipse_center_x + x_shift + label_margin\n label_y_pos = ellipse_center_y + y_shift - y_downshift\n label = str(genome_i+1)\n ax.text (label_x_pos, label_y_pos, label, verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n # Add color key\n key_x_margin = 0.01\n key_y_margin = 0.01\n key_line_spacing = 0.015\n key_x_label_offset = 0.018\n box_gap = key_line_spacing/6.0\n box_h = key_line_spacing - box_gap\n box_w = box_h\n\n # base genome key\n ax.text (key_x_margin/2.0, 1.0-key_y_margin, genome_sci_name_by_ref[base_genome_ref], verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n key_config = [ { 'name': 'base singletons',\n 'y_shift': 1,\n 'color': base_singleton_color\n },\n { 'name': 'non-core',\n 'y_shift': 2,\n 'color': base_partial_color\n }\n ]\n if outgroup_genome_refs_cnt == 0:\n key_config.extend(\n [ { 'name': 'core',\n 'y_shift': 3,\n 'color': base_nonspecific_core_color\n }\n ])\n else:\n key_config.extend(\n [ { 'name': 'clade-specific core',\n 'y_shift': 3,\n 'color': base_core_color\n },\n { 'name': 'core + outgroup',\n 'y_shift': 4,\n 'color': base_univ_color\n }\n ])\n for k_config in key_config:\n key_box = Rectangle ((key_x_margin, 1.0-(key_y_margin+k_config['y_shift']*key_line_spacing)), box_w, box_h, facecolor=k_config['color'], edgecolor=text_color, alpha=1.0, zorder=1)\n ax.add_patch(key_box)\n ax.text (key_x_margin+key_x_label_offset, 1.0-(key_y_margin+box_gap+k_config['y_shift']*key_line_spacing), k_config['name'], verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n # 
rest of pangenome key\n ax.text (key_x_margin/2.0, 1.0-(key_y_margin+5.5*key_line_spacing), \"Pangenome\", verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n key_config = [ { 'name': 'non-core',\n 'y_shift': 6.5,\n 'color': hit_partial_color\n }\n ]\n if outgroup_genome_refs_cnt == 0:\n key_config.extend ([\n { 'name': 'core',\n 'y_shift': 7.5,\n 'color': hit_nonspecific_core_color\n }\n ])\n else:\n key_config.extend ([\n { 'name': 'clade-specific core',\n 'y_shift': 7.5,\n 'color': hit_core_color\n },\n { 'name': 'core + outgroup',\n 'y_shift': 8.5,\n 'color': hit_univ_color\n }\n ])\n for k_config in key_config:\n key_box = Rectangle ((key_x_margin, 1.0-(key_y_margin+k_config['y_shift']*key_line_spacing)), box_w, box_h, facecolor=k_config['color'], edgecolor=text_color, alpha=1.0, zorder=1)\n ax.add_patch(key_box)\n ax.text (key_x_margin+key_x_label_offset, 1.0-(key_y_margin+box_gap+k_config['y_shift']*key_line_spacing), k_config['name'], verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n\n # Save circle plot\n #\n self.log(console, \"SAVING CIRCLE PLOT\")\n png_file = base_genome_obj_name+'-pangenome_circle.png'\n pdf_file = base_genome_obj_name+'-pangenome_circle.pdf'\n output_png_file_path = os.path.join(html_output_dir, png_file);\n output_pdf_file_path = os.path.join(html_output_dir, pdf_file);\n fig.savefig(output_png_file_path, dpi=img_dpi)\n fig.savefig(output_pdf_file_path, format='pdf')\n\n\n # build report object\n #\n self.log(console, \"CREATING HTML REPORT\")\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n reportObj['objects_created'] = objects_created\n\n\n # build html report\n #\n circle_img_height = 1000\n cell_padding = 0\n cell_spacing = 10\n #cell_spacing = 0\n cell_border = 0\n sp = '&nbsp;'\n text_color = \"#606060\"\n font_size = '3'\n space_fontsize = '1'\n bar_char = '.'\n bar_fontsize = '1'\n bar_width = 50\n num_bars_per_node = 2 + 1\n \n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Pangenome Homolog Circle Plot</title>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n html_report_lines += ['<table cellpadding=\"'+str(cell_padding)+'\" cellspacing=\"'+str(cell_spacing)+'\" border=\"'+str(cell_border)+'\">']\n\n # add circle image\n circle_rowspan = 2 * (compare_genomes_cnt+outgroup_genome_refs_cnt+1)\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=\"middle\" align=\"left\" rowspan=\"'+str(circle_rowspan)+'\">']\n html_report_lines += ['<img src=\"'+png_file+'\" height='+str(circle_img_height)+'>']\n html_report_lines += ['</td>']\n\n # add labels\n for filler_line_i in range((compare_genomes_cnt+outgroup_genome_refs_cnt+1)//2):\n if filler_line_i > 0:\n html_report_lines += ['<tr>']\n html_report_lines += ['<td>'+sp+'</td></tr>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr><b>'+\"genome \"+str(0)+'</b></nobr></font></td>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font 
color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr><b>'+str(genome_sci_name_by_ref[base_genome_ref])+'</b></nobr></font></td>']\n html_report_lines += ['</tr>']\n for genome_i,genome_ref in enumerate(compare_genome_refs):\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr>'+\"genome \"+str(genome_i+1)+'</nobr></font></td>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr>'+str(genome_sci_name_by_ref[genome_ref])+'</nobr></font></td>']\n html_report_lines += ['</tr>']\n for genome_i,genome_ref in enumerate(outgroup_genome_refs):\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr><i>'+\"outgroup\"+'</i></nobr></font></td>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr><i>'+str(genome_sci_name_by_ref[genome_ref])+'</i></nobr></font></td>']\n html_report_lines += ['</tr>']\n for filler_line_i in range((compare_genomes_cnt+outgroup_genome_refs_cnt+1)//2):\n html_report_lines += ['<tr><td>'+sp+'</td></tr>']\n\n # close\n html_report_lines += ['</table>']\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n self.log(console, \"SAVING AND UPLOADING HTML REPORT\")\n html_file = os.path.join (html_output_dir, 'pan_circle_plot_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n png_upload_ret = dfu.file_to_shock({'file_path': output_png_file_path,\n 'make_handle': 0})\n #'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading png_file to shock')\n\n try:\n pdf_upload_ret = dfu.file_to_shock({'file_path': output_pdf_file_path,\n 'make_handle': 0})\n #'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading pdf_file to shock')\n\n try:\n #upload_ret = dfu.file_to_shock({'file_path': html_file,\n upload_ret = dfu.file_to_shock({'file_path': html_output_dir,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading html_report to shock')\n\n reportObj['file_links'] = [{'shock_id': png_upload_ret['shock_id'],\n 'name': 'pan_circle_plot.png',\n 'label': 'Pangenome Circle Plot PNG'},\n {'shock_id': pdf_upload_ret['shock_id'],\n 'name': 'pan_circle_plot.pdf',\n 'label': 'Pangenome Circle Plot PDF'}\n ]\n reportObj['html_links'] = [{'shock_id': upload_ret['shock_id'],\n 'name': 'pan_circle_plot_report.html',\n 'label': 'Pangenome Circle Plot Report'}\n ]\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_pan_circle_plot\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_circle_plot return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def 
view_pan_accumulation_plot(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_accumulation_plot_Input\"\n (view_pan_accumulation_plot() ** ** build an accumulation plot of\n a pangenome) -> structure: parameter \"workspace_name\" of type\n \"workspace_name\" (** Common types), parameter \"input_genome_ref\"\n of type \"data_obj_ref\", parameter \"input_pangenome_ref\" of type\n \"data_obj_ref\"\n :returns: instance of type \"view_pan_accumulation_plot_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_pan_accumulation_plot\n #END view_pan_accumulation_plot\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_accumulation_plot return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_pan_flower_venn(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_flower_venn_Input\"\n (view_pan_flower_venn() ** ** build a multi-member pangenome\n flower venn diagram) -> structure: parameter \"workspace_name\" of\n type \"workspace_name\" (** Common types), parameter\n \"input_genome_ref\" of type \"data_obj_ref\", parameter\n \"input_pangenome_ref\" of type \"data_obj_ref\"\n :returns: instance of type \"view_pan_flower_venn_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_pan_flower_venn\n #END view_pan_flower_venn\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_flower_venn return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_pan_pairwise_overlap(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_pairwise_overlap_Input\"\n (view_pan_pairwise_overlap() ** ** build a multi-member pangenome\n pairwise overlap plot) -> structure: parameter \"workspace_name\" of\n type \"workspace_name\" (** Common types), parameter\n \"input_genome_ref\" of type \"data_obj_ref\", parameter\n \"input_pangenome_ref\" of type \"data_obj_ref\"\n :returns: instance of type \"view_pan_pairwise_overlap_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_pan_pairwise_overlap\n #END view_pan_pairwise_overlap\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_pairwise_overlap return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_pan_phylo(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_phylo_Input\"\n (view_pan_phylo() ** ** show the pangenome accumulation using a\n tree) -> structure: parameter \"workspace_name\" of type\n \"workspace_name\" (** Common types), parameter\n \"input_pangenome_ref\" of type \"data_obj_ref\", parameter\n \"input_speciesTree_ref\" of type \"data_obj_ref\", parameter\n \"save_featuresets\" of type \"bool\"\n :returns: instance of type \"view_pan_phylo_Output\" -> structure:\n parameter \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN 
view_pan_phylo\n\n ### STEP 0: basic init\n console = []\n self.log(console, 'Running view_pan_phylo(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_speciesTree_ref',\n 'input_pangenome_ref'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n provenance[0]['input_ws_objects']=[str(params['input_speciesTree_ref']),\n str(params['input_pangenome_ref'])\n ]\n\n\n # set the output paths\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n html_output_dir = os.path.join(output_dir,'html_output')\n if not os.path.exists(html_output_dir):\n os.makedirs(html_output_dir)\n\n\n # get speciesTree\n #\n input_ref = params['input_speciesTree_ref']\n speciesTree_name = None\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n speciesTree_name = input_obj_info[NAME_I]\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseTrees.Tree\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n speciesTree_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch speciesTree: \"+input_ref)\n\n\n # get genome_refs from speciesTree and instantiate ETE3 tree and order\n #\n genome_refs = []\n genome_id_by_ref = dict()\n genome_ref_by_id = dict()\n for genome_id in speciesTree_obj['default_node_labels'].keys():\n genome_ref = speciesTree_obj['ws_refs'][genome_id]['g'][0]\n genome_id_by_ref[genome_ref] = genome_id\n genome_ref_by_id[genome_id] = genome_ref\n\n species_tree = ete3.Tree(speciesTree_obj['tree']) # instantiate ETE3 tree\n species_tree.ladderize()\n for genome_id in species_tree.get_leaf_names():\n genome_refs.append(genome_ref_by_id[genome_id])\n\n\n # get internal node ids based on sorted genome_refs of children\n #\n node_ids_by_refs = dict()\n genome_ref_to_node_ids_by_refs = dict()\n node_size = dict()\n node_order_by_ref = []\n node_num_id = -1\n for n in species_tree.traverse(\"preorder\"):\n if n.is_leaf():\n continue\n\n node_num_id += 1\n leaf_refs = []\n for genome_id in n.get_leaf_names():\n leaf_refs.append(genome_ref_by_id[genome_id])\n node_ref_id = \"+\".join(sorted(leaf_refs))\n node_size[node_ref_id] = len(leaf_refs)\n node_order_by_ref.append(node_ref_id)\n node_ids_by_refs[node_ref_id] = node_num_id\n\n # point each genome at its nodes\n for genome_ref in leaf_refs:\n if genome_ref not in genome_ref_to_node_ids_by_refs:\n genome_ref_to_node_ids_by_refs[genome_ref] = []\n genome_ref_to_node_ids_by_refs[genome_ref].append(node_ref_id)\n \n\n # get object names, sci names, protein-coding gene counts, and SEED annot\n #\n genome_obj_name_by_ref = dict()\n genome_sci_name_by_ref = dict()\n genome_sci_name_by_id = dict()\n genome_CDS_count_by_ref = dict()\n uniq_genome_ws_ids = dict()\n\n dom_hits = dict() # initialize dom_hits here because reading SEED within genome\n genes_with_hits_cnt = dict()\n\n for genome_ref in genome_refs:\n\n dom_hits[genome_ref] = dict()\n genes_with_hits_cnt[genome_ref] = dict()\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[genome_ref] = input_name\n\n try:\n genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n # sci name\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n genome_sci_name_by_id[genome_id_by_ref[genome_ref]] = genome_obj['scientific_name']\n \n # CDS cnt\n cds_cnt = 0\n for feature in genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n cds_cnt += 1\n genome_CDS_count_by_ref[genome_ref] = cds_cnt\n\n\n # get pangenome\n #\n self.log(console, \"GETTING PANGENOME OBJECT\")\n input_ref = params['input_pangenome_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n pg_obj_name = input_obj_info[NAME_I]\n pg_obj_name = pg_obj_name.replace(\" \", \"_\")\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Pangenome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n try:\n pg_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n\n\n # make sure species tree genomes are found in pangenome (reverse not required)\n for genome_ref in genome_refs:\n if genome_ref not in pg_obj['genome_refs']:\n raise ValueError (\"genome: '\"+str(genome_ref)+\"' from SpeciesTree not present in Pangenome object\")\n\n\n # determine pangenome accumulations of core, partial, and singleton\n #\n cluster_hits = dict()\n nodes_hit_by_gene = dict()\n for node_ref_id in node_ids_by_refs.keys():\n cluster_hits[node_ref_id] = []\n\n cluster_num = -1 # cluster ids themselves start from 1\n for homolog_cluster in pg_obj['orthologs']:\n cluster_num += 1\n for node_ref_id in node_ids_by_refs.keys():\n cluster_hits[node_ref_id].append(0)\n\n nodes_hit = dict()\n for gene in homolog_cluster['orthologs']:\n gene_id = gene[0]\n probably_gene_len_dont_need = gene[1]\n genome_ref = gene[2]\n\n if genome_ref not in genome_ref_to_node_ids_by_refs:\n continue\n for node_ref_id in genome_ref_to_node_ids_by_refs[genome_ref]:\n if node_ref_id not in nodes_hit:\n nodes_hit[node_ref_id] = dict()\n nodes_hit[node_ref_id][genome_ref] = True\n\n # store features\n if node_ref_id not in nodes_hit_by_gene:\n nodes_hit_by_gene[node_ref_id] = dict()\n if cluster_num not in nodes_hit_by_gene[node_ref_id]:\n nodes_hit_by_gene[node_ref_id][cluster_num] = dict()\n if genome_ref not in nodes_hit_by_gene[node_ref_id][cluster_num]:\n nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref] = []\n\n nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref].append(gene_id)\n \n # sum counts\n for node_ref_id in nodes_hit.keys():\n for genome_ref in nodes_hit[node_ref_id].keys():\n cluster_hits[node_ref_id][cluster_num] += 1\n\n # calc accumulations\n clusters_total = dict()\n clusters_singletons = dict()\n clusters_core = dict()\n clusters_partial = dict()\n 
clusters_singletons_by_node_and_cluster_flag = dict()\n clusters_core_by_node_and_cluster_flag = dict()\n clusters_partial_by_node_and_cluster_flag = dict()\n for node_ref_id in node_ids_by_refs.keys():\n clusters_total[node_ref_id] = 0\n clusters_singletons[node_ref_id] = 0\n clusters_core[node_ref_id] = 0\n clusters_partial[node_ref_id] = 0\n clusters_singletons_by_node_and_cluster_flag[node_ref_id] = dict()\n clusters_core_by_node_and_cluster_flag[node_ref_id] = dict()\n clusters_partial_by_node_and_cluster_flag[node_ref_id] = dict()\n\n for cluster_num,hit_cnt in enumerate(cluster_hits[node_ref_id]):\n if hit_cnt > 0:\n clusters_total[node_ref_id] += 1\n if hit_cnt == 1:\n clusters_singletons[node_ref_id] += 1\n clusters_singletons_by_node_and_cluster_flag[node_ref_id][cluster_num] = True\n \n elif hit_cnt == node_size[node_ref_id]:\n clusters_core[node_ref_id] += 1\n clusters_core_by_node_and_cluster_flag[node_ref_id][cluster_num] = True\n else:\n clusters_partial[node_ref_id] += 1\n clusters_partial_by_node_and_cluster_flag[node_ref_id][cluster_num] = True\n\n # get min and max cluster cnts\n INSANE_VALUE = 10000000000000000\n max_clusters_cnt = -INSANE_VALUE\n min_clusters_cnt = INSANE_VALUE\n for node_ref_id in node_ids_by_refs.keys():\n if clusters_total[node_ref_id] > max_clusters_cnt:\n max_clusters_cnt = clusters_total[node_ref_id]\n if clusters_total[node_ref_id] < min_clusters_cnt:\n min_clusters_cnt = clusters_total[node_ref_id]\n\n self.log(console, \"NODE: \"+node_ref_id+\" MIN: \"+str(min_clusters_cnt)+\" MAX: \"+str(max_clusters_cnt)) # DEBUG\n\n\n # Create FeatureSet objects for nodes\n #\n objects_created = []\n if 'save_featuresets' not in params or params['save_featuresets'] == None or params['save_featuresets'] == '' or int(params['save_featuresets']) != 1:\n self.log(console, \"SKIPPING FEATURESETS\")\n else:\n self.log(console, \"SAVING FEATURESETS\")\n\n for node_ref_id in sorted(node_ids_by_refs, key=node_ids_by_refs.get):\n\n node_num_id = str(node_ids_by_refs[node_ref_id])\n\n self.log(console, \"calculating feature sets for node \"+str(node_num_id))\n\n # Core\n if clusters_core[node_ref_id] > 0:\n\n self.log(console, \"\\t\"+\"adding CORE. Num clusters: \"+str(clusters_core[node_ref_id]))\n\n # build core featureset elements\n core_featureSet_elements = {}\n for cluster_num in sorted(clusters_core_by_node_and_cluster_flag[node_ref_id].keys()):\n for genome_ref in nodes_hit_by_gene[node_ref_id][cluster_num].keys():\n for gene_id in nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref]:\n if gene_id in core_featureSet_elements:\n core_featureSet_elements[gene_id].append(genome_ref)\n else:\n core_featureSet_elements[gene_id] = [genome_ref]\n\n # build object\n fs_name = pg_obj_name+\".node-\"+str(node_num_id)+\".core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".node-\"+str(node_num_id)+\" core pangenome features\"\n core_obj = { 'description': fs_desc,\n 'elements': core_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': core_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n core_featureSet_elements = {} # free memory\n core_obj = {} # free memory\n\n\n # Singletons\n if clusters_singletons[node_ref_id] > 0:\n\n self.log(console, \"\\t\"+\"adding SINGLETON. 
Num clusters: \"+str(clusters_singletons[node_ref_id]))\n\n # build singleton featureset elements\n singleton_featureSet_elements = {}\n for cluster_num in sorted(clusters_singletons_by_node_and_cluster_flag[node_ref_id].keys()):\n for genome_ref in nodes_hit_by_gene[node_ref_id][cluster_num].keys():\n for gene_id in nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref]:\n if gene_id in singleton_featureSet_elements:\n singleton_featureSet_elements[gene_id].append(genome_ref)\n else:\n singleton_featureSet_elements[gene_id] = [genome_ref]\n\n # build object\n fs_name = pg_obj_name+\".node-\"+str(node_num_id)+\".singleton_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".node-\"+str(node_num_id)+\" singleton pangenome features\"\n singleton_obj = { 'description': fs_desc,\n 'elements': singleton_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': singleton_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n singleton_featureSet_elements = {} # free memory\n singleton_obj = {} # free memory\n\n\n # Partial pangenome\n if clusters_partial[node_ref_id] > 0:\n\n self.log(console, \"\\t\"+\"adding PARTIAL. Num clusters: \"+str(clusters_partial[node_ref_id]))\n\n # build partial featureset elements\n partial_featureSet_elements = {}\n for cluster_num in sorted(clusters_partial_by_node_and_cluster_flag[node_ref_id].keys()):\n for genome_ref in nodes_hit_by_gene[node_ref_id][cluster_num].keys():\n for gene_id in nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref]:\n if gene_id in partial_featureSet_elements:\n partial_featureSet_elements[gene_id].append(genome_ref)\n else:\n partial_featureSet_elements[gene_id] = [genome_ref]\n\n # build object\n fs_name = pg_obj_name+\".node-\"+str(node_num_id)+\".non-core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".node-\"+str(node_num_id)+\" non-core pangenome features\"\n partial_obj = { 'description': fs_desc,\n 'elements': partial_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': partial_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n partial_featureSet_elements = {} # free memory\n partial_obj = {} # free memory\n\n\n # Draw tree (we already instantiated Tree above)\n #\n png_file = speciesTree_name+'-pangenome.png'\n pdf_file = speciesTree_name+'-pangenome.pdf'\n output_png_file_path = os.path.join(html_output_dir, png_file);\n output_pdf_file_path = os.path.join(html_output_dir, pdf_file);\n\n # init ETE3 accessory objects\n ts = ete3.TreeStyle()\n\n # customize\n min_pie_size = 1000\n max_pie_size = 2000\n leaf_fontsize = 500 # scale of everything is goofy in circle tree mode, and pie size affects type size and line thickness. 
ugh.\n node_fontsize = 500\n ts.mode = \"c\" # circular tree graph\n #ts.arc_start = -180 # 0 degrees = 3 o'clock\n #ts.arc_span = 180\n ts.show_leaf_name = True\n ts.show_branch_length = False\n ts.show_branch_support = True\n #ts.scale = 50 # 50 pixels per branch length unit\n ts.branch_vertical_margin = 5 # pixels between adjacent branches\n #ts.title.add_face(ete3.TextFace(params['output_name']+\": \"+params['desc'], fsize=10), column=0)\n\n node_style = ete3.NodeStyle()\n node_style[\"fgcolor\"] = \"#606060\" # for node balls\n node_style[\"size\"] = 10 # for node balls (gets reset based on support)\n node_style[\"vt_line_color\"] = \"#606060\"\n node_style[\"hz_line_color\"] = \"#606060\"\n node_style[\"vt_line_width\"] = 100 # 2\n node_style[\"hz_line_width\"] = 100 # 2\n node_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n node_style[\"hz_line_type\"] = 0\n\n leaf_style = ete3.NodeStyle()\n leaf_style[\"fgcolor\"] = \"#ffffff\" # for node balls\n leaf_style[\"size\"] = 100 # for node balls (we're using it to add space)\n leaf_style[\"vt_line_color\"] = \"#606060\" # unecessary\n leaf_style[\"hz_line_color\"] = \"#606060\"\n leaf_style[\"vt_line_width\"] = 100 # 2\n leaf_style[\"hz_line_width\"] = 100 # 2\n leaf_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n leaf_style[\"hz_line_type\"] = 0\n\n for n in species_tree.traverse(\"preorder\"):\n if n.is_leaf():\n style = leaf_style\n genome_id = n.name\n #n.name = genome_sci_name_by_id[genome_id]\n n.name = None\n leaf_name_disp = genome_sci_name_by_id[genome_id]\n n.add_face (ete3.TextFace(leaf_name_disp, fsize=leaf_fontsize), column=0, position=\"branch-right\")\n\n else:\n leaf_refs = []\n for genome_id in n.get_leaf_names():\n leaf_refs.append(genome_ref_by_id[genome_id])\n node_ref_id = \"+\".join(sorted (leaf_refs))\n node_num_id = node_ids_by_refs[node_ref_id]\n node_name_disp = str(node_num_id)\n #n.add_face (ete3.TextFace(node_name_disp, fsize=node_fontsize),column=0, position=\"branch-right\")\n n.add_face (ete3.TextFace(' '+node_name_disp+' ', fsize=node_fontsize),column=0)\n\n style = ete3.NodeStyle()\n for k in node_style.keys():\n style[k] = node_style[k]\n\n if n.support > 0.95:\n style[\"size\"] = 6\n elif n.support > 0.90:\n style[\"size\"] = 5\n elif n.support > 0.80:\n style[\"size\"] = 4\n else:\n style[\"size\"] = 2\n\n # yum! 
pie!\n pie_size = int(min_pie_size + float(max_pie_size-min_pie_size) * float(clusters_total[node_ref_id]-min_clusters_cnt) / float(max_clusters_cnt-min_clusters_cnt))\n singleton_perc = round(100.0*float(clusters_singletons[node_ref_id]) / float(clusters_total[node_ref_id]), 1)\n core_perc = round(100.0*float(clusters_core[node_ref_id]) / float(clusters_total[node_ref_id]), 1)\n partial_perc = round (100.0 - core_perc - singleton_perc, 1)\n\n pie_w = pie_h = pie_size\n pie_percs = [singleton_perc, partial_perc, core_perc]\n pie_colors = [\"IndianRed\", \"Orchid\", \"DodgerBlue\"]\n pie_line_color = \"White\"\n\n this_pieFace = ete3.PieChartFace(pie_percs, pie_w, pie_h, pie_colors, pie_line_color)\n n.add_face (this_pieFace, column=1)\n\n n.set_style(style)\n\n # save images\n dpi = 300\n img_units = \"in\"\n img_pix_width = 1200\n img_in_width = round(float(img_pix_width)/float(dpi), 1)\n img_html_width = img_pix_width // 2\n species_tree.render(output_png_file_path, w=img_in_width, units=img_units, dpi=dpi, tree_style=ts)\n species_tree.render(output_pdf_file_path, w=img_in_width, units=img_units, tree_style=ts) # dpi irrelevant\n\n\n # build report object\n #\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n\n\n # build html report\n #\n tree_img_height = 1000\n cell_padding = 0\n #cell_spacing = 5\n cell_spacing = 0\n cell_border = 0\n sp = '&nbsp;'\n horiz_sp = sp+sp+sp+sp\n text_color = \"#606060\"\n font_size = '2'\n space_fontsize = '1'\n bar_char = '.'\n bar_fontsize = '1'\n bar_width = 50\n cat_order = ['TOTAL', 'singleton', 'partial', 'perfect core']\n cat_colors = [text_color] + pie_colors\n #num_bars_per_node = 2*len(cat_order) + 1\n num_bars_per_node = len(cat_order) + 1\n \n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Pangenome Phylogenetic Context</title>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n html_report_lines += ['<table cellpadding=\"'+str(cell_padding)+'\" cellspacing=\"'+str(cell_spacing)+'\" border=\"'+str(cell_border)+'\">']\n\n # add tree image\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=\"top\" align=\"left\" rowspan=\"'+str(num_bars_per_node*len(node_ids_by_refs))+'\">']\n html_report_lines += ['<img src=\"'+png_file+'\" height='+str(tree_img_height)+'>']\n html_report_lines += ['</td>']\n\n # add key and bar graph\n max_cnt = 0\n for node_ref_id in node_order_by_ref:\n if clusters_total[node_ref_id] > max_cnt:\n max_cnt = clusters_total[node_ref_id]\n\n for node_i,node_ref_id in enumerate(node_order_by_ref):\n node_id = node_ids_by_refs[node_ref_id]\n if node_i > 0:\n html_report_lines += ['<tr>']\n\n # vals\n cat_cnts = dict()\n cat_percs = dict()\n cat_cnts['TOTAL'] = clusters_total[node_ref_id]\n cat_cnts['singleton'] = clusters_singletons[node_ref_id]\n cat_cnts['perfect core'] = clusters_core[node_ref_id]\n cat_cnts['partial'] = clusters_total[node_ref_id] - clusters_singletons[node_ref_id] - clusters_core[node_ref_id]\n cat_percs['TOTAL'] = '100'\n cat_percs['singleton'] = round (100.0*float(clusters_singletons[node_ref_id]) / float(clusters_total[node_ref_id]), 1)\n cat_percs['perfect 
core'] = round (100.0*float(clusters_core[node_ref_id]) / float(clusters_total[node_ref_id]), 1)\n if cat_cnts['partial'] == 0:\n cat_percs['partial'] = 0.0\n else:\n cat_percs['partial'] = round (100.0 - cat_percs['perfect core'] - cat_percs['singleton'], 1)\n\n # node id\n node_label = 'NODE '+str(node_id)\n html_report_lines += ['<td rowspan=\"'+str(num_bars_per_node)+'\" valign=\"top\" align=\"right\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><b><nobr>'+str(node_label)+'</nobr></b></font></td>']\n html_report_lines += ['<td rowspan=\"'+str(num_bars_per_node)+'\"><font size=\"'+str(space_fontsize)+'\">'+horiz_sp+'</font></td>']\n\n for cat_i,cat in enumerate(cat_order):\n if cat_i > 0:\n html_report_lines += ['<tr>']\n # cat name\n html_report_lines += ['<td valign=\"top\" align=\"right\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr>'+cat+'</nobr></font></td>']\n html_report_lines += ['<td><font size=\"'+str(space_fontsize)+'\">'+horiz_sp+'</font></td>']\n \n # cnt\n html_report_lines += ['<td valign=\"top\" align=\"right\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\">'+str(cat_cnts[cat])+'</font></td>']\n html_report_lines += ['<td><font size=\"'+str(space_fontsize)+'\">'+horiz_sp+'</font></td>']\n\n #perc\n html_report_lines += ['<td valign=\"top\" align=\"right\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\">'+str(cat_percs[cat])+'%'+'</font></td>']\n html_report_lines += ['<td><font size=\"'+str(space_fontsize)+'\">'+horiz_sp+'</font></td>']\n\n # bar\n this_width = int(round(float(bar_width) * (float(cat_cnts[cat])/float(max_cnt)), 0))\n for cell_i in range(this_width):\n html_report_lines += ['<td bgcolor=\"'+str(cat_colors[cat_i])+'\"><font size=\"'+str(bar_fontsize)+'\" color=\"'+str(cat_colors[cat_i])+'\">'+bar_char+'</font></td>']\n\n html_report_lines += ['</tr>']\n #html_report_lines += ['<tr><td><font size=\"'+str(space_fontsize)+'\">'+sp+'</font></td></tr>'] # space for blank row\n html_report_lines += ['<tr><td><font size=\"'+str(space_fontsize)+'\">'+sp+'</font></td></tr>'] # space for blank row\n \n\n # close\n html_report_lines += ['</table>']\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n html_file = os.path.join (html_output_dir, 'pan_phylo_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n png_upload_ret = dfu.file_to_shock({'file_path': output_png_file_path,\n 'make_handle': 0})\n #'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading png_file to shock')\n\n try:\n pdf_upload_ret = dfu.file_to_shock({'file_path': output_pdf_file_path,\n 'make_handle': 0})\n #'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading pdf_file to shock')\n\n try:\n #upload_ret = dfu.file_to_shock({'file_path': html_file,\n upload_ret = dfu.file_to_shock({'file_path': html_output_dir,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading html_report to shock')\n\n reportObj['file_links'] = [{'shock_id': png_upload_ret['shock_id'],\n 'name': 'phylogenetic_pangenome.png',\n 'label': 'Phylogenetic Pangenome PNG'},\n {'shock_id': pdf_upload_ret['shock_id'],\n 'name': 'phylogenetic_pangenome.pdf',\n 'label': 'Phylogenetic Pangenome PDF'}\n ]\n reportObj['html_links'] = [{'shock_id': 
upload_ret['shock_id'],\n 'name': 'pan_phylo_report.html',\n 'label': 'Phylogenetic Pangenome report'}\n ]\n\n # attach any created objects\n reportObj['objects_created'] = objects_created\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_pan_phylo\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_phylo return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n def status(self, ctx):\n #BEGIN_STATUS\n returnVal = {'state': \"OK\",\n 'message': \"\",\n 'version': self.VERSION,\n 'git_url': self.GIT_URL,\n 'git_commit_hash': self.GIT_COMMIT_HASH}\n #END_STATUS\n return [returnVal]\n" ]
[ [ "matplotlib.patches.Arc", "matplotlib.pyplot.figure", "matplotlib.patches.Rectangle", "matplotlib.pyplot.subplot2grid" ] ]
mbzhu1/ludwig
[ "13c35ec79f930e7dac295e642d92abe82f8c8046" ]
[ "tests/integration_tests/test_model_training_options.py" ]
[ "import json\nimport os.path\nimport re\nfrom collections import namedtuple\nimport logging\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\nfrom ludwig import globals as global_vars\nfrom ludwig.api import LudwigModel\nfrom ludwig.backend import LOCAL_BACKEND\nfrom ludwig.experiment import experiment_cli\nfrom ludwig.features.numerical_feature import numeric_transformation_registry\nfrom ludwig.modules.optimization_modules import optimizers_registry\nfrom ludwig.utils.data_utils import load_json, replace_file_extension\nfrom ludwig.utils.misc_utils import get_from_registry\nfrom tests.integration_tests.utils import category_feature, generate_data\n\nRANDOM_SEED = 42\nNUMBER_OBSERVATIONS = 500\n\nGeneratedData = namedtuple('GeneratedData',\n 'train_df validation_df test_df')\n\n\ndef get_feature_configs():\n input_features = [\n {'name': 'x', 'type': 'numerical'},\n ]\n output_features = [\n {'name': 'y', 'type': 'numerical',\n 'loss': {'type': 'mean_squared_error'},\n 'num_fc_layers': 5, 'fc_size': 64}\n ]\n\n return input_features, output_features\n\n\[email protected](scope='module')\ndef generated_data():\n # function generates simple training data that guarantee convergence\n # within 30 epochs for suitable config\n\n # generate data\n np.random.seed(RANDOM_SEED)\n x = np.array(range(NUMBER_OBSERVATIONS)).reshape(-1, 1)\n y = 2 * x + 1 + np.random.normal(size=x.shape[0]).reshape(-1, 1)\n raw_df = pd.DataFrame(np.concatenate((x, y), axis=1), columns=['x', 'y'])\n\n # create training data\n train, valid_test = train_test_split(raw_df, train_size=0.7)\n\n # create validation and test data\n validation, test = train_test_split(valid_test, train_size=0.5)\n\n return GeneratedData(train, validation, test)\n\n\[email protected](scope='module')\ndef generated_data_for_optimizer():\n # function generates simple training data that guarantee convergence\n # within 30 epochs for suitable config\n\n # generate data\n np.random.seed(RANDOM_SEED)\n x = np.array(range(NUMBER_OBSERVATIONS)).reshape(-1, 1)\n y = 2 * x + 1 + np.random.normal(size=x.shape[0]).reshape(-1, 1)\n raw_df = pd.DataFrame(np.concatenate((x, y), axis=1), columns=['x', 'y'])\n raw_df['x'] = (raw_df['x'] - raw_df['x'].min()) / \\\n (raw_df['x'].max() - raw_df['x'].min())\n raw_df['y'] = (raw_df['y'] - raw_df['y'].min()) / \\\n (raw_df['y'].max() - raw_df['y'].min())\n\n # create training data\n train, valid_test = train_test_split(raw_df, train_size=0.7)\n\n # create validation and test data\n validation, test = train_test_split(valid_test, train_size=0.5)\n\n return GeneratedData(train, validation, test)\n\n\[email protected]('early_stop', [3, 5])\ndef test_early_stopping(early_stop, generated_data, tmp_path):\n input_features, output_features = get_feature_configs()\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {\n 'type': 'concat'\n },\n 'training': {\n 'epochs': 30,\n 'early_stop': early_stop,\n 'batch_size': 16\n }\n }\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n # run experiment\n _, _, _, _, output_dir = experiment_cli(\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n output_directory=str(results_dir),\n config=config,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n skip_save_model=True,\n 
skip_save_log=True\n )\n\n # test existence of required files\n train_stats_fp = os.path.join(output_dir, 'training_statistics.json')\n metadata_fp = os.path.join(output_dir, 'description.json')\n assert os.path.isfile(train_stats_fp)\n assert os.path.isfile(metadata_fp)\n\n # retrieve results so we can validate early stopping\n with open(train_stats_fp, 'r') as f:\n train_stats = json.load(f)\n with open(metadata_fp, 'r') as f:\n metadata = json.load(f)\n\n # get early stopping value\n early_stop_value = metadata['config']['training']['early_stop']\n\n # retrieve validation losses\n vald_losses = np.array(train_stats['validation']['combined']['loss'])\n last_epoch = vald_losses.shape[0]\n best_epoch = np.argmin(vald_losses)\n\n # confirm early stopping\n assert (last_epoch - best_epoch - 1) == early_stop_value\n\n\[email protected]('skip_save_progress', [False, True])\[email protected]('skip_save_model', [False, True])\ndef test_model_progress_save(\n skip_save_progress,\n skip_save_model,\n generated_data,\n tmp_path\n):\n input_features, output_features = get_feature_configs()\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {'type': 'concat'},\n 'training': {'epochs': 5}\n }\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n # run experiment\n _, _, _, _, output_dir = experiment_cli(\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n output_directory=str(results_dir),\n config=config,\n skip_save_processed_input=True,\n skip_save_progress=skip_save_progress,\n skip_save_unprocessed_output=True,\n skip_save_model=skip_save_model,\n skip_save_log=True\n )\n\n # ========== Check for required result data sets =============\n if skip_save_model:\n model_dir = os.path.join(output_dir, 'model')\n files = [f for f in os.listdir(model_dir) if\n re.match(r'model_weights', f)]\n assert len(files) == 0\n else:\n model_dir = os.path.join(output_dir, 'model')\n files = [f for f in os.listdir(model_dir) if\n re.match(r'model_weights', f)]\n # at least one .index and one .data file, but .data may be more\n assert len(files) >= 2\n assert os.path.isfile(\n os.path.join(output_dir, 'model', 'checkpoint'))\n\n if skip_save_progress:\n assert not os.path.isdir(\n os.path.join(output_dir, 'model', 'training_checkpoints')\n )\n else:\n assert os.path.isdir(\n os.path.join(output_dir, 'model', 'training_checkpoints')\n )\n\n\[email protected]('optimizer', ['sgd', 'adam'])\ndef test_resume_training(optimizer, generated_data, tmp_path):\n input_features, output_features = get_feature_configs()\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {'type': 'concat'},\n 'training': {\n 'epochs': 2,\n 'early_stop': 1000,\n 'batch_size': 16,\n 'optimizer': {'type': optimizer}\n }\n }\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n _, _, _, _, output_dir1 = experiment_cli(\n config,\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n )\n\n config['training']['epochs'] = 4\n\n experiment_cli(\n config,\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n model_resume_path=output_dir1,\n )\n\n _, _, _, _, output_dir2 = experiment_cli(\n config,\n training_set=generated_data.train_df,\n 
validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n )\n\n # compare learning curves with and without resuming\n ts1 = load_json(os.path.join(output_dir1, 'training_statistics.json'))\n ts2 = load_json(os.path.join(output_dir2, 'training_statistics.json'))\n print('ts1', ts1)\n print('ts2', ts2)\n assert ts1['training']['combined']['loss'] == ts2['training']['combined'][\n 'loss']\n\n # compare predictions with and without resuming\n y_pred1 = np.load(os.path.join(output_dir1, 'y_predictions.npy'))\n y_pred2 = np.load(os.path.join(output_dir2, 'y_predictions.npy'))\n print('y_pred1', y_pred1)\n print('y_pred2', y_pred2)\n assert np.all(np.isclose(y_pred1, y_pred2))\n\n\[email protected]('optimizer_type', optimizers_registry)\ndef test_optimizers(optimizer_type, generated_data_for_optimizer, tmp_path):\n input_features, output_features = get_feature_configs()\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {\n 'type': 'concat'\n },\n 'training': {\n 'epochs': 5,\n 'batch_size': 16,\n 'optimizer': {'type': optimizer_type}\n }\n }\n\n # special handling for adadelta, break out of local minima\n if optimizer_type == 'adadelta':\n config['training']['learning_rate'] = 0.1\n\n model = LudwigModel(config)\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n # run experiment\n train_stats, preprocessed_data, output_directory = model.train(\n training_set=generated_data_for_optimizer.train_df,\n output_directory=str(results_dir),\n config=config,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n skip_save_model=True,\n skip_save_log=True\n )\n\n # retrieve training losses for first and last epochs\n train_losses = np.array(train_stats['training']['combined']['loss'])\n last_epoch = train_losses.shape[0]\n\n # ensure train loss for last epoch is less than first epoch\n assert train_losses[last_epoch - 1] < train_losses[0]\n\n\ndef test_regularization(generated_data, tmp_path):\n input_features, output_features = get_feature_configs()\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {\n 'type': 'concat'\n },\n 'training': {\n 'epochs': 1,\n 'batch_size': 16,\n 'regularization_lambda': 1\n }\n }\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n regularization_losses = []\n for regularizer in [None, 'l1', 'l2', 'l1_l2']:\n tf.keras.backend.clear_session()\n np.random.seed(RANDOM_SEED)\n tf.random.set_seed(RANDOM_SEED)\n\n # setup regularization parameters\n config['output_features'][0][\n 'weights_regularizer'] = regularizer\n config['output_features'][0][\n 'bias_regularizer'] = regularizer\n config['output_features'][0][\n 'activity_regularizer'] = regularizer\n\n # run experiment\n _, _, _, _, output_dir = experiment_cli(\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n output_directory=str(results_dir),\n config=config,\n experiment_name='regularization',\n model_name=str(regularizer),\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n skip_save_model=True,\n skip_save_log=True\n )\n\n # test existence of required files\n train_stats_fp = os.path.join(output_dir, 'training_statistics.json')\n metadata_fp = os.path.join(output_dir, 'description.json')\n assert 
os.path.isfile(train_stats_fp)\n assert os.path.isfile(metadata_fp)\n\n # retrieve results so we can compare training loss with regularization\n with open(train_stats_fp, 'r') as f:\n train_stats = json.load(f)\n\n # retrieve training losses for all epochs\n train_losses = np.array(train_stats['training']['combined']['loss'])\n regularization_losses.append(train_losses[0])\n\n # create a set of losses\n regularization_losses_set = set(regularization_losses)\n\n # ensure all losses obtained with the different methods are different\n assert len(regularization_losses) == len(regularization_losses_set)\n\n\n# test cache checksum function\ndef test_cache_checksum(csv_filename, tmp_path):\n # setup for training\n input_features = [category_feature(vocab_size=5)]\n output_features = [category_feature(vocab_size=2)]\n\n source_dataset = os.path.join(tmp_path, csv_filename)\n source_dataset = generate_data(input_features, output_features,\n source_dataset)\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'preprocessing': {'text': {'most_common_word': 1000}},\n 'training': {'epochs': 2}\n }\n\n # conduct initial training\n output_directory = os.path.join(tmp_path, 'results')\n model = LudwigModel(config)\n _, _, train_output_directory1 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n first_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # conduct second training, should not force recreating hdf5\n model = LudwigModel(config)\n _, _, train_output_directory2 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # time stamps should be the same\n assert first_training_timestamp == current_training_timestamp\n\n # force recreating cache file by changing checksum\n prior_training_timestamp = current_training_timestamp\n config['preprocessing']['text']['most_common_word'] = 2000\n model = LudwigModel(config)\n _, _, train_output_directory3 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamp should differ\n assert prior_training_timestamp < current_training_timestamp\n\n # force recreating cache by updating modification time of source dataset\n prior_training_timestamp = current_training_timestamp\n os.utime(source_dataset)\n model = LudwigModel(config)\n _, _, train_output_directory4 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamps should be different\n assert prior_training_timestamp < current_training_timestamp\n\n # force change in feature preprocessing\n prior_training_timestamp = current_training_timestamp\n input_features = config['input_features'].copy()\n input_features[0]['preprocessing'] = {'lowercase': True}\n config['input_features'] = input_features\n model = LudwigModel(config)\n _, _, train_output_directory5 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamps should be different\n assert prior_training_timestamp < current_training_timestamp\n\n # force change in features names (and properties)\n prior_training_timestamp 
= current_training_timestamp\n input_features = [category_feature(vocab_size=5), category_feature()]\n source_dataset = generate_data(input_features, output_features,\n source_dataset)\n config['input_features'] = input_features\n model = LudwigModel(config)\n _, _, train_output_directory5 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamps should be different\n assert prior_training_timestamp < current_training_timestamp\n\n # force change in Ludwig version\n prior_training_timestamp = current_training_timestamp\n global_vars.LUDWIG_VERSION = 'new_version'\n model = LudwigModel(config)\n _, _, train_output_directory5 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamps should be different\n assert prior_training_timestamp < current_training_timestamp\n\n\[email protected](\n 'transformer_key', list(numeric_transformation_registry.keys())\n)\ndef test_numeric_transformer(transformer_key, tmpdir):\n Transformer = get_from_registry(transformer_key,\n numeric_transformation_registry)\n transformer_name = Transformer().__class__.__name__\n if transformer_name == 'Log1pTransformer':\n raw_values = np.random.lognormal(5, 2, size=100)\n else:\n raw_values = np.random.normal(5, 2, size=100)\n\n backend = LOCAL_BACKEND\n parameters = Transformer.fit_transform_params(raw_values, backend)\n if transformer_name in {'Log1pTransformer', 'IdentityTransformer'}:\n # should be empty\n assert not bool(parameters)\n else:\n # should not be empty\n assert bool(parameters)\n\n # instantiate numeric transformer\n numeric_transfomer = Transformer(**parameters)\n\n # transform values\n transformed_values = numeric_transfomer.transform(raw_values)\n\n # inverse transform the prior transformed values\n reconstructed_values = \\\n numeric_transfomer.inverse_transform(transformed_values)\n\n # should now match\n assert np.allclose(raw_values, reconstructed_values)\n\n # now test numeric transformer with output feature\n df = pd.DataFrame(np.array([raw_values, raw_values]).T, columns=['x', 'y'])\n config = {\n 'input_features': [\n {'name': 'x', 'type': 'numerical'}\n ],\n 'output_features': [\n {'name': 'y', 'type': 'numerical',\n 'preprocessing': {'normalization': transformer_key}}\n ],\n 'combiner': {\n 'type': 'concat',\n },\n 'training': {\n 'epochs': 2,\n 'batch_size': 16,\n }\n }\n\n args = {\n 'config': config,\n 'skip_save_processed_input': True,\n 'output_directory': os.path.join(tmpdir, 'results'),\n 'logging_level': logging.WARN\n }\n\n # ensure no exceptions are raised\n experiment_cli(dataset=df, **args)\n" ]
[ [ "numpy.allclose", "tensorflow.random.set_seed", "numpy.argmin", "numpy.random.seed", "numpy.isclose", "tensorflow.keras.backend.clear_session", "numpy.random.normal", "numpy.random.lognormal", "numpy.array", "numpy.concatenate", "sklearn.model_selection.train_test_split" ] ]
ous8292/arviz
[ "3d788cc7157b764130ee6f84bb2f42021e5ab258" ]
[ "arviz/plots/backends/bokeh/posteriorplot.py" ]
[ "\"\"\"Bokeh Plot posterior densities.\"\"\"\nfrom numbers import Number\nfrom typing import Optional\n\nimport numpy as np\nfrom bokeh.models.annotations import Title\n\nfrom ....stats import hdi\nfrom ....stats.density_utils import get_bins, histogram\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import (\n _scale_fig_size,\n calculate_point_estimate,\n format_sig_figs,\n make_label,\n round_num,\n)\nfrom .. import show_layout\nfrom . import backend_kwarg_defaults, create_axes_grid\n\n\ndef plot_posterior(\n ax,\n length_plotters,\n rows,\n cols,\n figsize,\n plotters,\n bw,\n circular,\n bins,\n kind,\n point_estimate,\n round_to,\n hdi_prob,\n multimodal,\n skipna,\n textsize,\n ref_val,\n rope,\n kwargs,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh posterior plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(\n (\"dpi\", \"plot.bokeh.figure.dpi\"),\n ),\n **backend_kwargs,\n }\n\n (figsize, ax_labelsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, rows, cols)\n\n if ax is None:\n ax = create_axes_grid(\n length_plotters,\n rows,\n cols,\n figsize=figsize,\n backend_kwargs=backend_kwargs,\n )\n else:\n ax = np.atleast_2d(ax)\n idx = 0\n for (var_name, selection, x), ax_ in zip(\n plotters, (item for item in ax.flatten() if item is not None)\n ):\n _plot_posterior_op(\n idx,\n x.flatten(),\n var_name,\n selection,\n ax=ax_,\n bw=bw,\n circular=circular,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n hdi_prob=hdi_prob,\n multimodal=multimodal,\n skipna=skipna,\n linewidth=linewidth,\n ref_val=ref_val,\n rope=rope,\n ax_labelsize=ax_labelsize,\n **kwargs,\n )\n idx += 1\n _title = Title()\n _title.text = make_label(var_name, selection)\n ax_.title = _title\n\n show_layout(ax, show)\n\n return ax\n\n\ndef _plot_posterior_op(\n idx,\n values,\n var_name,\n selection,\n ax,\n bw,\n circular,\n linewidth,\n bins,\n kind,\n point_estimate,\n hdi_prob,\n multimodal,\n skipna,\n ref_val,\n rope,\n ax_labelsize,\n round_to: Optional[int] = None,\n **kwargs,\n): # noqa: D202\n \"\"\"Artist to draw posterior.\"\"\"\n\n def format_as_percent(x, round_to=0):\n return \"{0:.{1:d}f}%\".format(100 * x, round_to)\n\n def display_ref_val(max_data):\n if ref_val is None:\n return\n elif isinstance(ref_val, dict):\n val = None\n for sel in ref_val.get(var_name, []):\n if all(\n k in selection and selection[k] == v for k, v in sel.items() if k != \"ref_val\"\n ):\n val = sel[\"ref_val\"]\n break\n if val is None:\n return\n elif isinstance(ref_val, list):\n val = ref_val[idx]\n elif isinstance(ref_val, Number):\n val = ref_val\n else:\n raise ValueError(\n \"Argument `ref_val` must be None, a constant, a list or a \"\n 'dictionary like {\"var_name\": [{\"ref_val\": ref_val}]}'\n )\n less_than_ref_probability = (values < val).mean()\n greater_than_ref_probability = (values >= val).mean()\n ref_in_posterior = \"{} <{:g}< {}\".format(\n format_as_percent(less_than_ref_probability, 1),\n val,\n format_as_percent(greater_than_ref_probability, 1),\n )\n ax.line([val, val], [0, 0.8 * max_data], line_color=\"blue\", line_alpha=0.65)\n\n ax.text(x=[values.mean()], y=[max_data * 0.6], text=[ref_in_posterior], text_align=\"center\")\n\n def display_rope(max_data):\n if rope is None:\n return\n elif isinstance(rope, dict):\n vals = None\n for sel in rope.get(var_name, []):\n # pylint: disable=line-too-long\n if all(k in selection and selection[k] == v for k, v in sel.items() if k != \"rope\"):\n vals = 
sel[\"rope\"]\n break\n if vals is None:\n return\n elif len(rope) == 2:\n vals = rope\n else:\n raise ValueError(\n \"Argument `rope` must be None, a dictionary like\"\n '{\"var_name\": {\"rope\": (lo, hi)}}, or an'\n \"iterable of length 2\"\n )\n rope_text = [f\"{val:.{format_sig_figs(val, round_to)}g}\" for val in vals]\n\n ax.line(\n vals,\n (max_data * 0.02, max_data * 0.02),\n line_width=linewidth * 5,\n line_color=\"red\",\n line_alpha=0.7,\n )\n\n text_props = dict(\n text_font_size=\"{}pt\".format(ax_labelsize), text_color=\"black\", text_align=\"center\"\n )\n\n ax.text(x=vals, y=[max_data * 0.2, max_data * 0.2], text=rope_text, **text_props)\n\n def display_point_estimate(max_data):\n if not point_estimate:\n return\n point_value = calculate_point_estimate(point_estimate, values, bw, circular)\n sig_figs = format_sig_figs(point_value, round_to)\n point_text = \"{point_estimate}={point_value:.{sig_figs}g}\".format(\n point_estimate=point_estimate, point_value=point_value, sig_figs=sig_figs\n )\n\n ax.text(x=[point_value], y=[max_data * 0.8], text=[point_text], text_align=\"center\")\n\n def display_hdi(max_data):\n # np.ndarray with 2 entries, min and max\n # pylint: disable=line-too-long\n hdi_probs = hdi(\n values, hdi_prob=hdi_prob, circular=circular, multimodal=multimodal, skipna=skipna\n ) # type: np.ndarray\n\n for hdi_i in np.atleast_2d(hdi_probs):\n ax.line(\n hdi_i,\n (max_data * 0.02, max_data * 0.02),\n line_width=linewidth * 2,\n line_color=\"black\",\n )\n\n ax.text(\n x=list(hdi_i) + [(hdi_i[0] + hdi_i[1]) / 2],\n y=[max_data * 0.07, max_data * 0.07, max_data * 0.3],\n text=list(map(str, map(lambda x: round_num(x, round_to), hdi_i)))\n + [format_as_percent(hdi_prob) + \" HDI\"],\n text_align=\"center\",\n )\n\n def format_axes():\n ax.yaxis.visible = False\n ax.yaxis.major_tick_line_color = None\n ax.yaxis.minor_tick_line_color = None\n ax.yaxis.major_label_text_font_size = \"0pt\"\n ax.xgrid.grid_line_color = None\n ax.ygrid.grid_line_color = None\n\n if skipna:\n values = values[~np.isnan(values)]\n\n if kind == \"kde\" and values.dtype.kind == \"f\":\n kwargs.setdefault(\"line_width\", linewidth)\n plot_kde(\n values,\n bw=bw,\n circular=circular,\n fill_kwargs={\"fill_alpha\": kwargs.pop(\"fill_alpha\", 0)},\n plot_kwargs=kwargs,\n ax=ax,\n rug=False,\n backend=\"bokeh\",\n backend_kwargs={},\n show=False,\n )\n _, hist, edges = histogram(values, bins=\"auto\")\n else:\n if bins is None:\n if values.dtype.kind == \"i\":\n bins = get_bins(values)\n else:\n bins = \"auto\"\n kwargs.setdefault(\"align\", \"left\")\n kwargs.setdefault(\"color\", \"blue\")\n _, hist, edges = histogram(values, bins=bins)\n ax.quad(\n top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_alpha=0.35, line_alpha=0.35\n )\n\n format_axes()\n max_data = hist.max()\n if hdi_prob != \"hide\":\n display_hdi(max_data)\n display_point_estimate(max_data)\n display_ref_val(max_data)\n display_rope(max_data)\n" ]
[ [ "numpy.atleast_2d", "numpy.isnan" ] ]
ERhamat/opendrr-data-store
[ "34a737e8636707f85191e2f97a4ae78e8469e317" ]
[ "scripts/combines_all_csvs.py" ]
[ "#script found online to combine all csvs into one\nimport os\nimport glob\nimport pandas as pd\n#directory link\nos.chdir(\"C:/Workspace/eRisk_CA/PSRA_sample_data/baseline/c-damage\")\nextension = 'csv'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n#combine all files in the list\ncombined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])\n#export to csv\ncombined_csv.to_csv( \"damages-structural-mean_merge_baseline.csv\", index=False, encoding='utf-8-sig')" ]
[ [ "pandas.read_csv" ] ]
yweweler/ctc-asr
[ "4b24c658b43a28a4f939c95041953ad7a283ff1b" ]
[ "python/dataset/sd_estimator.py" ]
[ "\"\"\"\nCalculate mean and standard deviation for a given training txt file.\n\"\"\"\n\nimport os\nimport sys\nimport random\n\nfrom multiprocessing import Pool, Lock, cpu_count\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom python.load_sample import load_sample\nfrom python.params import BASE_PATH\n\n\n__DATASETS_PATH = os.path.join(BASE_PATH, '../datasets/speech_data')\n__FEATURE_TYPE = 'mel'\n\n\ndef calculate_dataset_stats(txt_path):\n \"\"\"Gather mean and standard deviation values. Averaged for every file in the\n training txt data file.\n\n Args:\n txt_path (str): Path to the `train.txt`.\n\n Returns:\n Nothing.\n \"\"\"\n # Read train.txt file.\n with open(txt_path, 'r') as f:\n lines = f.readlines()\n random.shuffle(lines)\n lines = lines[: int(2.0e5)] # To fit in RAM and not crash Numpy.\n\n # Setup thread pool.\n lock = Lock()\n features = [] # Output buffer.\n\n with Pool(processes=cpu_count()) as pool:\n for feature in tqdm(\n pool.imap_unordered(__stat_calculator, lines, chunksize=4),\n desc='Reading audio samples', total=len(lines), file=sys.stdout,\n unit='samples', dynamic_ncols=True):\n lock.acquire()\n features.append(feature)\n lock.release()\n\n # Reduce the [num_samples, time, num_features] to [total_time, num_features] array.\n features = np.concatenate(features)\n\n print('mean = {}'.format(np.mean(features)))\n print('std = {}'.format(np.std(features)))\n print()\n\n means = np.mean(features, axis=0)\n print('__global_mean = [' + ', '.join(map(str, means)) + ']')\n stds = np.std(features, axis=0)\n print('__global_std = [' + ', '.join(map(str, stds)) + ']')\n\n\ndef __stat_calculator(line):\n # Python multiprocessing helper method.\n wav_path, _ = line.split(' ', 1)\n wav_path = os.path.join(__DATASETS_PATH, wav_path)\n\n feature, _ = load_sample(wav_path, feature_type=__FEATURE_TYPE, feature_normalization='none')\n assert len(feature) > 1, 'Empty feature: {}'.format(wav_path)\n\n return feature\n\n\nif __name__ == '__main__':\n # Path to `train.txt` file.\n _test_txt_path = os.path.join(BASE_PATH, 'data', 'train.txt')\n\n # Display dataset stats.\n calculate_dataset_stats(_test_txt_path)\n" ]
[ [ "numpy.std", "numpy.concatenate", "numpy.mean" ] ]
changhiskhan/virtual-background
[ "0002d85b0a329611926077633163b45e6668f673" ]
[ "fakecam/fake.py" ]
[ "import os\nimport cv2\nimport numpy as np\nimport requests\nimport pyfakewebcam\nimport traceback\nimport time\n\ndef get_mask(frame, bodypix_url=os.environ.get(\"BODYPIX_URL\",\"http://bodypix:9000\")):\n _, data = cv2.imencode(\".jpg\", frame)\n r = requests.post(\n url=bodypix_url,\n data=data.tobytes(),\n headers={'Content-Type': 'application/octet-stream'})\n mask = np.frombuffer(r.content, dtype=np.uint8)\n mask = mask.reshape((frame.shape[0], frame.shape[1]))\n return mask\n\ndef post_process_mask(mask):\n mask = cv2.dilate(mask, np.ones((10,10), np.uint8) , iterations=1)\n mask = cv2.blur(mask.astype(float), (10,10))\n return mask\n\ndef shift_image(img, dx, dy):\n img = np.roll(img, dy, axis=0)\n img = np.roll(img, dx, axis=1)\n if dy>0:\n img[:dy, :] = 0\n elif dy<0:\n img[dy:, :] = 0\n if dx>0:\n img[:, :dx] = 0\n elif dx<0:\n img[:, dx:] = 0\n return img\n\ndef hologram_effect(img):\n # add a blue tint\n holo = cv2.applyColorMap(img, cv2.COLORMAP_WINTER)\n # add a halftone effect\n bandLength, bandGap = 2, 3\n for y in range(holo.shape[0]):\n if y % (bandLength+bandGap) < bandLength:\n holo[y,:,:] = holo[y,:,:] * np.random.uniform(0.1, 0.3)\n # add some ghosting\n holo_blur = cv2.addWeighted(holo, 0.2, shift_image(holo.copy(), 5, 5), 0.8, 0)\n holo_blur = cv2.addWeighted(holo_blur, 0.4, shift_image(holo.copy(), -5, -5), 0.6, 0)\n # combine with the original color, oversaturated\n out = cv2.addWeighted(img, 0.5, holo_blur, 0.6, 0)\n return out\n\ndef get_frame(cap, background_scaled, speed=True, effect=None):\n _, frame = cap.read()\n # fetch the mask with retries (the app needs to warmup and we're lazy)\n # e v e n t u a l l y c o n s i s t e n t\n mask = None\n while mask is None:\n try:\n\n if speed:\n shrinked_frame = cv2.resize(frame, (width//2, height//2)) \n shrinked_mask = get_mask(shrinked_frame)\n mask = cv2.resize(shrinked_mask, (width, height))\n else:\n mask = get_mask(frame)\n\n except requests.RequestException:\n print(\"mask request failed, retrying\")\n traceback.print_exc()\n time.sleep(5)\n \n # post-process mask and frame\n mask = post_process_mask(mask)\n if effect is not None: \n effect_fun = globals()[effect + '_effect']\n frame = effect_fun(frame)\n\n # composite the foreground and background\n inv_mask = 1-mask\n for c in range(frame.shape[2]): \n frame[:,:,c] = frame[:,:,c]*mask + background_scaled[:,:,c]*inv_mask\n\n\n return frame\n\n\nif __name__ == '__main__':\n\n actual_device = os.environ.get('ACTUAL_CAMERA','/dev/video0')\n fake_device = os.environ.get('FAKE_CAMERA','/dev/video20')\n width = int(os.environ.get('CAMERA_WIDTH',640))\n height = int(os.environ.get('CAMERA_HEIGHT',360))\n cam_fps = int(os.environ.get('CAMERA_FPS',24))\n is_background_video = os.environ.get('IS_VID_BACKGROUND', 'false') == 'true'\n background_file_path = os.environ.get('BACKGROUND_FILE', '/data/background.jpg')\n effect = os.environ.get('EFFECT', None)\n\n # setup access to the *real* webcam\n cap = cv2.VideoCapture(actual_device)\n \n cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n cap.set(cv2.CAP_PROP_FPS, cam_fps)\n\n # setup the fake camera\n fake = pyfakewebcam.FakeWebcam(fake_device, width, height)\n\n # load the virtual background\n _background_scaled = {}\n if is_background_video:\n def get_background_scaled(width, height):\n if int(time.time()) % 10 == 0 or len(_background_scaled) == 0:\n updated_background_file_size = os.stat(background_file_path).st_size\n if updated_background_file_size != 
_background_scaled.get('size', None):\n if 'cap' in _background_scaled:\n _background_scaled['cap'].release()\n _background_scaled['cap'] = cv2.VideoCapture(background_file_path)\n _background_scaled['size'] = updated_background_file_size\n background_cap = _background_scaled['cap']\n success, frame = background_cap.read()\n if success:\n return cv2.resize(frame, (width, height))\n background_cap.set(cv2.CAP_PROP_POS_FRAMES, 1)\n return get_background_scaled(width, height)\n else:\n def get_background_scaled(width, height):\n if int(time.time()) % 10 == 0 or len(_background_scaled) == 0:\n updated_background_file_size = os.stat(background_file_path).st_size\n if updated_background_file_size != _background_scaled.get('size', None):\n background = cv2.imread(background_file_path)\n _background_scaled['frame'] = cv2.resize(background,(width, height))\n _background_scaled['size'] = updated_background_file_size\n return _background_scaled['frame']\n\n while True:\n frame = get_frame(cap, get_background_scaled(width, height), effect=effect)\n # fake webcam expects RGB\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n fake.schedule_frame(frame)\n" ]
[ [ "numpy.random.uniform", "numpy.ones", "numpy.roll", "numpy.frombuffer" ] ]
AloBer03/MA_Alo-sBerger
[ "93e72f7940a3ea8bab3c72e00c92091c01dc5324" ]
[ "NNFS/nnma.py" ]
[ "## nnma\r\n\r\n## code from NNFS\r\n## My own comments are marked with ##\r\n## My own code start with ##-- and ends with --##\r\n\r\n## Makig a file with only the classes\r\n## This will enable to import nnma and not copy all the function into the new file\r\n\r\nimport matplotlib.gridspec as gridspec\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as cl\r\nimport copy\r\nimport pickle\r\nimport numpy as np\r\nimport nnfs\r\nimport os\r\nimport cv2\r\n\r\n\r\nnnfs.init()\r\n\r\n# Dense Layer\r\nclass Layer_Dense:\r\n\r\n\t# Layer initialization\r\n\tdef __init__(self, n_inputs, n_neuron,\r\n\t\t\t\t weight_regularizer_l1=0, weight_regularizer_l2=0,\r\n\t\t\t\t bias_regularizer_l1=0, bias_regularizer_l2=0):\r\n\t\t# Initialize weights and bias\r\n\t\tself.weights = 0.01 * np.random.randn(n_inputs, n_neuron)\r\n\t\tself.biases = np.zeros((1, n_neuron)) \r\n\t\t# Set regularization strength\r\n\t\tself.weight_regularizer_l1 = weight_regularizer_l1\r\n\t\tself.weight_regularizer_l2 = weight_regularizer_l2\r\n\t\tself.bias_regularizer_l1 = bias_regularizer_l1\r\n\t\tself.bias_regularizer_l2 = bias_regularizer_l2\r\n\t\t# Store stats\r\n\t\tself.stat = 'Layer_Dense: '+str(n_inputs)+', '+str(n_neuron)\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\t# Remember input values\r\n\t\tself.inputs = inputs\r\n\t\t# Calculate output through input, weights, bias\r\n\t\tself.output = np.dot(inputs, self.weights) + self.biases\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues):\r\n\t\t# Gradient on parameters\r\n\t\tself.dweights = np.dot(self.inputs.T, dvalues)\r\n\t\tself.dbiases = np.sum(dvalues, axis=0, keepdims=True)\r\n\t\t\r\n\t\t# Gradient on regularization\r\n\t\t# L1 on weights\r\n\t\tif self.weight_regularizer_l1 > 0:\r\n\t\t\tdL1 = np.ones_like(self.weights)\r\n\t\t\tdL1[self.weights < 0] = -1\r\n\t\t\tself.dweights += self.weight_regularizer_l1 * dL1\r\n\t\t# L2 on weights\r\n\t\tif self.weight_regularizer_l2 > 0:\r\n\t\t\tself.dweights += 2 * self.weight_regularizer_l2 * self.weights\r\n\r\n\t\t# L1 on biases\r\n\t\tif self.bias_regularizer_l1 > 0:\r\n\t\t\tdL1 = np.ones_like(self.biases)\r\n\t\t\tdL1[self.biases < 0] = -1\r\n\t\t\tself.dbiases += self.bias_regularizer_l1 * dL1\r\n\t\t# L2 on biases\r\n\t\tif self.bias_regularizer_l2 > 0:\r\n\t\t\tself.dbiases += 2 * self.bias_regularizer_l2 * self.biases\r\n\r\n\t\t# Gradient on values\r\n\t\tself.dinputs = np.dot(dvalues, self.weights.T)\r\n\r\n\t# Retrieve layer parameters\r\n\tdef get_parameters(self):\r\n\t\treturn self.weights, self.biases\r\n\r\n\t# Set weights and biases in a layer instance\r\n\tdef set_parameters(self, weights, biases):\r\n\t\tself.weights = weights\r\n\t\tself.biases = biases\r\n\r\n# Dropout\r\nclass Layer_Dropout:\r\n\r\n\t# Init\r\n\tdef __init__(self, rate):\r\n\t\t# Store rate, we invert it as for example for dropout of 0.1 we need success rate of 0.9\r\n\t\tself.rate = 1 - rate\r\n\t\t# Store stats\r\n\t\tself.stat = \"Layer_Dropout: rate:\"+str(rate)\r\n\r\n\t# Forward pass \r\n\tdef forward(self, inputs, training):\r\n\r\n\t\t# Save input values\r\n\t\tself.inputs = inputs\r\n\r\n\t\t# If not in the training mode - return values\r\n\t\tif not training:\r\n\t\t\tself.output = inputs.copy()\r\n\t\t\treturn\r\n\r\n\t\t# Generate and save scaled mask\r\n\t\tself.binary_mask = np.random.binomial(1, self.rate, size=inputs.shape) / self.rate\r\n\t\t# Apply mask to output values\r\n\t\tself.output = inputs * self.binary_mask\r\n\r\n\t# Backward pass\r\n\tdef 
backward(self, dvalues):\r\n\t\t# Gradient on values\r\n\t\tself.dinputs = dvalues * self.binary_mask\r\n\r\n# Input \"layer\"\r\nclass Layer_Input:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\tself.output = inputs\r\n\r\nclass Activation_ReLU:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\t# Remember the inputs values\r\n\t\tself.inputs = inputs\r\n\t\t# Calculate ouput value from inputs\r\n\t\tself.output = np.maximum(0, inputs)\r\n\t\t# Store stats\r\n\t\tself.stat = \"Activation_ReLU\"\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues):\r\n\t\t# Since we need to modify the originaal variable, let's make a copy of the values first\r\n\t\tself.dinputs = dvalues.copy()\r\n\r\n\t\t# Zero gradient where input values were nagative\r\n\t\tself.dinputs[self.inputs <= 0] = 0\r\n\r\n\t# Calculate predictions for outputs\r\n\tdef predictions(self, outputs):\r\n\t\treturn outputs\r\n\r\n# Softmax activation\r\nclass Activation_Softmax:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\r\n\t\t# Remember input values\r\n\t\tself.inputs = inputs\r\n\r\n\t\t# Get unnormalized probabilities\t\t \r\n\t\texp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True)) \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \r\n\t\t# Normalize them for each sample \r\n\t\tprobabilities = exp_values / np.sum(exp_values, axis=1, keepdims=True)\r\n\r\n\t\tself.output = probabilities\r\n\r\n\t\t# Store stats\r\n\t\tself.stat = 'Activation_Softmax'\r\n\r\n\t# Backwards pass\r\n\tdef backward(self, dvalues):\r\n\r\n\t\t# Create uninitialized array\r\n\t\tself.dinputs = np.empty_like(dvalues)\r\n\r\n\t\t# Enumerate outputs and gradients\r\n\t\tfor index, (single_output, single_dvalues) in enumerate(zip(self.output, dvalues)):\r\n\t\t\t# Flatten output array\r\n\t\t\tsingle_output = single_output.reshape(-1, 1)\r\n\t\t\t# Calculate Jacobian matrix of the output\r\n\t\t\tjacobian_matrix = np.diagflat(single_output) - np.dot(single_output, single_output.T)\r\n\t\t\t# Calculate sample-wise gradient and add it to the array of sample gradients\r\n\t\t\tself.dinputs[index] = np.dot(jacobian_matrix, single_dvalues)\r\n\r\n\t# Calculate predictions for outputs\r\n\tdef predictions(self, outputs):\r\n\t\treturn np.argmax(outputs, axis=1)\r\n\r\n\t# Return the confidences (Aloïs)\r\n\tdef confidencces(self, outputs):\r\n\t\treturn outputs\r\n\r\n# Sigmoid activation\r\nclass Activation_Sigmoid:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\t# Save input and calculate/save output of the sigmoid function\r\n\t\tself.inputs = inputs\r\n\t\tself.output = 1 / (1 + np.exp(-inputs))\r\n\t\t# Store stats\r\n\t\tself.stat = 'Activation_Sigmoid'\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues):\r\n\t\t# Derivative - calculates form output of the sigmoid function\r\n\t\tself.dinputs = dvalues * (1 - self.output) * self.output\r\n\r\n\t# Calculate predictions for outputs\r\n\tdef predictions(self, outputs):\r\n\t\treturn (outputs > 0.5) * 1\r\n\r\n# Linear activation\r\nclass Activation_Linear:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\t# Just remember values\r\n\t\tself.inputs = inputs\r\n\t\tself.output = inputs\r\n\t\t# Store stats\r\n\t\tself.stat = 'Activation_Linear'\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues):\r\n\t\t# The derivative is 1, 1 * dvalues = dvalues - the chain rule\r\n\t\tself.dinputs = dvalues.copy()\r\n\r\n\t# Calculate predictions for outputs\r\n\tdef 
predictions(self, outputs):\r\n\t\treturn outputs\r\n\r\n#SGD optimizer\r\nclass Optimizer_SGD:\r\n\r\n\t# Initialize optimizer - set settings,\r\n\t# learning rate of 1. is default for this optimizer\r\n\tdef __init__(self, learning_rate=1.0, decay=0.0, momentum=0.):\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.current_learning_rate = learning_rate\r\n\t\tself.decay = decay\r\n\t\tself.iterations = 0\r\n\t\tself.momentum = momentum\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Optimizer_SGD'\r\n\r\n\t# Call once before any parameter updates \r\n\tdef pre_update_params(self):\r\n\t\tif self.decay:\r\n\t\t\tself.current_learning_rate = self.learning_rate * (1. / (1. + self.decay * self.iterations))\r\n\r\n\t# Update parameters \r\n\tdef update_params(self, layer):\r\n\r\n\t\t# If we use momentum\r\n\t\tif self.momentum:\r\n\t\t\t# If layer does not contain momentum arrays, create them filled with zeros\r\n\t\t\tif not hasattr(layer, 'weight_momentums'):\r\n\t\t\t\tlayer.weight_momentums = np.zeros_like(layer.weights)\r\n\t\t\t\t# If there is no momentum array for weights\r\n\t\t\t\t# The array doesn't exist for biases yet either\r\n\t\t\t\tlayer.bias_momentums = np.zeros_like(layer.biases)\r\n\r\n\t\t\t# Build weight updates with momentum - takes previous updates multiplied by retain factor\r\n\t\t\t# and update with current gradients\r\n\t\t\tweight_updates = self.momentum *layer.weight_momentums - \\\r\n\t\t\t\t\t\t\t self.current_learning_rate * layer.dweights\r\n\t\t\tlayer.weight_momentums = weight_updates\r\n\r\n\t\t\t# Build bias updates\r\n\t\t\tbias_updates = self.momentum * layer.bias_momentums - self.current_learning_rate * layer.dbiases\r\n\t\t\tlayer.bias_momentums = bias_updates\r\n\r\n\t\t# Vannilla SGD updates (as before momentum update)\r\n\t\telse: \r\n\t\t\tweight_updates = -self.current_learning_rate * layer.dweights\r\n\t\t\tbias_updates = -self.current_learning_rate * layer.dbiases\r\n\r\n\t\t# Update weights and biases using either vanilla or momentum updates\r\n\t\tlayer.weights += weight_updates\r\n\t\tlayer.biases += bias_updates\r\n\r\n\t# Call once after any parameter updates \r\n\tdef post_update_params(self):\r\n\t\tself.iterations += 1\r\n\r\n# Adagrad optimizer\r\nclass Optimizer_Adagrad:\r\n\r\n\t# Initialize optimizer - set settings\r\n\tdef __init__(self, learning_rate=1., decay=0., epsilon=1e-7):\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.current_learning_rate = learning_rate\r\n\t\tself.decay = decay\r\n\t\tself.iterations = 0\r\n\t\tself.epsilon = epsilon\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Optimizer_Adagrad'\r\n\t\t\r\n\r\n\t# Call once before any parameter updates \r\n\tdef pre_update_params(self):\r\n\t\tif self.decay:\r\n\t\t\tself.current_learning_rate = self.learning_rate * (1. / (1. 
+ self.decay * self.iterations))\r\n\r\n\t# Update parameters \r\n\tdef update_params(self, layer):\r\n\r\n\t\t# If layer does not contain cache arrays, create them filled with zeros\r\n\t\tif not hasattr(layer, 'weight_cache'):\r\n\t\t\tlayer.weight_cache = np.zeros_like(layer.weights)\r\n\t\t\tlayer.bias_cache = np.zeros_like(layer.biases)\r\n\r\n\t\t# Update cache with squared current gradients\r\n\t\tlayer.weight_cache += layer.dweights**2\r\n\t\tlayer.bias_cache += layer.dbiases**2\r\n\t\t\r\n\t\t# Vanilla SGD parameter update + normalization with quare rooted cache\r\n\t\tlayer.weights += -self.current_learning_rate * \\\r\n\t\t\t\t\t\t layer.dweights / \\\r\n\t\t\t\t\t\t (np.sqrt(layer.weight_cache) + self.epsilon)\r\n\t\tlayer.biases += -self.current_learning_rate * \\\r\n\t\t\t\t\t\tlayer.dbiases / \\\r\n\t\t\t\t\t\t(np.sqrt(layer.bias_cache) + self.epsilon)\r\n\r\n\t# Call once after any parameter updates\r\n\tdef post_update_params(self):\r\n\t\tself.iterations += 1\r\n\r\n# RMSprop optimizer\r\nclass Optimizer_RMSprop:\r\n\r\n\t# Initialize optimizer - set settings\r\n\tdef __init__(self, learning_rate=0.001, decay=0.0, epsilon=1e-7, rho=0.9):\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.current_learning_rate = learning_rate\r\n\t\tself.decay = decay\r\n\t\tself.iterations = 0\r\n\t\tself.epsilon = epsilon\r\n\t\tself.rho = rho\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Optimizer_RMSprop'\r\n\r\n\t# Call once before any parameter updates \r\n\tdef pre_update_params(self):\r\n\t\tif self.decay:\r\n\t\t\tself.current_learning_rate = self.learning_rate * (1. / (1. + self.decay *self.iterations))\r\n\r\n\t# Update parameters \r\n\tdef update_params(self, layer):\r\n\r\n\t\t# If layer does not contain cache arrays, create them filled with zeros\r\n\t\tif not hasattr(layer, 'weight_cache'):\r\n\t\t\tlayer.weight_cache = np.zeros_like(layer.weights)\r\n\t\t\tlayer.bias_cache = np.zeros_like(layer.biases)\r\n\r\n\t\t# Update cache with squared current gradients\r\n\t\tlayer.weight_cache = self.rho * layer.weight_cache + \\\r\n\t\t\t\t\t\t\t (1 - self.rho) * layer.dweights**2\r\n\t\tlayer.bias_cache = self.rho * layer.bias_cache + \\\r\n\t\t\t\t\t\t (1 - self.rho) * layer.dbiases**2\r\n\r\n\t\t# Vanilla SGD parameter update + normalization\r\n\t\t# with square rooted cache\r\n\t\tlayer.weights += -self.current_learning_rate * layer.dweights / \\\r\n\t\t\t\t\t\t (np.sqrt(layer.weight_cache) + self.epsilon)\r\n\t\tlayer.biases += -self.current_learning_rate * layer.dbiases / \\\r\n\t\t\t\t\t\t(np.sqrt(layer.bias_cache) + self.epsilon)\r\n\r\n\t# Call once after any parameter updates\r\n\tdef post_update_params(self):\r\n\t\tself.iterations += 1\r\n\r\n# Adam optimizer\r\nclass Optimizer_Adam:\r\n\r\n\t# Initialize optimizer - set settings\r\n\tdef __init__(self, learning_rate=0.001, decay=0.0, epsilon=1e-7, beta_1=0.9, beta_2 = 0.999):\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.current_learning_rate = learning_rate\r\n\t\tself.decay = decay\r\n\t\tself.iterations = 0\r\n\t\tself.epsilon = epsilon\r\n\t\tself.beta_1 = beta_1\r\n\t\tself.beta_2 = beta_2\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Optimizer_Adam'\r\n\r\n\t# Call once before any parameter updates \r\n\tdef pre_update_params(self):\r\n\t\tif self.decay:\r\n\t\t\tself.current_learning_rate = self.learning_rate * (1. / (1. 
+ self.decay *self.iterations))\r\n\r\n\t# Update parameters \r\n\tdef update_params(self, layer):\r\n\r\n\t\t# If layer does not contain momentum and cache arrays, create them filled with zeros\r\n\t\tif not hasattr(layer, 'weight_cache'):\r\n\t\t\tlayer.weight_momentums = np.zeros_like(layer.weights)\r\n\t\t\tlayer.weight_cache = np.zeros_like(layer.weights)\r\n\t\t\tlayer.bias_momentums = np.zeros_like(layer.biases)\r\n\t\t\tlayer.bias_cache = np.zeros_like(layer.biases)\r\n\r\n\t\t# Update Momentum with with current gradients\r\n\t\tlayer.weight_momentums = self.beta_1 * layer.weight_momentums + \\\r\n\t\t\t\t\t\t\t\t\t(1 - self.beta_1) * layer.dweights\r\n\t\tlayer.bias_momentums = self.beta_1 * layer.bias_momentums + \\\r\n\t\t\t\t\t\t\t\t\t(1 - self.beta_1) * layer.dbiases\t\t\r\n\t\t# Get corrected momentum\r\n\t\t# self.iteration is 0 at first pass and we need to start with 1 here\r\n\t\tweight_momentums_corrected = layer.weight_momentums / \\\r\n\t\t\t\t\t\t\t\t\t\t(1 - self.beta_1 ** (self.iterations + 1))\r\n\t\tbias_momentums_corrected = layer.bias_momentums / \\\r\n\t\t\t\t\t\t\t\t\t\t(1 - self.beta_1 **(self.iterations + 1))\r\n\t\t# Update cache with squared current gradients\r\n\t\tlayer.weight_cache = self.beta_2 * layer.weight_cache + \\\r\n\t\t\t\t\t\t\t (1 - self.beta_2) * layer.dweights ** 2\r\n\t\tlayer.bias_cache = self.beta_2 * layer.bias_cache + \\\r\n\t\t\t\t\t\t (1 - self.beta_2) * layer.dbiases ** 2\r\n\t\t# Get corrected cache\r\n\t\tweight_cache_corrected = layer.weight_cache / (1 - self.beta_2 ** (self.iterations + 1))\r\n\t\tbias_cache_corrected = layer.bias_cache / (1 - self.beta_2 ** (self.iterations + 1))\r\n\r\n\t\t# Vanilla SGD parameter update + normalization with square rooted cache\r\n\t\tlayer.weights += -self.current_learning_rate * weight_momentums_corrected / \\\r\n\t\t\t\t\t\t (np.sqrt(weight_cache_corrected) + self.epsilon)\r\n\t\tlayer.biases += -self.current_learning_rate * bias_momentums_corrected / \\\r\n\t\t\t\t\t\t(np.sqrt(bias_cache_corrected) + self.epsilon)\r\n\r\n\t# Call once after any parameter updates \r\n\tdef post_update_params(self):\r\n\t\tself.iterations += 1\r\n\r\n# Common loss class \r\nclass Loss:\r\n\r\n\t# Regularization loss calculation\r\n\tdef regularization_loss(self):\r\n\r\n\t\t# 0 by default\r\n\t\tregularization_loss = 0\r\n\r\n\t\t# Calculate regularization loss\r\n\t\t# iterate all trainable layers\r\n\t\tfor layer in self.trainable_layers:\r\n\r\n\t\t\t# L1 regularization - weithgts\r\n\t\t\t# calculate only when factor greater than 0\r\n\t\t\tif layer.weight_regularizer_l1 > 0:\r\n\t\t\t\tregularization_loss += layer.weight_regularizer_l1 * np.sum(np.abs(layer.weights))\r\n\r\n\t\t\t# L2 regularization - weights\r\n\t\t\tif layer.weight_regularizer_l2 > 0:\r\n\t\t\t\tregularization_loss += layer.weight_regularizer_l2 * np.sum(layer.weights * layer.weights)\r\n\r\n\t\t\t# L1 regularization - biases\r\n\t\t\t# calculate only when factor is greater than 0\r\n\t\t\tif layer.bias_regularizer_l1 > 0:\r\n\t\t\t\tregularization_loss += layer.bias_regularizer_l1 * np.sum(np.abs(layer.biases))\r\n\r\n\t\t\t# L2 regularization - biases\r\n\t\t\tif layer.bias_regularizer_l2 > 0:\r\n\t\t\t\tregularization_loss += layer.bias_regularizer_l2 * np.sum(layer.biases * layer.biases)\r\n\r\n\t\treturn regularization_loss\r\n\r\n\t# Set/remember trainable layers\r\n\tdef remember_trainable_layers(self, trainable_layers):\r\n\t\tself.trainable_layers = trainable_layers\r\n\r\n\t# Calculates the data and regularization 
losses\r\n\t# given model output and ground truth values\r\n\tdef calculate(self, output, y, *, include_regularization=False):\r\n\r\n\t\t# Calculate sample losses\r\n\t\tsample_losses = self.forward(output, y)\r\n\r\n\t\t# Calculate mean loss\r\n\t\tdata_loss = np.mean(sample_losses)\r\n\r\n\t\t# Add accumulated sum of losses and sample count\r\n\t\tself.accumulated_sum += np.sum(sample_losses)\r\n\t\tself.accumulated_count += len(sample_losses)\r\n\r\n\t\t# If just data loss - return it\r\n\t\tif not include_regularization:\r\n\t\t\treturn data_loss\r\n\r\n\t\t# Return the data and regularization losses\r\n\t\treturn data_loss, self.regularization_loss()\r\n\r\n\t# Calculate accumulated loss\r\n\tdef calculate_accumulated(self, *, include_regularization=False):\r\n\r\n\t\t# Calculate mean loss\r\n\t\tdata_loss = self.accumulated_sum / self.accumulated_count\r\n\r\n\t\t# If just data loss - return it\r\n\t\tif not include_regularization:\r\n\t\t\treturn data_loss\r\n\r\n\t\t# return the data and regularization losses\r\n\t\treturn data_loss, self.regularization_loss()\r\n\r\n\t# Reset variables for accumulated loss\r\n\tdef new_pass(self):\r\n\t\tself.accumulated_sum = 0\r\n\t\tself.accumulated_count = 0\r\n\r\n# Cross-entropy loss\r\nclass Loss_CategoricalCrossentropy(Loss):\r\n\r\n\t# Forward pass\r\n\tdef forward(self, y_pred, y_true):\r\n\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Loss_CategoricalCrossentropy'\r\n\r\n\t\t# Number of samples in a batch\r\n\t\tsamples = len(y_pred)\r\n\r\n\t\t# Clip data to prevent division by 0\r\n\t\t# Clip both sides to not drag mean towards any value\r\n\t\ty_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)\r\n\r\n\t\t# Probabilities for target values - only if categorical labels \r\n\t\tif len(y_true.shape) == 1:\r\n\t\t\tcorrect_confidences = y_pred_clipped[range(samples), y_true]\r\n\r\n\t\t# Mask values - only for one-hot encoded labels \r\n\t\telif len(y_true.shape) == 2:\r\n\t\t\tcorrect_confidences = np.sum(y_pred_clipped * y_true, axis=1)\r\n\r\n\t\t# Losses\r\n\t\tnegative_log_likelihoods = -np.log(correct_confidences)\r\n\t\treturn negative_log_likelihoods\r\n\r\n\t# Backwards pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\t\t# Number of labels in ervery sample\r\n\t\t# We'll use the first sample to count them\r\n\t\tlabels = len(dvalues[0])\r\n\r\n\t\t# If labels are sparse, turn them into one-hot vector\r\n\t\tif len(y_true.shape) == 1:\r\n\t\t\ty_true = np.eye(labels)[y_true]\r\n\r\n\t\t# Calculate gradient\r\n\t\tself.dinputs = -y_true / dvalues\r\n\t\t# Normalize gradient\r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Softmax classifier - combined Softmax activation and cross-entropy loss for faster backward step\r\nclass Activation_Softmax_Loss_CategoricalCrossentropy():\r\n\r\n\t# Backwards pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\r\n\t\t# If labels are one-hot encoded turn them into discrete values\r\n\t\tif len(y_true.shape) == 2:\r\n\t\t\ty_true = np.argmax(y_true, axis=1)\r\n\r\n\t\t# Copy so we can sagely modify \r\n\t\tself.dinputs = dvalues.copy()\r\n\t\t# Calculate gradient\r\n\t\tself.dinputs[range(samples), y_true] -= 1\r\n\t\t# Normalize gradient\r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Binary cross-entropy loss\r\nclass Loss_BinaryCrossentropy(Loss):\r\n\t\r\n\t# Forward pass\r\n\tdef forward(self, y_pred, y_true):\r\n\r\n\t\t# For stats remember name 
(Aloïs)\r\n\t\tself.stat = 'Loss_BinaryCrossentropy'\r\n\r\n\t\t# Clip data to prevent dicision by 0\r\n\t\t# Clip both sides to not drag mean towards any value\r\n\t\ty_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)\r\n\r\n\t\t# Calculate samle-wise loss\r\n\t\tsample_losses = -(y_true * np.log(y_pred_clipped) + (1 - y_true) * np.log(1 - y_pred_clipped))\r\n\t\tsample_losses = np.mean(sample_losses, axis= -1)\r\n\r\n\t\t# Return losses\r\n\t\treturn sample_losses\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\t\t# Number of outputs in every sample\r\n\t\t# We'll use the first sample to count them\r\n\t\toutputs = len(dvalues[0])\r\n\r\n\t\t# Clip data to prevent division by 0\r\n\t\t# Clip both sides to not drag mean towards any value\r\n\t\tclipped_dvalues = np.clip(dvalues, 1e-7, 1 - 1e-7)\r\n\r\n\t\t# Calculate gradient\r\n\t\tself.dinputs = -(y_true / clipped_dvalues - (1 - y_true) / (1 - clipped_dvalues)) / outputs\r\n\t\t# Normalize gradient\r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Mean Squarred Error loss\r\nclass Loss_MeanSquaredError(Loss):\r\n\t\r\n\t# Forward pass\r\n\tdef forward(self, y_pred, y_true):\r\n\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Loss_MeanSquaredError'\r\n\r\n\t\t# Calculate loss\r\n\t\tsample_losses = np.mean((y_true - y_pred)**2, axis=-1)\r\n\r\n\t\t# Return losses\r\n\t\treturn sample_losses\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\t\t# Number of outputs in every sample\r\n\t\t# We'll use the first sample to count them\r\n\t\toutputs = len(dvalues[0])\r\n\r\n\t\t# Gradient on values\r\n\t\tself.dinputs = -2 * (y_true - dvalues) / outputs\r\n\t\t# Normalize gradient\r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Mean Absolute Error loss\r\nclass Loss_MeanAbsoluteError(Loss):\r\n\t\r\n\t# Forward pass\r\n\tdef forward(self, y_pred, y_true):\r\n\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Loss_MeanAbsoluteError'\r\n\r\n\t\t# Calculate loss\r\n\t\tsample_losses = np.mean(np.abs(y_true - y_pred), axis=-1)\r\n\r\n\t\t# Return losses\r\n\t\treturn sample_losses\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\t\t# Number of outputs in every sample\r\n\t\t# We'll use the first sample to count them\r\n\t\toutputs = len(dvalues[0])\r\n\r\n\t\t# Calculate gradient\r\n\t\tself.dinputs = np.sign(y_true - dvalues) / outputs\r\n\t\t# Normalize gradient \r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Common accuracy class\r\nclass Accuracy:\r\n\r\n\t# Calculate an accuracy\r\n\t# given predictions and ground truth values\r\n\tdef calculate(self, predictions, y):\r\n\r\n\t\t# Get comparison results\r\n\t\tcomparisons = self.compare(predictions, y)\r\n\r\n\t\t# Calculate an accuracy\r\n\t\taccuracy = np.mean(comparisons)\r\n\r\n\t\t# Add accumulated sum of matching values and sample count\r\n\t\tself.accumulated_sum += np.sum(comparisons)\r\n\t\tself.accumulated_count += len(comparisons)\r\n\r\n\t\t# Return accuracy\r\n\t\treturn accuracy\r\n\r\n\t# Calculate accumulated accuracy\r\n\tdef calculate_accumulated(self):\r\n\r\n\t\t# Calculate an accuracy\r\n\t\taccuracy = self.accumulated_sum / self.accumulated_count\r\n\r\n\t\t# Return the data and regularization losses\r\n\t\treturn accuracy\r\n\r\n\t# Reset variables for accumulated accuracy\r\n\tdef 
new_pass(self):\r\n\t\tself.accumulated_sum = 0\r\n\t\tself.accumulated_count = 0\r\n\r\n# Accuracy calculation for classification model\r\nclass Accuracy_Categorical(Accuracy):\r\n\r\n\tdef __init__(self, *, binary=False):\r\n\t\t# Binary model?\r\n\t\tself.binary = binary\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Accuracy_Categorical'\r\n\r\n\t# No initialization is needed\r\n\tdef init(self, y):\r\n\t\t# Needs to exist because it's called automatically\r\n\t\tpass\r\n\r\n\t# Compares predictions to the ground truth values\r\n\tdef compare(self, predictions, y):\r\n\t\tif not self.binary and len(y.shape) == 2:\r\n\t\t\ty = np.argmax(y, axis=1)\r\n\t\treturn predictions == y\r\n\r\n# Accuracy calculation for regression model\r\nclass Accuracy_Regression(Accuracy):\r\n\r\n\tdef __init__(self):\r\n\t\t# Create precision property\r\n\t\tself.precision = None\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Accuracy_Regression'\r\n\r\n\t# Calculates precision value based on passed-in ground truth values\r\n\tdef init(self, y, reinit=False):\r\n\t\tif self.precision is None or reinit:\r\n\t\t\tself.precision = np.std(y) / 250\r\n\r\n\t# Compares predictions to the ground truth values\r\n\tdef compare(self, predictions, y):\r\n\t\treturn np.absolute(predictions - y) < self.precision\r\n\r\n# Model class\r\nclass Model:\r\n\r\n\tdef __init__(self):\r\n\t\t# Create a list of network objects\r\n\t\tself.layers = []\r\n\t\t# Softmax calssifier's output object\r\n\t\tself.softmax_classifier_output = None\r\n\r\n\t# Add objects to the model\r\n\tdef add(self, layer):\r\n\t\tself.layers.append(layer)\r\n\r\n\t# Set loss, optimizer and accuracy\r\n\tdef set(self, *, loss=None, optimizer=None, accuracy=None): \r\n\t\t\r\n\t\tif loss is not None:\r\n\t\t\tself.loss = loss\r\n\r\n\t\tif optimizer is not None:\r\n\t\t\tself.optimizer = optimizer\r\n\t\t\r\n\t\tif accuracy is not None:\r\n\t\t\tself.accuracy = accuracy\r\n\r\n\t# Finalize the model\r\n\tdef finalize(self):\r\n\r\n\t\t# Create and set the input layer\r\n\t\tself.input_layer = Layer_Input()\r\n\r\n\t\t# Count all the objects\r\n\t\tlayer_count = len(self.layers)\r\n\r\n\t\t# Initialize a list containing trainable layers:\r\n\t\tself.trainable_layers = []\r\n\r\n\t\t# Iterate the objects\r\n\t\tfor i in range(layer_count):\r\n\r\n\t\t\t# If it's the first layer\r\n\t\t\t# the previous layer object is the input layer\r\n\t\t\tif i==0:\r\n\t\t\t\tself.layers[i].prev = self.input_layer\r\n\t\t\t\tself.layers[i].next = self.layers[i+1]\r\n\r\n\t\t\t# All layers except for the first and the last\r\n\t\t\telif i < layer_count -1:\r\n\t\t\t\tself.layers[i].prev = self.layers[i-1]\r\n\t\t\t\tself.layers[i].next = self.layers[i+1]\r\n\r\n\t\t\t# The last layer - the next object is the loss\r\n\t\t\t# Also let's save aside the reference to the last object whose output is the model's output\r\n\t\t\telse:\r\n\t\t\t\tself.layers[i].prev = self.layers[i-1]\r\n\t\t\t\tself.layers[i].next = self.loss\r\n\t\t\t\tself.output_layer_activation = self.layers[i]\r\n\r\n\t\t\t# If layer contains an attribute called \"weights\", it's a trainable alyer - \r\n\t\t\t# add it to the list of trainable layers\r\n\t\t\t# We don't need to check for biases - checking for weights is enough\r\n\t\t\tif hasattr(self.layers[i], 'weights'):\r\n\t\t\t\tself.trainable_layers.append(self.layers[i])\r\n\r\n\t\t# Update loss object with trainable layers\r\n\t\tif self.loss is not 
None:\r\n\t\t\tself.loss.remember_trainable_layers(self.trainable_layers)\r\n\r\n\t\t# If output activation is Softmax and loss function is Categorical Cross-Entropy\r\n\t\t# create an object of combined activation and loss function containing\r\n\t\t# faster gradient calculation\r\n\t\tif isinstance(self.layers[-1], Activation_Softmax) and \\\r\n\t\t isinstance(self.loss, Loss_CategoricalCrossentropy):\r\n\t\t\t# Create an object of combined activation and loss functions\r\n\t\t\tself.softmax_classifier_output = \\\r\n\t\t\t\tActivation_Softmax_Loss_CategoricalCrossentropy()\r\n\r\n\t# Train the model\r\n\tdef train(self, X, y, *, epochs=1, batch_size=None, print_every=1, validation_data=None):\r\n\r\n\t\t# Initialize accuracy object\r\n\t\tself.accuracy.init(y)\r\n\r\n\t\t# Default value if batch size is not set\r\n\t\ttrain_steps = 1\r\n\r\n\t\t# If there is validation data passed, set default number of steps for validation as well\r\n\t\tif validation_data is not None:\r\n\t\t\tvalidation_steps = 1\r\n\r\n\t\t\t# For better readability\r\n\t\t\tX_val, y_val = validation_data\r\n\r\n\t\t# Calculate number of steps\r\n\t\tif batch_size is not None:\r\n\t\t\ttrain_steps = len(X) // batch_size\r\n\t\t\t# Dividing rounds down. If there are some remaining data, but not a full batch,\r\n\t\t\t# this won't include it. Add 1 to include this not full batch\r\n\t\t\tif train_steps * batch_size < len(X):\r\n\t\t\t\ttrain_steps += 1\r\n\r\n\t\t\tif validation_data is not None:\r\n\t\t\t\tvalidation_steps = len(X_val) // batch_size\r\n\t\t\t\t# Dividing rounds down. If there are some remaining data, but not a full batch,\r\n\t\t\t\t# this won't include it. Add 1 to include this not full batch\r\n\t\t\t\tif validation_steps * batch_size < len(X_val):\r\n\t\t\t\t\tvalidation_steps += 1\r\n\r\n\t\t# Main training loop\r\n\t\tfor epoch in range(1, epochs+1):\r\n\r\n\t\t\t# Prit epoch number\r\n\t\t\tprint(f'epoch: {epoch}')\r\n\r\n\t\t\t# Reset accumulated values in loss and accuracy objects\r\n\t\t\tself.loss.new_pass()\r\n\t\t\tself.accuracy.new_pass()\r\n\r\n\t\t\t# Iterate over steps\r\n\t\t\tfor step in range(train_steps):\r\n\r\n\t\t\t\t# If batch size is not set - train using one step and full dataset\r\n\t\t\t\tif batch_size is None:\r\n\t\t\t\t\tbatch_X = X\r\n\t\t\t\t\tbatch_y = y \r\n\r\n\t\t\t\t# Otherwise slice a batch\r\n\t\t\t\telse:\r\n\t\t\t\t\t\tbatch_X = X[step*batch_size:(step+1)*batch_size]\r\n\t\t\t\t\t\tbatch_y = y[step*batch_size:(step+1)*batch_size]\r\n\r\n\t\t\t\t# Perform the forward pass\r\n\t\t\t\toutput = self.forward(batch_X, training=True)\r\n\r\n\t\t\t\t# Calculate loss\r\n\t\t\t\tdata_loss, regularization_loss = \\\r\n\t\t\t\t\tself.loss.calculate(output, batch_y, include_regularization=True)\r\n\t\t\t\tloss = data_loss + regularization_loss\r\n\r\n\t\t\t\t# Get predictions and calculate an accuracy\r\n\t\t\t\tpredictions = self.output_layer_activation.predictions(output)\r\n\t\t\t\taccuracy = self.accuracy.calculate(predictions, batch_y)\r\n\t\t\t\t\r\n\t\t\t\t# Perform a backward pass\r\n\t\t\t\tself.backward(output, batch_y)\r\n\r\n\t\t\t\t# Optimize (update parameters)\r\n\t\t\t\tself.optimizer.pre_update_params()\r\n\t\t\t\tfor layer in self.trainable_layers:\r\n\t\t\t\t\tself.optimizer.update_params(layer)\r\n\t\t\t\tself.optimizer.post_update_params()\r\n\r\n\t\t\t\t# Print a summary\r\n\t\t\t\tif not step % print_every or step == train_steps - 1:\r\n\t\t\t\t\tprint(f'step: {step}, ' +\r\n\t\t\t\t\t\t f'acc: {accuracy:.3f}, ' +\r\n\t\t\t\t\t\t f'loss: {loss:.3f} 
(' +\r\n\t\t\t\t\t\t f'data_loss: {data_loss:.3f}, ' +\r\n\t\t\t\t\t\t f'reg_loss: {regularization_loss:.3f}), ' +\r\n\t\t\t\t\t\t f'lr: {self.optimizer.current_learning_rate}')\r\n\r\n\t\t\t\t# Store stats for overall summary\r\n\t\t\t\tloss_list.append(loss)\r\n\t\t\t\taccuracy_list.append(accuracy)\r\n\t\t\t\tlr_list.append(self.optimizer.current_learning_rate)\r\n\r\n\t\t\t# Get and print epoch loss and accuracy\r\n\t\t\tepoch_data_loss, epoch_regularization_loss = \\\r\n\t\t\t\tself.loss.calculate_accumulated(include_regularization=True)\r\n\t\t\tepoch_loss = epoch_data_loss + epoch_regularization_loss\r\n\t\t\tepoch_accuracy = self.accuracy.calculate_accumulated()\r\n\r\n\t\t\tprint(f'training, ' +\r\n\t\t\t\t f'acc: {epoch_accuracy:.3f}, ' +\r\n\t\t\t\t f'loss: {epoch_loss:.3f} (' +\r\n\t\t\t\t f'data_loss: {epoch_data_loss:.3f}, ' +\r\n\t\t\t\t f'reg_loss: {epoch_regularization_loss:.3f}), ' +\r\n\t\t\t\t f'lr: {self.optimizer.current_learning_rate}')\r\n\r\n\r\n\t\t\t# If there is the validation data\r\n\t\t\tif validation_data is not None:\r\n\r\n\t\t\t\t# Evaluate the model\r\n\t\t\t\tself.evaluate(*validation_data, batch_size=batch_size)\r\n\r\n\t# Performs forward pass\r\n\tdef forward(self, X, training):\r\n\r\n\t\t# Call forward method on the input layer this will set the output property that\r\n\t\t# the first layer in \"prev\" object is expecting\r\n\t\tself.input_layer.forward(X, training)\r\n\r\n\t\t# Call forward method of every object in a chain \r\n\t\t# Pass output of the previous object as a parameter\r\n\t\tfor layer in self.layers:\r\n\t\t\tlayer.forward(layer.prev.output, training)\r\n\r\n\t\t# \"layer\" is now the last object from the list\r\n\t\t# return its output\r\n\t\treturn layer.output\r\n\r\n\t# Performs backward pass\r\n\tdef backward(self, output, y):\r\n\r\n\t\t# If softmax classifier\r\n\t\tif self.softmax_classifier_output is not None:\r\n\t\t\t# First call backward method on the combined activation/loss\r\n\t\t\t# this will set dinputs properly\r\n\t\t\tself.softmax_classifier_output.backward(output, y)\r\n\r\n\t\t\t# Since we'll not call backward method of the last layer\r\n\t\t\t# which is Softmax activation as we used combined activation/loss\r\n\t\t\t# object, let's set dinputs in this object \r\n\t\t\tself.layers[-1].dinputs = \\\r\n\t\t\t\tself.softmax_classifier_output.dinputs\r\n\r\n\t\t\t# Call backward method going through all the objects but last\r\n\t\t\t# in reversed order passing dinputs as a parameter\r\n\t\t\tfor layer in reversed(self.layers[:-1]):\r\n\t\t\t\tlayer.backward(layer.next.dinputs)\r\n\r\n\t\t\treturn\r\n\r\n\t\t# First call backward method on the loss this will set dinputs property\r\n\t\t# that the last layer will try to access shortly\r\n\t\tself.loss.backward(output, y)\r\n\r\n\t\t# Call backward method going through all the objects in reversed order\r\n\t\t# passing dipunpts as a parameter\r\n\t\tfor layer in reversed(self.layers):\r\n\t\t\tlayer.backward(layer.next.dinputs)\r\n\r\n\t# Evaluates the model using passed-in dataset\r\n\tdef evaluate(self, X_val, y_val, *, batch_size=None):\r\n\r\n\t\t# Default value if batch size is not being set\r\n\t\tvalidation_steps = 1\r\n\r\n\t\t# Calculate number of steps\r\n\t\tif batch_size is not None:\r\n\t\t\tvalidation_steps = len(X_val) // batch_size\r\n\t\t\t# Dividing rounds down. 
If there are some remaining data,\r\n\t\t\t# but not a full batch, this won't include it \r\n\t\t\t# Add '1' to include this not full batch\r\n\t\t\tif validation_steps * batch_size < len(X_val):\r\n\t\t\t\tvalidation_steps += 1\r\n\r\n\t\t# Reset accumulated values in loss and accuracy objects\r\n\t\tself.loss.new_pass()\r\n\t\tself.accuracy.new_pass()\r\n\r\n\t\t# Iterate over steps\r\n\t\tfor step in range(validation_steps):\r\n\r\n\t\t\t# If batch size is not set - train using one step and full dataset\r\n\t\t\tif batch_size is None:\r\n\t\t\t\tbatch_X = X_val\r\n\t\t\t\tbatch_y = y_val\r\n\r\n\t\t\t# Otherwise slice a batch\r\n\t\t\telse:\r\n\t\t\t\tbatch_X = X_val[step*batch_size:(step+1)*batch_size]\r\n\t\t\t\tbatch_y = y_val[step*batch_size:(step+1)*batch_size]\r\n\r\n\t\t\t# Perform the forward pass\r\n\t\t\toutput = self.forward(batch_X, training=False)\r\n\r\n\t\t\t# Calculate the los\r\n\t\t\tself.loss.calculate(output, batch_y)\r\n\r\n\t\t\t# Get predictions and calculate an accuracy\r\n\t\t\tpredictions = self.output_layer_activation.predictions(output)\r\n\t\t\tself.accuracy.calculate(predictions, batch_y)\r\n\r\n\t\t# Get and print validation loss and accuracy\r\n\t\tvalidation_loss = self.loss.calculate_accumulated()\r\n\t\tvalidation_accuracy = self.accuracy.calculate_accumulated()\r\n\r\n\t\t# Print a summary\r\n\t\tprint(f'validation, ' +\r\n\t\t\t f'acc: {validation_accuracy:.3f}, ' +\r\n\t\t\t f'loss: {validation_loss:.3f}')\r\n\r\n\t# Predicts onthe samples\r\n\tdef predict(self, X, *, batch_size=None):\r\n\r\n\t\t# Default value if batch size is not being set\r\n\t\tprediction_steps = 1\r\n\r\n\t\t# Calculate number of steps\r\n\t\tif batch_size is not None:\r\n\t\t\tprediction_steps = len(X) // batch_size\r\n\t\t\t# Dividing rounds down. 
If there are some remaining data,\r\n\t\t\t# but not a full batch, this won't include it \r\n\t\t\t# Add '1' to include this not full batch\r\n\t\t\tif prediction_steps * batch_size < len(X):\r\n\t\t\t\tprediction_steps += 1\r\n\r\n\t\t# Model outputs\r\n\t\toutput = []\r\n\r\n\t\t# Iterate over steps \r\n\t\tfor step in range(prediction_steps):\r\n\r\n\t\t\t# If batch size is not set - train ussing one step and full dataset\r\n\t\t\tif batch_size is None:\r\n\t\t\t\tbatch_X = X\r\n\r\n\t\t\t# Otherwise slice a batch \r\n\t\t\telse:\r\n\t\t\t\tbatch_X = X[step*batch_size:(step+1)*batch_size]\r\n\r\n\t\t\t# Perform the forward pass \r\n\t\t\tbatch_output = self.forward(batch_X, training=False)\r\n\r\n\t\t\t# Append batch prediciton to the list of predictions\r\n\t\t\toutput.append(batch_output)\r\n\r\n\t\t# Stack and return results\r\n\t\treturn np.vstack(output)\r\n\r\n\t# Retrieves and returns parameters of trainable layers\r\n\tdef get_parameters(self):\r\n\r\n\t\t# Create a list for parameters\r\n\t\tparameters = []\r\n\r\n\r\n\t\t# Iterate trainable layers and get their parameters\r\n\r\n\t\tfor layer in self.trainable_layers:\r\n\t\t\tparameters.append(layer.get_parameters())\r\n\r\n\t\t# Return a list\r\n\t\treturn parameters\r\n\r\n\t# Updates the model with new parameters\r\n\tdef set_parameters(self, parameters):\r\n\r\n\t\t# Iterate over the parameters and layers\r\n\t\t# and update each layers with each set of the parameters\r\n\t\tfor parameters_set, layer in zip(parameters, self.trainable_layers):\r\n\t\t\tlayer.set_parameters(*parameters_set)\r\n\r\n\t# Saves the parameters to a file\r\n\tdef save_parameters(self, path):\r\n\r\n\t\t# Open a file in the binary-write mode and save parameters to it\r\n\t\twith open(path, 'wb') as f:\r\n\t\t\tpickle.dump(self.get_parameters(), f)\r\n\r\n\t# Load the weights and updates a model instance with them\r\n\tdef load_parameters(self, path):\r\n\r\n\t\t# Open file in the binary-read mode, load weights and update trainable layers\r\n\t\twith open(path, 'rb') as f:\r\n\t\t\tself.set_parameters(pickle.load(f))\r\n\r\n\t# Saves the model\r\n\tdef save(self, path):\r\n\r\n\t\t# Make a deep copy of current model instance\r\n\t\tmodel = copy.deepcopy(self)\r\n\r\n\t\t# Reset accumulated values in loss and accuracy objects\r\n\t\tmodel.loss.new_pass()\r\n\t\tmodel.accuracy.new_pass()\r\n\r\n\t\t# Remove data from input layer and gradients from the loss object\r\n\t\tmodel.input_layer.__dict__.pop('output', None)\r\n\t\tmodel.loss.__dict__.pop('dinputs', None)\r\n\r\n\t\t# For each layer remove inputs, output and dinputs properties\r\n\t\tfor layer in model.layers:\r\n\t\t\tfor property in ['inputs', 'output', 'dinputs', 'dweights', 'dbiases']:\r\n\t\t\t\tlayer.__dict__.pop(property, None)\r\n\r\n\t\t# Open a file in the binary-write mode and save the model\r\n\t\twith open(path, 'wb') as f:\r\n\t\t\tpickle.dump(model, f)\r\n\r\n\t# Outputs stats about the model (Own)\r\n\tdef stats(self, sigma, path_name=None):\r\n\r\n\t\t# Print stats\r\n\t\t# Other layers\r\n\t\tlay = [self.loss, self.optimizer] # self.accuracy can be added\r\n\t\tle = len(self.layers) + len(lay)\r\n\t\tl = len(self.layers)\r\n\r\n\r\n\t\t# Set figure up\r\n\t\tfig = plt.figure(constrained_layout=True,figsize=(20,10))\r\n\t\tout_gs = fig.add_gridspec(2,7)\r\n\r\n\t\t# Get weights and biases\r\n\t\tweights = []\r\n\t\tbiases = []\r\n\t\tfor layer in 
self.trainable_layers:\r\n\t\t\tweights.append(layer.weights.tolist())\r\n\t\t\tbiases.append(layer.biases.tolist())\r\n\r\n\t\tf_ax1 = fig.add_subplot(out_gs[0,0])\r\n\t\tf_ax1.set_title('Model Struktur:')\r\n\t\tlayer_name = []\r\n\t\tfor i in range(le):\r\n\t\t\tif i < l:\r\n\t\t\t\tf_ax1.text(0.1,1-(i+1.5)*(1/(le+1)),f'Layer{i}: {self.layers[i].stat}')\r\n\t\t\t\tlayer_name.append(self.layers[i].stat)\r\n\t\t\telse:\r\n\t\t\t\tf_ax1.text(0.1,1-(i+1.5)*(1/(le+1)),f'Layer{i}: {lay[i-l].stat}')\r\n\t\t\t\tlayer_name.append(lay[i-l].stat)\r\n\r\n\t\tf_ax1.set_axis_off()\r\n\t\tf_ax2 = fig.add_subplot(out_gs[0,2:4])\r\n\t\tf_ax2.plot([np.average(loss_list[i:i+sigma]) for i in range(len(loss_list))])\r\n\t\tf_ax2.set_title(\"Loss\")\r\n\t\tf_ax2.set_xlabel(\"Steps\")\r\n\t\tf_ax3 = fig.add_subplot(out_gs[1,0:2])\r\n\t\tf_ax3.plot([np.average(lr_list[i:i+sigma]) for i in range(len(lr_list))])\r\n\t\tf_ax3.set_title(\"Learning_rate\")\r\n\t\tf_ax3.set_xlabel(\"Steps\")\r\n\t\tf_ax4 = fig.add_subplot(out_gs[1,2:4])\r\n\t\tf_ax4.plot([np.average(accuracy_list[i:i+sigma]) for i in range(len(accuracy_list))])\r\n\t\tf_ax4.set_title(\"Accuracy\")\r\n\t\tf_ax4.set_xlabel(\"Steps\")\r\n\r\n\t\tf_ax5 = fig.add_subplot(out_gs[0,5])\r\n\t\tf_ax5.set_title(\"Weights\")\r\n\t\tf_ax5.xaxis.set_visible(False)\r\n\t\tf_ax5.yaxis.set_visible(False)\r\n\t\tf_ax5.spines[\"left\"].set_color(\"white\")\r\n\t\tf_ax5.spines[\"right\"].set_color(\"white\")\r\n\t\tf_ax5.spines[\"bottom\"].set_color(\"white\")\r\n\t\tf_ax5.spines[\"top\"].set_color(\"white\")\r\n\r\n\t\tf_ax5_inner = out_gs[:2,5].subgridspec(1,len(weights))\r\n\t\taxs5 = f_ax5_inner.subplots(sharey=True)\r\n\t\tfor a, ax5 in np.ndenumerate(axs5):\t\t\t\t\r\n\t\t\tax5.pcolormesh(np.arange(0,len(weights[a[0]][0])+1,1),np.arange(0,len(weights[a[0]])+1,1),\r\n\t\t\t\t\t\t weights[a[0]], cmap=plt.get_cmap('seismic'))\r\n\t\t\tax5.set_title(f\"\\n Layer {a[0]+1}\")\r\n\t\t\tax5.set_xlabel(\"Neuron\")\r\n\r\n\t\tf_ax6 = fig.add_subplot(out_gs[0,6])\r\n\t\tf_ax6.set_title(\"Biases\")\r\n\t\tf_ax6.xaxis.set_visible(False)\r\n\t\tf_ax6.yaxis.set_visible(False)\r\n\t\tf_ax6.spines[\"left\"].set_color(\"white\")\r\n\t\tf_ax6.spines[\"right\"].set_color(\"white\")\r\n\t\tf_ax6.spines[\"bottom\"].set_color(\"white\")\r\n\t\tf_ax6.spines[\"top\"].set_color(\"white\")\r\n\r\n\t\tf_ax6_inner = out_gs[:2,6].subgridspec(1,len(biases))\r\n\t\taxs6 = f_ax6_inner.subplots(sharey=True)\r\n\t\tfor b, ax6 in np.ndenumerate(axs6):\r\n\t\t\t#norm = cl.Normalize(vmin=biases[b[0]].min(),vmax=biases[b[0]].max())\t\t\t\r\n\t\t\t# pcm = ax6.pcolormesh(np.arange(0,2,1),np.arange(0,len(biases[b[0]][0])+1,1),\r\n\t\t\t# \t\t\t np.array(biases[b[0]]).T, cmap=plt.get_cmap('seismic'), norm=norm)\r\n\t\t\t# fig.colorbar(pcm, ax=ax6, location=\"right\")\r\n\t\t\tax6.pcolormesh(np.arange(0,2,1),np.arange(0,len(biases[b[0]][0])+1,1),\r\n\t\t\t \t\t\t np.array(biases[b[0]]).T, cmap=plt.get_cmap('seismic'))\r\n\t\t\tax6.set_title(f\"\\n Layer {b[0]+1}\")\r\n\t\t\tif b[0]==0:\r\n\t\t\t\tax6.set_ylabel(\"Neuron\")\r\n\t\t\r\n\t\t# Save in a folder\r\n\t\tos.mkdir(path_name)\r\n\t\tfull_path_name = str(path_name) + '/'\r\n\r\n\t\t# Save the figure\r\n\t\tpath_name_png = str(full_path_name) + 'figure.PNG'\r\n\t\tplt.savefig(path_name_png)\r\n\t\tplt.show()\r\n\t\t\r\n\t\tpath_name_model = str(full_path_name) + 'Network.model'\r\n\t\tself.save(path_name_model)\r\n\r\n\t\tstatistics = np.array([layer_name, loss_list, lr_list, accuracy_list, weights, biases])\r\n\t\twith 
open(str(full_path_name)+'weibia', 'wb') as f:\r\n\t\t\tpickle.dump(statistics, f)\r\n\t\t\r\n\r\n\t# Loads and returns a model\r\n\t@staticmethod\r\n\tdef load(path):\r\n\r\n\t\t# Open file in the binary-read mode, load a model\r\n\t\twith open(path, 'rb') as f:\r\n\t\t\tmodel = pickle.load(f)\r\n\r\n\t\t# Return a model\r\n\t\treturn model\r\n\r\n\r\n# Loads a MNIST dataset\r\ndef load_mnist_dataset(dataset, path):\r\n\r\n\t# Scan all the directories and create a list of labels\r\n\tlabels = os.listdir(os.path.join(path, dataset))\r\n\r\n\t# Create lists for the samples and labels\r\n\tX = []\r\n\ty = []\r\n\r\n\t# For each label folder \r\n\tfor label in labels:\r\n\t\t# And for each image in given folder\r\n\t\tfor file in os.listdir(os.path.join(path, dataset, label)):\r\n\t\t\t# Read the image \r\n\t\t\timage = cv2.imread(os.path.join(path, dataset, label, file), cv2.IMREAD_GRAYSCALE)\r\n\r\n\t\t\tprint(label)\r\n\r\n\t\t\t# And append it and a label to the lists\r\n\t\t\tX.append(image)\r\n\t\t\ty.append(label)\r\n\r\n\t# Convert the data to proper numpy arrays and return \r\n\treturn np.array(X), np.array(y).astype('uint8') # say that y is int and not float\r\n\r\n# MNIST dataset (train + test)\r\ndef create_data_mnist(path):\r\n\r\n\t# Load both sets seperately\r\n\tX, y = load_mnist_dataset('train', path)\r\n\tX_test, y_test = load_mnist_dataset('test', path)\r\n\r\n\t# And return all the data\r\n\treturn X, y, X_test, y_test\r\n" ]
[ [ "numpy.sum", "numpy.ones_like", "numpy.log", "numpy.vstack", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.abs", "numpy.empty_like", "matplotlib.pyplot.get_cmap", "numpy.absolute", "numpy.average", "numpy.mean", "numpy.sqrt", "numpy.eye", "numpy.zeros", "numpy.argmax", "numpy.arange", "numpy.max", "numpy.maximum", "numpy.std", "numpy.random.binomial", "numpy.zeros_like", "numpy.sign", "numpy.random.randn", "numpy.exp", "numpy.ndenumerate", "numpy.clip", "matplotlib.pyplot.show", "numpy.diagflat", "numpy.array", "numpy.dot" ] ]
amitkumarj441/hafnian
[ "3d0b79c77180db7e415b96826707f8049d690208" ]
[ "thewalrus/tests/test_hermite_multidimensional.py" ]
[ "# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the batch hafnian wrapper function\"\"\"\n# pylint: disable=no-self-use,redefined-outer-name\nfrom itertools import product\n\nimport numpy as np\n\nfrom scipy.special import eval_hermitenorm, eval_hermite\n\nfrom thewalrus import hermite_multidimensional, hafnian_batched, hafnian_repeated\n\n\ndef test_hermite_multidimensional_renorm():\n \"\"\" This tests the renormalized batchhafnian wrapper function to compute photon number statistics for a fixed gaussian state.\n\t\"\"\"\n B = np.sqrt(0.5) * np.array([[0, 1], [1, 0]]) + 0 * 1j\n res = 10\n expected = np.diag(0.5 ** (np.arange(0, res) / 2))\n array = hermite_multidimensional(-B, res, renorm=True)\n\n assert np.allclose(array, expected)\n\n\ndef test_reduction_to_physicists_polys():\n \"\"\"Tests that the multidimensional hermite polynomials reduce to the regular physicists' hermite polynomials in the appropriate limit\"\"\"\n x = np.arange(-1, 1, 0.1)\n init = 1\n n_max = 5\n A = np.ones([init, init], dtype=complex)\n vals = np.array(\n [hermite_multidimensional(2 * A, n_max, y=np.array([x0], dtype=complex)) for x0 in x]\n ).T\n expected = np.array([eval_hermite(i, x) for i in range(len(vals))])\n assert np.allclose(vals, expected)\n\n\ndef test_reduction_to_probabilist_polys():\n \"\"\"Tests that the multidimensional hermite polynomials reduce to the regular probabilist' hermite polynomials in the appropriate limit\"\"\"\n x = np.arange(-1, 1, 0.1)\n init = 1\n n_max = 5\n A = np.ones([init, init], dtype=complex)\n vals = np.array(\n [hermite_multidimensional(A, n_max, y=np.array([x0], dtype=complex)) for x0 in x]\n ).T\n expected = np.array([eval_hermitenorm(i, x) for i in range(len(vals))])\n assert np.allclose(vals, expected)\n\n\ndef test_hafnian_batched():\n \"\"\"Test hafnian_batched against hafnian_repeated for a random symmetric matrix\"\"\"\n n_modes = 4\n A = np.random.rand(n_modes, n_modes) + 1j * np.random.rand(n_modes, n_modes)\n A += A.T\n n_photon = 5\n v1 = np.array([hafnian_repeated(A, q) for q in product(np.arange(n_photon), repeat=n_modes)])\n assert np.allclose(hafnian_batched(A, n_photon, make_tensor=False), v1)\n\n\ndef test_hafnian_batched_loops():\n \"\"\"Test hafnian_batched with loops against hafnian_repeated with loops for a random symmetric matrix\n and a random vector of loops\n \"\"\"\n n_modes = 4\n A = np.random.rand(n_modes, n_modes) + 1j * np.random.rand(n_modes, n_modes)\n A += A.T\n mu = np.random.rand(n_modes) + 1j * np.random.rand(n_modes)\n n_photon = 5\n v1 = np.array(\n [\n hafnian_repeated(A, q, mu=mu, loop=True)\n for q in product(np.arange(n_photon), repeat=n_modes)\n ]\n )\n expected = hafnian_batched(A, n_photon, mu=mu, make_tensor=False)\n\n assert np.allclose(expected, v1)\n\n\ndef test_hafnian_batched_loops_no_edges():\n \"\"\"Test hafnian_batched with loops against hafnian_repeated with loops for a random symmetric matrix\n and a random vector of loops\n \"\"\"\n n_modes = 
4\n A = np.zeros([n_modes, n_modes], dtype=complex)\n mu = np.random.rand(n_modes) + 1j * np.random.rand(n_modes)\n n_photon = 5\n v1 = np.array(\n [\n hafnian_repeated(A, q, mu=mu, loop=True)\n for q in product(np.arange(n_photon), repeat=n_modes)\n ]\n )\n expected = hafnian_batched(A, n_photon, mu=mu, make_tensor=False)\n\n assert np.allclose(expected, v1)\n\n\ndef test_hafnian_batched_zero_loops_no_edges():\n \"\"\"Test hafnian_batched with loops against hafnian_repeated with loops for a the zero matrix\n and a loops\n \"\"\"\n n_modes = 4\n A = np.zeros([n_modes, n_modes], dtype=complex)\n n_photon = 5\n v1 = np.array(\n [hafnian_repeated(A, q, loop=True) for q in product(np.arange(n_photon), repeat=n_modes)]\n )\n expected = hafnian_batched(A, n_photon, make_tensor=False)\n\n assert np.allclose(expected, v1)\n" ]
[ [ "numpy.allclose", "numpy.ones", "numpy.zeros", "scipy.special.eval_hermitenorm", "numpy.arange", "numpy.random.rand", "numpy.sqrt", "numpy.array", "scipy.special.eval_hermite" ] ]
AdityaNG/cone-detector-tf
[ "f2eede83caf64753c7b70b3ce017a26d8903469c" ]
[ "cone_detector_lib.py" ]
[ "from __future__ import division\n\nimport argparse\nimport logging.config\nimport os\nimport time\n\nimport cv2\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nfrom utils import cv_utils\nfrom utils import operations as ops\nfrom utils import tf_utils\n\nlogging.config.fileConfig('logging.ini')\n\nFROZEN_GRAPH_PATH = 'models/ssd_mobilenet_v1/frozen_inference_graph.pb'\n\nSCORE_THRESHOLD = 0.5\nNON_MAX_SUPPRESSION_THRESHOLD = 0.5\n\n\nclass ConeDetector:\n\n def __init__(self) -> None:\n #detection_graph = tf_utils.load_model(FROZEN_GRAPH_PATH)\n\n #self.sess = tf.Session(graph=detection_graph)\n pass\n\n def ispath(path):\n if not os.path.exists(path):\n raise argparse.ArgumentTypeError('No such file or directory: ' + path)\n else:\n return path\n\n def find_cones(self, img, crop_size=None):\n # Read TensorFlow graph\n detection_graph = tf_utils.load_model(FROZEN_GRAPH_PATH)\n\n with tf.Session(graph=detection_graph) as sess:\n tic = time.time()\n\n boxes = []\n\n if crop_size:\n crop_height = crop_width = crop_size\n crop_step_vertical = crop_step_horizontal = crop_size - 20\n crops, crops_coordinates = ops.extract_crops(\n img, crop_height, crop_width,\n crop_step_vertical, crop_step_horizontal)\n\n detection_dict = tf_utils.run_inference_for_batch(crops, sess)\n\n for box_absolute, boxes_relative in zip(\n crops_coordinates, detection_dict['detection_boxes']):\n boxes.extend(ops.get_absolute_boxes(\n box_absolute,\n boxes_relative[np.any(boxes_relative, axis=1)]))\n\n boxes = np.vstack(boxes)\n boxes = ops.non_max_suppression_fast(\n boxes, NON_MAX_SUPPRESSION_THRESHOLD)\n else:\n detection_dict = tf_utils.run_inference_for_batch(\n np.expand_dims(img, axis=0), sess)\n boxes = detection_dict['detection_boxes']\n boxes = boxes[np.any(boxes, axis=2)]\n\n boxes_scores = detection_dict['detection_scores']\n boxes_scores = boxes_scores[np.nonzero(boxes_scores)]\n\n for box, score in zip(boxes, boxes_scores):\n if score > SCORE_THRESHOLD:\n ymin, xmin, ymax, xmax = box\n color_detected_rgb = cv_utils.predominant_rgb_color(\n img, ymin, xmin, ymax, xmax)\n text = '{:.2f}'.format(score)\n cv_utils.add_rectangle_with_text(\n img, ymin, xmin, ymax, xmax,\n color_detected_rgb, text)\n\n toc = time.time()\n processing_time_ms = (toc - tic) * 1000\n logging.debug('Detected {} objects in {:.2f} ms'.format(\n len(boxes), processing_time_ms))\n\n return img\n" ]
[ [ "numpy.vstack", "tensorflow.compat.v1.Session", "numpy.any", "numpy.expand_dims", "tensorflow.compat.v1.disable_v2_behavior", "numpy.nonzero" ] ]
MosyMosy/VDT
[ "e07f28d0cd6367ed30740c147ed2f270ead8fb63" ]
[ "models/resnet10_BITrans.py" ]
[ "import torch\n# from torch.autograd import Variable\nimport torch.nn as nn\nimport math\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.nn.utils.weight_norm import WeightNorm\nfrom Batchtransfer_EMA import BatchInstanceTransNorm as BIT2d\n\ndef init_layer(L):\n # Initialization using fan-in\n if isinstance(L, nn.Conv2d):\n n = L.kernel_size[0]*L.kernel_size[1]*L.out_channels\n L.weight.data.normal_(0,math.sqrt(2.0/float(n)))\n elif isinstance(L, BIT2d):\n L.weight.data.fill_(1)\n L.bias.data.fill_(0)\n\nclass Flatten(nn.Module):\n def __init__(self):\n super(Flatten, self).__init__()\n \n def forward(self, x): \n return x.view(x.size(0), -1)\n\n# Simple ResNet Block\nclass SimpleBlock(nn.Module):\n maml = False #Default\n def __init__(self, indim, outdim, half_res):\n super(SimpleBlock, self).__init__()\n self.indim = indim\n self.outdim = outdim\n\n self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)\n self.BN1 = BIT2d(outdim)\n \n self.C2 = nn.Conv2d(outdim, outdim,kernel_size=3, padding=1,bias=False)\n self.BN2 = BIT2d(outdim)\n\n self.relu1 = nn.ReLU(inplace=True)\n self.relu2 = nn.ReLU(inplace=True)\n\n self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]\n\n self.half_res = half_res\n\n # if the input number of channels is not equal to the output, then need a 1x1 convolution\n if indim!=outdim:\n\n self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)\n self.BNshortcut = BIT2d(outdim)\n\n self.parametrized_layers.append(self.shortcut)\n self.parametrized_layers.append(self.BNshortcut)\n self.shortcut_type = '1x1'\n else:\n self.shortcut_type = 'identity'\n\n for layer in self.parametrized_layers:\n init_layer(layer)\n\n def forward(self, x):\n out = self.C1(x)\n out = self.BN1(out)\n out = self.relu1(out)\n\n out = self.C2(out)\n out = self.BN2(out)\n short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))\n out = out + short_out\n out = self.relu2(out)\n return out\n\n# Bottleneck block\nclass BottleneckBlock(nn.Module):\n def __init__(self, indim, outdim, half_res):\n super(BottleneckBlock, self).__init__()\n bottleneckdim = int(outdim/4)\n self.indim = indim\n self.outdim = outdim\n\n self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)\n self.BN1 = BIT2d(bottleneckdim)\n self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)\n self.BN2 = BIT2d(bottleneckdim)\n self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)\n self.BN3 = BIT2d(outdim)\n\n self.relu = nn.ReLU()\n self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]\n self.half_res = half_res\n\n\n # if the input number of channels is not equal to the output, then need a 1x1 convolution\n if indim!=outdim:\n\n self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if half_res else 1, bias=False)\n\n self.parametrized_layers.append(self.shortcut)\n self.shortcut_type = '1x1'\n else:\n self.shortcut_type = 'identity'\n\n for layer in self.parametrized_layers:\n init_layer(layer)\n\n\n def forward(self, x):\n\n short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)\n out = self.C1(x)\n out = self.BN1(out)\n out = self.relu(out)\n out = self.C2(out)\n out = self.BN2(out)\n out = self.relu(out)\n out = self.C3(out)\n out = self.BN3(out)\n out = out + short_out\n\n out = self.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def 
__init__(self,block,list_of_num_layers, list_of_out_dims, flatten = False):\n # list_of_num_layers specifies number of layers in each stage\n # list_of_out_dims specifies number of output channel for each stage\n super(ResNet,self).__init__()\n assert len(list_of_num_layers)==4, 'Can have only four stages'\n\n conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n bn1 = BIT2d(64)\n\n relu = nn.ReLU()\n pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n init_layer(conv1)\n init_layer(bn1)\n\n trunk = [conv1, bn1, relu, pool1]\n\n indim = 64\n for i in range(4):\n\n for j in range(list_of_num_layers[i]):\n half_res = (i>=1) and (j==0)\n B = block(indim, list_of_out_dims[i], half_res)\n trunk.append(B)\n indim = list_of_out_dims[i]\n\n if flatten:\n # avgpool = nn.AvgPool2d(7)\n avgpool = nn.AdaptiveAvgPool2d((1, 1))\n trunk.append(avgpool)\n trunk.append(Flatten())\n self.final_feat_dim = indim\n else:\n self.final_feat_dim = [ indim, 7, 7]\n\n self.trunk = nn.Sequential(*trunk)\n\n def forward(self,x):\n out = self.trunk(x)\n return out\n\ndef ResNet10_BITrans( flatten = True):\n return ResNet(SimpleBlock, [1,1,1,1],[64,128,256,512], flatten)\n\n\n\n\n\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
AlkalineDevelopment/56openpilot
[ "fb9a557d77bc8409ff14261e4a05fcd2da709836" ]
[ "selfdrive/controls/lib/lateral_planner.py" ]
[ "import math\nimport numpy as np\nfrom common.realtime import sec_since_boot, DT_MDL\nfrom common.numpy_fast import interp\nfrom selfdrive.swaglog import cloudlog\nfrom selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc\nfrom selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS\nfrom selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE\nfrom selfdrive.config import Conversions as CV\nimport cereal.messaging as messaging\nfrom cereal import log\n\nLaneChangeState = log.LateralPlan.LaneChangeState\nLaneChangeDirection = log.LateralPlan.LaneChangeDirection\n\nLANE_CHANGE_SPEED_MIN = 25 * CV.MPH_TO_MS\nLANE_CHANGE_TIME_MAX = 10.\n\nDESIRES = {\n LaneChangeDirection.none: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,\n },\n LaneChangeDirection.left: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,\n },\n LaneChangeDirection.right: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,\n },\n}\n\n\nclass LateralPlanner:\n def __init__(self, CP, use_lanelines=True, wide_camera=False):\n self.use_lanelines = use_lanelines\n self.LP = LanePlanner(wide_camera)\n\n self.last_cloudlog_t = 0\n self.steer_rate_cost = CP.steerRateCost\n\n self.solution_invalid_cnt = 0\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n self.lane_change_timer = 0.0\n self.lane_change_ll_prob = 1.0\n self.keep_pulse_timer = 0.0\n self.prev_one_blinker = False\n self.desire = log.LateralPlan.Desire.none\n\n self.path_xyz = np.zeros((TRAJECTORY_SIZE, 3))\n self.path_xyz_stds = np.ones((TRAJECTORY_SIZE, 3))\n self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))\n self.t_idxs = np.arange(TRAJECTORY_SIZE)\n self.y_pts = np.zeros(TRAJECTORY_SIZE)\n\n self.lat_mpc = LateralMpc()\n self.reset_mpc(np.zeros(6))\n\n def reset_mpc(self, x0=np.zeros(6)):\n self.x0 = x0\n self.lat_mpc.reset(x0=self.x0)\n\n def update(self, sm):\n v_ego = sm['carState'].vEgo\n active = sm['controlsState'].active\n measured_curvature = sm['controlsState'].curvature\n\n md = sm['modelV2']\n self.LP.parse_model(sm['modelV2'])\n if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:\n self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])\n self.t_idxs = np.array(md.position.t)\n self.plan_yaw = list(md.orientation.z)\n if len(md.position.xStd) == TRAJECTORY_SIZE:\n self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])\n\n # Lane change logic\n one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker\n below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN\n\n if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n else:\n # LaneChangeState.off\n if self.lane_change_state == 
LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:\n self.lane_change_state = LaneChangeState.preLaneChange\n self.lane_change_ll_prob = 1.0\n\n # LaneChangeState.preLaneChange\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n # Set lane change direction\n if sm['carState'].leftBlinker:\n self.lane_change_direction = LaneChangeDirection.left\n elif sm['carState'].rightBlinker:\n self.lane_change_direction = LaneChangeDirection.right\n else: # If there are no blinkers we will go back to LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n\n torque_applied = sm['carState'].steeringPressed and \\\n ((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))\n\n blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))\n\n if not one_blinker or below_lane_change_speed:\n self.lane_change_state = LaneChangeState.off\n elif torque_applied and not blindspot_detected:\n self.lane_change_state = LaneChangeState.laneChangeStarting\n\n # LaneChangeState.laneChangeStarting\n elif self.lane_change_state == LaneChangeState.laneChangeStarting:\n # fade out over .5s\n self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2 * DT_MDL, 0.0)\n\n # 98% certainty\n lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob\n if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:\n self.lane_change_state = LaneChangeState.laneChangeFinishing\n\n # LaneChangeState.laneChangeFinishing\n elif self.lane_change_state == LaneChangeState.laneChangeFinishing:\n # fade in laneline over 1s\n self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)\n if self.lane_change_ll_prob > 0.99:\n self.lane_change_direction = LaneChangeDirection.none\n if one_blinker:\n self.lane_change_state = LaneChangeState.preLaneChange\n else:\n self.lane_change_state = LaneChangeState.off\n\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:\n self.lane_change_timer = 0.0\n else:\n self.lane_change_timer += DT_MDL\n\n self.prev_one_blinker = one_blinker\n\n self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]\n\n # Send keep pulse once per second during LaneChangeStart.preLaneChange\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:\n self.keep_pulse_timer = 0.0\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n self.keep_pulse_timer += DT_MDL\n if self.keep_pulse_timer > 1.0:\n self.keep_pulse_timer = 0.0\n elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:\n self.desire = log.LateralPlan.Desire.none\n\n # Turn off lanes during lane change\n if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:\n self.LP.lll_prob *= self.lane_change_ll_prob\n self.LP.rll_prob *= self.lane_change_ll_prob\n if self.use_lanelines:\n d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)\n self.lat_mpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, self.steer_rate_cost)\n else:\n d_path_xyz = self.path_xyz\n path_cost = np.clip(abs(self.path_xyz[0, 1] / self.path_xyz_stds[0, 1]), 0.5, 1.5) * MPC_COST_LAT.PATH\n # Heading cost is 
useful at low speed, otherwise end of plan can be off-heading\n heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])\n self.lat_mpc.set_weights(path_cost, heading_cost, self.steer_rate_cost)\n y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:, 1])\n heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)\n self.y_pts = y_pts\n\n assert len(y_pts) == LAT_MPC_N + 1\n assert len(heading_pts) == LAT_MPC_N + 1\n self.x0[4] = v_ego\n self.lat_mpc.run(self.x0,\n v_ego,\n CAR_ROTATION_RADIUS,\n y_pts,\n heading_pts)\n # init state for next\n self.x0[3] = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.lat_mpc.x_sol[:, 3])\n\n # Check for infeasible MPC solution\n mpc_nans = any(math.isnan(x) for x in self.lat_mpc.x_sol[:, 3])\n t = sec_since_boot()\n if mpc_nans or self.lat_mpc.solution_status != 0:\n self.reset_mpc()\n self.x0[3] = measured_curvature\n if t > self.last_cloudlog_t + 5.0:\n self.last_cloudlog_t = t\n cloudlog.warning(\"Lateral mpc - nan: True\")\n\n if self.lat_mpc.cost > 20000. or mpc_nans:\n self.solution_invalid_cnt += 1\n else:\n self.solution_invalid_cnt = 0\n\n def publish(self, sm, pm):\n plan_solution_valid = self.solution_invalid_cnt < 2\n plan_send = messaging.new_message('lateralPlan')\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])\n plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)\n plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]\n plan_send.lateralPlan.psis = [float(x) for x in self.lat_mpc.x_sol[0:CONTROL_N, 2]]\n plan_send.lateralPlan.curvatures = [float(x) for x in self.lat_mpc.x_sol[0:CONTROL_N, 3]]\n plan_send.lateralPlan.curvatureRates = [float(x) for x in self.lat_mpc.u_sol[0:CONTROL_N - 1]] + [0.0]\n plan_send.lateralPlan.lProb = float(self.LP.lll_prob)\n plan_send.lateralPlan.rProb = float(self.LP.rll_prob)\n plan_send.lateralPlan.dProb = float(self.LP.d_prob)\n\n plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)\n\n plan_send.lateralPlan.desire = self.desire\n plan_send.lateralPlan.useLaneLines = self.use_lanelines\n plan_send.lateralPlan.laneChangeState = self.lane_change_state\n plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction\n\n pm.send('lateralPlan', plan_send)\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.column_stack", "numpy.arange", "numpy.array", "numpy.linalg.norm" ] ]
n778509775/JDLBER
[ "20f209348f3aa10b85c61efd7253c94cd64a6a8a" ]
[ "network.py" ]
[ "#!/usr/bin/env python\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef init_weights(m):\n \"\"\" initialize weights of fully connected layer\n \"\"\"\n if type(m) == nn.Linear:\n nn.init.orthogonal_(m.weight, gain=1)\n m.bias.data.zero_()\n elif type(m) == nn.BatchNorm1d:\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\nclass Encoder(nn.Module):\n def __init__(self, num_inputs):\n super(Encoder, self).__init__()\n self.encoder = nn.Sequential(\n nn.BatchNorm1d(num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(num_inputs, num_inputs),\n\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(num_inputs, num_inputs))\n self.encoder.apply(init_weights)\n def forward(self, x):\n x = self.encoder(x)\n return x\n\n\n# Decoder_a\nclass Decoder_a(nn.Module):\n def __init__(self, num_inputs):\n super(Decoder_a, self).__init__()\n self.decoder = nn.Sequential(\n nn.Linear(num_inputs, num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Linear(num_inputs, num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(num_inputs, num_inputs))\n self.decoder.apply(init_weights)\n def forward(self, x):\n x = self.decoder(x)\n return x\n\n# Decoder_b\nclass Decoder_b(nn.Module):\n def __init__(self, num_inputs):\n super(Decoder_b, self).__init__()\n self.decoder = nn.Sequential(\n nn.Linear(num_inputs, num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Linear(num_inputs, num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(num_inputs, num_inputs))\n self.decoder.apply(init_weights)\n def forward(self, x):\n x = self.decoder(x)\n return x\n\n#classifier combine with autoencoder\nclass Discriminator(nn.Module):\n def __init__(self, num_inputs):\n super(Discriminator, self).__init__()\n\n self.model = nn.Sequential(\n nn.Linear(num_inputs, 64),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(64, 16),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(16, 1),\n nn.Sigmoid(),\n )\n\n def forward(self, z):\n validity = self.model(z)\n return validity\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.BatchNorm1d", "torch.nn.Sigmoid", "torch.nn.init.orthogonal_", "torch.nn.LeakyReLU" ] ]
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
[ "09c17e42c2e173a6ab10339f08fbc1505db8ea56" ]
[ "lab_session/calculate_pi.py" ]
[ "import random\nimport matplotlib.pyplot as plt\n\npi_vals = []\n\npi = 0\nn = 100\nm = 10**6\nfor i in range(m):\n\tfor j in range(n):\n\t\t[x, y] = [random.random(), random.random()]\n\t\tif x**2 + y**2 <= 1.0:\n\t\t\tpi += 1\n\tpi = (pi/n)*4\n\n\tpi_vals.append(pi)\n\nitern = [i for i in range(m)]\nplt.plot(itern, pi_vals, '.')\nplt.show()" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ] ]
bmu/pandas
[ "549b72f07ffdeb6d54b2865c90d95a256e4231ad" ]
[ "pandas/core/panel.py" ]
[ "\"\"\"\nContains data structures designed for manipulating panel (3-dimensional) data\n\"\"\"\n# pylint: disable=E1103,W0231,W0212,W0621\nfrom __future__ import division\nfrom pandas.compat import (map, zip, range, lrange, lmap, u, OrderedDict,\n OrderedDefaultdict)\nfrom pandas import compat\nimport sys\nimport warnings\nimport numpy as np\nfrom pandas.core.common import (PandasError, _try_sort, _default_index,\n _infer_dtype_from_scalar, notnull, is_list_like)\nfrom pandas.core.categorical import Categorical\nfrom pandas.core.index import (Index, MultiIndex, _ensure_index,\n _get_combined_index)\nfrom pandas.core.indexing import maybe_droplevels\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays,\n create_block_manager_from_blocks)\nfrom pandas.core.series import Series\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.tools.util import cartesian_product\nfrom pandas import compat\nfrom pandas.util.decorators import (deprecate, Appender, Substitution,\n deprecate_kwarg)\nimport pandas.core.common as com\nimport pandas.core.ops as ops\nimport pandas.core.nanops as nanops\nimport pandas.computation.expressions as expressions\nfrom pandas import lib\n\n_shared_doc_kwargs = dict(\n axes='items, major_axis, minor_axis',\n klass=\"Panel\",\n axes_single_arg=\"{0,1,2,'items','major_axis','minor_axis'}\")\n_shared_doc_kwargs['args_transpose'] = (\"three positional arguments: each one\"\n \"of\\n %s\" %\n _shared_doc_kwargs['axes_single_arg'])\n\n\ndef _ensure_like_indices(time, panels):\n \"\"\"\n Makes sure that time and panels are conformable\n \"\"\"\n n_time = len(time)\n n_panel = len(panels)\n u_panels = np.unique(panels) # this sorts!\n u_time = np.unique(time)\n if len(u_time) == n_time:\n time = np.tile(u_time, len(u_panels))\n if len(u_panels) == n_panel:\n panels = np.repeat(u_panels, len(u_time))\n return time, panels\n\n\ndef panel_index(time, panels, names=['time', 'panel']):\n \"\"\"\n Returns a multi-index suitable for a panel-like DataFrame\n\n Parameters\n ----------\n time : array-like\n Time index, does not have to repeat\n panels : array-like\n Panel index, does not have to repeat\n names : list, optional\n List containing the names of the indices\n\n Returns\n -------\n multi_index : MultiIndex\n Time index is the first level, the panels are the second level.\n\n Examples\n --------\n >>> years = range(1960,1963)\n >>> panels = ['A', 'B', 'C']\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),\n (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),\n (1962, 'C')], dtype=object)\n\n or\n\n >>> import numpy as np\n >>> years = np.repeat(range(1960,1963), 3)\n >>> panels = np.tile(['A', 'B', 'C'], 3)\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),\n (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),\n (1962, 'C')], dtype=object)\n \"\"\"\n time, panels = _ensure_like_indices(time, panels)\n time_factor = Categorical.from_array(time)\n panel_factor = Categorical.from_array(panels)\n\n labels = [time_factor.codes, panel_factor.codes]\n levels = [time_factor.categories, panel_factor.categories]\n return MultiIndex(levels, labels, sortorder=None, names=names,\n verify_integrity=False)\n\n\nclass Panel(NDFrame):\n\n \"\"\"\n Represents wide format panel data, stored as 3-dimensional array\n\n Parameters\n ----------\n data : ndarray 
(items x major x minor), or dict of DataFrames\n items : Index or array-like\n axis=0\n major_axis : Index or array-like\n axis=1\n minor_axis : Index or array-like\n axis=2\n dtype : dtype, default None\n Data type to force, otherwise infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n \"\"\"\n\n @property\n def _constructor(self):\n return type(self)\n\n _constructor_sliced = DataFrame\n\n def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,\n copy=False, dtype=None):\n self._init_data(data=data, items=items, major_axis=major_axis,\n minor_axis=minor_axis, copy=copy, dtype=dtype)\n\n def _init_data(self, data, copy, dtype, **kwargs):\n \"\"\"\n Generate ND initialization; axes are passed\n as required objects to __init__\n \"\"\"\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]\n\n if kwargs:\n raise TypeError('_init_data() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n axes = None\n if isinstance(data, BlockManager):\n if any(x is not None for x in passed_axes):\n axes = [x if x is not None else y\n for x, y in zip(passed_axes, data.axes)]\n mgr = data\n elif isinstance(data, dict):\n mgr = self._init_dict(data, passed_axes, dtype=dtype)\n copy = False\n dtype = None\n elif isinstance(data, (np.ndarray, list)):\n mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)\n copy = False\n dtype = None\n else: # pragma: no cover\n raise PandasError('Panel constructor not properly called!')\n\n NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)\n\n def _init_dict(self, data, axes, dtype=None):\n haxis = axes.pop(self._info_axis_number)\n\n # prefilter if haxis passed\n if haxis is not None:\n haxis = _ensure_index(haxis)\n data = OrderedDict((k, v) for k, v\n in compat.iteritems(data) if k in haxis)\n else:\n ks = list(data.keys())\n if not isinstance(data, OrderedDict):\n ks = _try_sort(ks)\n haxis = Index(ks)\n\n for k, v in compat.iteritems(data):\n if isinstance(v, dict):\n data[k] = self._constructor_sliced(v)\n\n # extract axis for remaining axes & create the slicemap\n raxes = [self._extract_axis(self, data, axis=i)\n if a is None else a for i, a in enumerate(axes)]\n raxes_sm = self._extract_axes_for_slice(self, raxes)\n\n # shallow copy\n arrays = []\n haxis_shape = [len(a) for a in raxes]\n for h in haxis:\n v = values = data.get(h)\n if v is None:\n values = np.empty(haxis_shape, dtype=dtype)\n values.fill(np.nan)\n elif isinstance(v, self._constructor_sliced):\n d = raxes_sm.copy()\n d['copy'] = False\n v = v.reindex(**d)\n if dtype is not None:\n v = v.astype(dtype)\n values = v.values\n arrays.append(values)\n\n return self._init_arrays(arrays, haxis, [haxis] + raxes)\n\n def _init_arrays(self, arrays, arr_names, axes):\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n @classmethod\n def from_dict(cls, data, intersect=False, orient='items', dtype=None):\n \"\"\"\n Construct Panel from dict of DataFrame objects\n\n Parameters\n ----------\n data : dict\n {field : DataFrame}\n intersect : boolean\n Intersect indexes of input DataFrames\n orient : {'items', 'minor'}, default 'items'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the items of the result panel, pass 'items'\n (default). 
Otherwise if the columns of the values of the passed\n DataFrame objects should be the items (which in the case of\n mixed-dtype data you should do), instead pass 'minor'\n\n\n Returns\n -------\n Panel\n \"\"\"\n orient = orient.lower()\n if orient == 'minor':\n new_data = OrderedDefaultdict(dict)\n for col, df in compat.iteritems(data):\n for item, s in compat.iteritems(df):\n new_data[item][col] = s\n data = new_data\n elif orient != 'items': # pragma: no cover\n raise ValueError('Orientation must be one of {items, minor}.')\n\n d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)\n ks = list(d['data'].keys())\n if not isinstance(d['data'], OrderedDict):\n ks = list(sorted(ks))\n d[cls._info_axis_name] = Index(ks)\n return cls(**d)\n\n def __getitem__(self, key):\n if isinstance(self._info_axis, MultiIndex):\n return self._getitem_multilevel(key)\n if not (is_list_like(key) or isinstance(key, slice)):\n return super(Panel, self).__getitem__(key)\n return self.ix[key]\n\n def _getitem_multilevel(self, key):\n info = self._info_axis\n loc = info.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_index = info[loc]\n result_index = maybe_droplevels(new_index, key)\n slices = [loc] + [slice(None) for x in range(\n self._AXIS_LEN - 1)]\n new_values = self.values[slices]\n\n d = self._construct_axes_dict(self._AXIS_ORDERS[1:])\n d[self._info_axis_name] = result_index\n result = self._constructor(new_values, **d)\n return result\n else:\n return self._get_item_cache(key)\n\n def _init_matrix(self, data, axes, dtype=None, copy=False):\n values = self._prep_ndarray(self, data, copy=copy)\n\n if dtype is not None:\n try:\n values = values.astype(dtype)\n except Exception:\n raise ValueError('failed to cast to %s' % dtype)\n\n shape = values.shape\n fixed_axes = []\n for i, ax in enumerate(axes):\n if ax is None:\n ax = _default_index(shape[i])\n else:\n ax = _ensure_index(ax)\n fixed_axes.append(ax)\n\n return create_block_manager_from_blocks([values], fixed_axes)\n\n #----------------------------------------------------------------------\n # Comparison methods\n\n def _compare_constructor(self, other, func):\n if not self._indexed_same(other):\n raise Exception('Can only compare identically-labeled '\n 'same type objects')\n\n new_data = {}\n for col in self._info_axis:\n new_data[col] = func(self[col], other[col])\n\n d = self._construct_axes_dict(copy=False)\n return self._constructor(data=new_data, **d)\n\n #----------------------------------------------------------------------\n # Magic methods\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular Panel\n\n Invoked by unicode(df) in py2 only.\n Yields a Unicode String in both py2/py3.\n \"\"\"\n\n class_name = str(self.__class__)\n\n shape = self.shape\n dims = u('Dimensions: %s') % ' x '.join(\n [\"%d (%s)\" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])\n\n def axis_pretty(a):\n v = getattr(self, a)\n if len(v) > 0:\n return u('%s axis: %s to %s') % (a.capitalize(),\n com.pprint_thing(v[0]),\n com.pprint_thing(v[-1]))\n else:\n return u('%s axis: None') % a.capitalize()\n\n output = '\\n'.join(\n [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])\n return output\n\n def _get_plane_axes_index(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes indexes\n \"\"\"\n axis_name = self._get_axis_name(axis)\n\n if axis_name == 'major_axis':\n index = 'minor_axis'\n columns 
= 'items'\n if axis_name == 'minor_axis':\n index = 'major_axis'\n columns = 'items'\n elif axis_name == 'items':\n index = 'major_axis'\n columns = 'minor_axis'\n\n return index, columns\n\n def _get_plane_axes(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes\n \"\"\"\n return [ self._get_axis(axi) for axi in self._get_plane_axes_index(axis) ]\n\n fromDict = from_dict\n\n def to_sparse(self, fill_value=None, kind='block'):\n \"\"\"\n Convert to SparsePanel\n\n Parameters\n ----------\n fill_value : float, default NaN\n kind : {'block', 'integer'}\n\n Returns\n -------\n y : SparseDataFrame\n \"\"\"\n from pandas.core.sparse import SparsePanel\n frames = dict(compat.iteritems(self))\n return SparsePanel(frames, items=self.items,\n major_axis=self.major_axis,\n minor_axis=self.minor_axis,\n default_kind=kind,\n default_fill_value=fill_value)\n\n def to_excel(self, path, na_rep='', engine=None, **kwargs):\n \"\"\"\n Write each DataFrame in Panel to a separate excel sheet\n\n Parameters\n ----------\n path : string or ExcelWriter object\n File path or existing ExcelWriter\n na_rep : string, default ''\n Missing data representation\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n\n Other Parameters\n ----------------\n float_format : string, default None\n Format string for floating point numbers\n cols : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. 
A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : upper left cell row to dump data frame\n startcol : upper left cell column to dump data frame\n\n Notes\n -----\n Keyword arguments (and na_rep) are passed to the ``to_excel`` method\n for each DataFrame written.\n \"\"\"\n from pandas.io.excel import ExcelWriter\n\n if isinstance(path, compat.string_types):\n writer = ExcelWriter(path, engine=engine)\n else:\n writer = path\n kwargs['na_rep'] = na_rep\n\n for item, df in compat.iteritems(self):\n name = str(item)\n df.to_excel(writer, name, **kwargs)\n writer.save()\n\n def as_matrix(self):\n self._consolidate_inplace()\n return self._data.as_matrix()\n\n #----------------------------------------------------------------------\n # Getting and setting elements\n\n def get_value(self, *args, **kwargs):\n \"\"\"\n Quickly retrieve single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n nargs = len(args)\n nreq = self._AXIS_LEN\n\n # require an arg for each axis\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis, you gave'\n ' {0} args, but {1} are required'.format(nargs,\n nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('get_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n return lower.get_value(*args[1:], takeable=takeable)\n\n def set_value(self, *args, **kwargs):\n \"\"\"\n Quickly set single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n value : scalar\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n panel : Panel\n If label combo is contained, will be reference to calling Panel,\n otherwise a new object\n \"\"\"\n # require an arg for each axis and the value\n nargs = len(args)\n nreq = self._AXIS_LEN + 1\n\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis plus the '\n 'value provided, you gave {0} args, but {1} are '\n 'required'.format(nargs, nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('set_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n try:\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n lower.set_value(*args[1:], takeable=takeable)\n return self\n except KeyError:\n axes = self._expand_axes(args)\n d = self._construct_axes_dict_from(self, axes, copy=False)\n result = self.reindex(**d)\n args = list(args)\n likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])\n made_bigger = not np.array_equal(\n axes[0], self._info_axis)\n # how to make this logic simpler?\n if made_bigger:\n com._possibly_cast_item(result, args[0], likely_dtype)\n\n return result.set_value(*args)\n\n def _box_item_values(self, key, values):\n if self.ndim == values.ndim:\n result = self._constructor(values)\n\n # a dup selection will yield a full ndim\n if result._get_axis(0).is_unique:\n result = result[key]\n\n return result\n\n d = 
self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])\n return self._constructor_sliced(values, **d)\n\n def __setitem__(self, key, value):\n shape = tuple(self.shape)\n if isinstance(value, self._constructor_sliced):\n value = value.reindex(\n **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))\n mat = value.values\n elif isinstance(value, np.ndarray):\n if value.shape != shape[1:]:\n raise ValueError(\n 'shape of value must be {0}, shape of given object was '\n '{1}'.format(shape[1:], tuple(map(int, value.shape))))\n mat = np.asarray(value)\n elif np.isscalar(value):\n dtype, value = _infer_dtype_from_scalar(value)\n mat = np.empty(shape[1:], dtype=dtype)\n mat.fill(value)\n else:\n raise TypeError('Cannot set item of type: %s' % str(type(value)))\n\n mat = mat.reshape(tuple([1]) + shape[1:])\n NDFrame._set_item(self, key, mat)\n\n def _unpickle_panel_compat(self, state): # pragma: no cover\n \"Unpickle the panel\"\n _unpickle = com._unpickle_array\n vals, items, major, minor = state\n\n items = _unpickle(items)\n major = _unpickle(major)\n minor = _unpickle(minor)\n values = _unpickle(vals)\n wp = Panel(values, items, major, minor)\n self._data = wp._data\n\n def conform(self, frame, axis='items'):\n \"\"\"\n Conform input DataFrame to align with chosen axis pair.\n\n Parameters\n ----------\n frame : DataFrame\n axis : {'items', 'major', 'minor'}\n\n Axis the input corresponds to. E.g., if axis='major', then\n the frame's columns would be items, and the index would be\n values of the minor axis\n\n Returns\n -------\n DataFrame\n \"\"\"\n axes = self._get_plane_axes(axis)\n return frame.reindex(**self._extract_axes_for_slice(self, axes))\n\n def head(self, n=5):\n raise NotImplementedError\n\n def tail(self, n=5):\n raise NotImplementedError\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\" don't allow a multi reindex on Panel or above ndim \"\"\"\n return False\n\n def dropna(self, axis=0, how='any', inplace=False):\n \"\"\"\n Drop 2D from panel, holding passed axis constant\n\n Parameters\n ----------\n axis : int, default 0\n Axis to hold constant. E.g. axis=1 will drop major_axis entries\n having a certain amount of NA data\n how : {'all', 'any'}, default 'any'\n 'any': one or more values are NA in the DataFrame along the\n axis. 
For 'all' they all must be.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : Panel\n \"\"\"\n axis = self._get_axis_number(axis)\n\n values = self.values\n mask = com.notnull(values)\n\n for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):\n mask = mask.sum(ax)\n\n per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])\n\n if how == 'all':\n cond = mask > 0\n else:\n cond = mask == per_slice\n\n new_ax = self._get_axis(axis)[cond]\n result = self.reindex_axis(new_ax, axis=axis)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _combine(self, other, func, axis=0):\n if isinstance(other, Panel):\n return self._combine_panel(other, func)\n elif isinstance(other, DataFrame):\n return self._combine_frame(other, func, axis=axis)\n elif np.isscalar(other):\n return self._combine_const(other, func)\n\n def _combine_const(self, other, func):\n new_values = func(self.values, other)\n d = self._construct_axes_dict()\n return self._constructor(new_values, **d)\n\n def _combine_frame(self, other, func, axis=0):\n index, columns = self._get_plane_axes(axis)\n axis = self._get_axis_number(axis)\n\n other = other.reindex(index=index, columns=columns)\n\n if axis == 0:\n new_values = func(self.values, other.values)\n elif axis == 1:\n new_values = func(self.values.swapaxes(0, 1), other.values.T)\n new_values = new_values.swapaxes(0, 1)\n elif axis == 2:\n new_values = func(self.values.swapaxes(0, 2), other.values)\n new_values = new_values.swapaxes(0, 2)\n\n return self._constructor(new_values, self.items, self.major_axis,\n self.minor_axis)\n\n def _combine_panel(self, other, func):\n items = self.items.union(other.items)\n major = self.major_axis.union(other.major_axis)\n minor = self.minor_axis.union(other.minor_axis)\n\n # could check that everything's the same size, but forget it\n this = self.reindex(items=items, major=major, minor=minor)\n other = other.reindex(items=items, major=major, minor=minor)\n\n result_values = func(this.values, other.values)\n\n return self._constructor(result_values, items, major, minor)\n\n def major_xs(self, key, copy=None):\n \"\"\"\n Return slice of panel along major axis\n\n Parameters\n ----------\n key : object\n Major axis label\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : DataFrame\n index -> minor axis, columns -> items\n\n Notes\n -----\n major_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of major_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a copy or a view if possible\")\n\n return self.xs(key, axis=self._AXIS_LEN - 2)\n\n def minor_xs(self, key, copy=None):\n \"\"\"\n Return slice of panel along minor axis\n\n Parameters\n ----------\n key : object\n Minor axis label\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : DataFrame\n index -> major axis, columns -> items\n\n Notes\n -----\n minor_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of minor_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a 
copy or a view if possible\")\n\n return self.xs(key, axis=self._AXIS_LEN - 1)\n\n def xs(self, key, axis=1, copy=None):\n \"\"\"\n Return slice of panel along selected axis\n\n Parameters\n ----------\n key : object\n Label\n axis : {'items', 'major', 'minor}, default 1/'major'\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : ndim(self)-1\n\n Notes\n -----\n xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a copy or a view if possible\")\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n return self[key]\n\n self._consolidate_inplace()\n axis_number = self._get_axis_number(axis)\n new_data = self._data.xs(key, axis=axis_number, copy=False)\n result = self._construct_return_type(new_data)\n copy = new_data.is_mixed_type\n result._set_is_copy(self, copy=copy)\n return result\n\n _xs = xs\n\n def _ixs(self, i, axis=0):\n \"\"\"\n i : int, slice, or sequence of integers\n axis : int\n \"\"\"\n\n ax = self._get_axis(axis)\n key = ax[i]\n\n # xs cannot handle a non-scalar key, so just reindex here\n # if we have a multi-index and a single tuple, then its a reduction (GH 7516)\n if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):\n if is_list_like(key):\n indexer = {self._get_axis_name(axis): key}\n return self.reindex(**indexer)\n\n # a reduction\n if axis == 0:\n values = self._data.iget(i)\n return self._box_item_values(key, values)\n\n # xs by position\n self._consolidate_inplace()\n new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)\n return self._construct_return_type(new_data)\n\n def groupby(self, function, axis='major'):\n \"\"\"\n Group data on given axis, returning GroupBy object\n\n Parameters\n ----------\n function : callable\n Mapping function for chosen access\n axis : {'major', 'minor', 'items'}, default 'major'\n\n Returns\n -------\n grouped : PanelGroupBy\n \"\"\"\n from pandas.core.groupby import PanelGroupBy\n axis = self._get_axis_number(axis)\n return PanelGroupBy(self, function, axis=axis)\n\n def to_frame(self, filter_observations=True):\n \"\"\"\n Transform wide format into long (stacked) format as DataFrame whose\n columns are the Panel's items and whose index is a MultiIndex formed\n of the Panel's major and minor axes.\n\n Parameters\n ----------\n filter_observations : boolean, default True\n Drop (major, minor) pairs without a complete set of observations\n across all the items\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n _, N, K = self.shape\n\n if filter_observations:\n # shaped like the return DataFrame\n mask = com.notnull(self.values).all(axis=0)\n # size = mask.sum()\n selector = mask.ravel()\n else:\n # size = N * K\n selector = slice(None, None)\n\n data = {}\n for item in self.items:\n data[item] = self[item].values.ravel()[selector]\n\n def construct_multi_parts(idx, n_repeat, n_shuffle=1):\n axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)\n labels = [x[selector] for x in axis_idx.labels]\n levels = axis_idx.levels\n names = axis_idx.names\n return labels, levels, names\n\n def construct_index_parts(idx, major=True):\n levels = [idx]\n if major:\n labels = [np.arange(N).repeat(K)[selector]]\n names = idx.name or 'major'\n else:\n labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]\n labels = 
[labels.ravel()[selector]]\n names = idx.name or 'minor'\n names = [names]\n return labels, levels, names\n\n if isinstance(self.major_axis, MultiIndex):\n major_labels, major_levels, major_names = construct_multi_parts(\n self.major_axis, n_repeat=K)\n else:\n major_labels, major_levels, major_names = construct_index_parts(\n self.major_axis)\n\n if isinstance(self.minor_axis, MultiIndex):\n minor_labels, minor_levels, minor_names = construct_multi_parts(\n self.minor_axis, n_repeat=N, n_shuffle=K)\n else:\n minor_labels, minor_levels, minor_names = construct_index_parts(\n self.minor_axis, major=False)\n\n levels = major_levels + minor_levels\n labels = major_labels + minor_labels\n names = major_names + minor_names\n\n index = MultiIndex(levels=levels, labels=labels,\n names=names, verify_integrity=False)\n\n return DataFrame(data, index=index, columns=self.items)\n\n to_long = deprecate('to_long', to_frame)\n toLong = deprecate('toLong', to_frame)\n\n def apply(self, func, axis='major', **kwargs):\n \"\"\"\n Applies function along input axis of the Panel\n\n Parameters\n ----------\n func : function\n Function to apply to each combination of 'other' axes\n e.g. if axis = 'items', then the combination of major_axis/minor_axis\n will be passed a Series\n axis : {'major', 'minor', 'items'}\n Additional keyword arguments will be passed as keywords to the function\n\n Examples\n --------\n >>> p.apply(numpy.sqrt) # returns a Panel\n >>> p.apply(lambda x: x.sum(), axis=0) # equiv to p.sum(0)\n >>> p.apply(lambda x: x.sum(), axis=1) # equiv to p.sum(1)\n >>> p.apply(lambda x: x.sum(), axis=2) # equiv to p.sum(2)\n\n Returns\n -------\n result : Pandas Object\n \"\"\"\n\n if kwargs and not isinstance(func, np.ufunc):\n f = lambda x: func(x, **kwargs)\n else:\n f = func\n\n # 2d-slabs\n if isinstance(axis, (tuple,list)) and len(axis) == 2:\n return self._apply_2d(f, axis=axis)\n\n axis = self._get_axis_number(axis)\n\n # try ufunc like\n if isinstance(f, np.ufunc):\n try:\n result = np.apply_along_axis(func, axis, self.values)\n return self._wrap_result(result, axis=axis)\n except (AttributeError):\n pass\n\n # 1d\n return self._apply_1d(f, axis=axis)\n\n def _apply_1d(self, func, axis):\n\n axis_name = self._get_axis_name(axis)\n ax = self._get_axis(axis)\n ndim = self.ndim\n values = self.values\n\n # iter thru the axes\n slice_axis = self._get_axis(axis)\n slice_indexer = [0]*(ndim-1)\n indexer = np.zeros(ndim, 'O')\n indlist = list(range(ndim))\n indlist.remove(axis)\n indexer[axis] = slice(None, None)\n indexer.put(indlist, slice_indexer)\n planes = [ self._get_axis(axi) for axi in indlist ]\n shape = np.array(self.shape).take(indlist)\n\n # all the iteration points\n points = cartesian_product(planes)\n\n results = []\n for i in range(np.prod(shape)):\n\n # construct the object\n pts = tuple([ p[i] for p in points ])\n indexer.put(indlist, slice_indexer)\n\n obj = Series(values[tuple(indexer)],index=slice_axis,name=pts)\n result = func(obj)\n\n results.append(result)\n\n # increment the indexer\n slice_indexer[-1] += 1\n n = -1\n while (slice_indexer[n] >= shape[n]) and (n > (1-ndim)):\n slice_indexer[n-1] += 1\n slice_indexer[n] = 0\n n -= 1\n\n # empty object\n if not len(results):\n return self._constructor(**self._construct_axes_dict())\n\n # same ndim as current\n if isinstance(results[0],Series):\n arr = np.vstack([ r.values for r in results ])\n arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))\n tranp = np.array([axis]+indlist).argsort()\n arr = 
arr.transpose(tuple(list(tranp)))\n return self._constructor(arr,**self._construct_axes_dict())\n\n # ndim-1 shape\n results = np.array(results).reshape(shape)\n if results.ndim == 2 and axis_name != self._info_axis_name:\n results = results.T\n planes = planes[::-1]\n return self._construct_return_type(results,planes)\n\n def _apply_2d(self, func, axis):\n \"\"\" handle 2-d slices, equiv to iterating over the other axis \"\"\"\n\n ndim = self.ndim\n axis = [ self._get_axis_number(a) for a in axis ]\n\n # construct slabs, in 2-d this is a DataFrame result\n indexer_axis = list(range(ndim))\n for a in axis:\n indexer_axis.remove(a)\n indexer_axis = indexer_axis[0]\n\n slicer = [ slice(None,None) ] * ndim\n ax = self._get_axis(indexer_axis)\n\n results = []\n for i, e in enumerate(ax):\n\n slicer[indexer_axis] = i\n sliced = self.iloc[tuple(slicer)]\n\n obj = func(sliced)\n results.append((e,obj))\n\n return self._construct_return_type(dict(results))\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n if numeric_only:\n raise NotImplementedError(\n 'Panel.{0} does not implement numeric_only.'.format(name))\n\n axis_name = self._get_axis_name(axis)\n axis_number = self._get_axis_number(axis_name)\n f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)\n\n result = f(self.values)\n\n axes = self._get_plane_axes(axis_name)\n if result.ndim == 2 and axis_name != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n def _construct_return_type(self, result, axes=None):\n \"\"\" return the type for the ndim of the result \"\"\"\n ndim = getattr(result,'ndim',None)\n\n # need to assume they are the same\n if ndim is None:\n if isinstance(result,dict):\n ndim = getattr(list(compat.itervalues(result))[0],'ndim',None)\n\n # a saclar result\n if ndim is None:\n ndim = 0\n\n # have a dict, so top-level is +1 dim\n else:\n ndim += 1\n\n # scalar\n if ndim == 0:\n return Series(result)\n\n # same as self\n elif self.ndim == ndim:\n \"\"\" return the construction dictionary for these axes \"\"\"\n if axes is None:\n return self._constructor(result)\n return self._constructor(result, **self._construct_axes_dict())\n\n # sliced\n elif self.ndim == ndim + 1:\n if axes is None:\n return self._constructor_sliced(result)\n return self._constructor_sliced(\n result, **self._extract_axes_for_slice(self, axes))\n\n raise PandasError('invalid _construct_return_type [self->%s] '\n '[result->%s]' % (self, result))\n\n def _wrap_result(self, result, axis):\n axis = self._get_axis_name(axis)\n axes = self._get_plane_axes(axis)\n if result.ndim == 2 and axis != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None\n else kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None\n else kwargs.pop('minor', None))\n return super(Panel, self).reindex(items=items, major_axis=major_axis,\n minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None\n else kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None\n else kwargs.pop('minor', None))\n return super(Panel, self).rename(items=items, 
major_axis=major_axis,\n minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=np.nan):\n return super(Panel, self).reindex_axis(labels=labels, axis=axis,\n method=method, level=level,\n copy=copy, limit=limit,\n fill_value=fill_value)\n\n @Appender(_shared_docs['transpose'] % _shared_doc_kwargs)\n def transpose(self, *args, **kwargs):\n return super(Panel, self).transpose(*args, **kwargs)\n\n def count(self, axis='major'):\n \"\"\"\n Return number of observations over requested axis.\n\n Parameters\n ----------\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n count : DataFrame\n \"\"\"\n i = self._get_axis_number(axis)\n\n values = self.values\n mask = np.isfinite(values)\n result = mask.sum(axis=i,dtype='int64')\n\n return self._wrap_result(result, axis)\n\n @deprecate_kwarg(old_arg_name='lags', new_arg_name='periods')\n def shift(self, periods=1, freq=None, axis='major'):\n \"\"\"\n Shift major or minor axis by specified number of leads/lags. Drops\n periods right now compared with DataFrame.shift\n\n Parameters\n ----------\n lags : int\n axis : {'major', 'minor'}\n\n Returns\n -------\n shifted : Panel\n \"\"\"\n if freq:\n return self.tshift(periods, freq, axis=axis)\n\n if axis == 'items':\n raise ValueError('Invalid axis')\n\n return super(Panel, self).slice_shift(periods, axis=axis)\n\n def tshift(self, periods=1, freq=None, axis='major', **kwds):\n return super(Panel, self).tshift(periods, freq, axis, **kwds)\n\n def join(self, other, how='left', lsuffix='', rsuffix=''):\n \"\"\"\n Join items with other Panel either on major and minor axes column\n\n Parameters\n ----------\n other : Panel or list of Panels\n Index should be similar to one of the columns in this one\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. Default: 'left'\n for joining on index, None otherwise\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n\n Returns\n -------\n joined : Panel\n \"\"\"\n from pandas.tools.merge import concat\n\n if isinstance(other, Panel):\n join_major, join_minor = self._get_join_index(other, how)\n this = self.reindex(major=join_major, minor=join_minor)\n other = other.reindex(major=join_major, minor=join_minor)\n merged_data = this._data.merge(other._data, lsuffix, rsuffix)\n return self._constructor(merged_data)\n else:\n if lsuffix or rsuffix:\n raise ValueError('Suffixes not supported when passing '\n 'multiple panels')\n\n if how == 'left':\n how = 'outer'\n join_axes = [self.major_axis, self.minor_axis]\n elif how == 'right':\n raise ValueError('Right join not supported with multiple '\n 'panels')\n else:\n join_axes = None\n\n return concat([self] + list(other), axis=0, join=how,\n join_axes=join_axes, verify_integrity=True)\n\n def update(self, other, join='left', overwrite=True, filter_func=None,\n raise_conflict=False):\n \"\"\"\n Modify Panel in place using non-NA values from passed\n Panel, or object coercible to Panel. 
Aligns on items\n\n Parameters\n ----------\n other : Panel, or object coercible to Panel\n join : How to join individual DataFrames\n {'left', 'right', 'outer', 'inner'}, default 'left'\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling panel\n filter_func : callable(1d-array) -> 1d-array<boolean>, default None\n Can choose to replace values other than NA. Return True for values\n that should be updated\n raise_conflict : bool\n If True, will raise an error if a DataFrame and other both\n contain data in the same place.\n \"\"\"\n\n if not isinstance(other, self._constructor):\n other = self._constructor(other)\n\n axis_name = self._info_axis_name\n axis_values = self._info_axis\n other = other.reindex(**{axis_name: axis_values})\n\n for frame in axis_values:\n self[frame].update(other[frame], join, overwrite, filter_func,\n raise_conflict)\n\n def _get_join_index(self, other, how):\n if how == 'left':\n join_major, join_minor = self.major_axis, self.minor_axis\n elif how == 'right':\n join_major, join_minor = other.major_axis, other.minor_axis\n elif how == 'inner':\n join_major = self.major_axis.intersection(other.major_axis)\n join_minor = self.minor_axis.intersection(other.minor_axis)\n elif how == 'outer':\n join_major = self.major_axis.union(other.major_axis)\n join_minor = self.minor_axis.union(other.minor_axis)\n return join_major, join_minor\n\n # miscellaneous data creation\n @staticmethod\n def _extract_axes(self, data, axes, **kwargs):\n \"\"\" return a list of the axis indicies \"\"\"\n return [self._extract_axis(self, data, axis=i, **kwargs) for i, a\n in enumerate(axes)]\n\n @staticmethod\n def _extract_axes_for_slice(self, axes):\n \"\"\" return the slice dictionary for these axes \"\"\"\n return dict([(self._AXIS_SLICEMAP[i], a)\n for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN -\n len(axes):], axes)])\n\n @staticmethod\n def _prep_ndarray(self, values, copy=True):\n if not isinstance(values, np.ndarray):\n values = np.asarray(values)\n # NumPy strings are a pain, convert to object\n if issubclass(values.dtype.type, compat.string_types):\n values = np.array(values, dtype=object, copy=True)\n else:\n if copy:\n values = values.copy()\n if values.ndim != self._AXIS_LEN:\n raise ValueError(\"The number of dimensions required is {0}, \"\n \"but the number of dimensions of the \"\n \"ndarray given was {1}\".format(self._AXIS_LEN,\n values.ndim))\n return values\n\n @staticmethod\n def _homogenize_dict(self, frames, intersect=True, dtype=None):\n \"\"\"\n Conform set of _constructor_sliced-like objects to either\n an intersection of indices / columns or a union.\n\n Parameters\n ----------\n frames : dict\n intersect : boolean, default True\n\n Returns\n -------\n dict of aligned results & indicies\n \"\"\"\n\n result = dict()\n # caller differs dict/ODict, presered type\n if isinstance(frames, OrderedDict):\n result = OrderedDict()\n\n adj_frames = OrderedDict()\n for k, v in compat.iteritems(frames):\n if isinstance(v, dict):\n adj_frames[k] = self._constructor_sliced(v)\n else:\n adj_frames[k] = v\n\n axes = self._AXIS_ORDERS[1:]\n axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(\n self, adj_frames, axes, intersect=intersect))])\n\n reindex_dict = dict(\n [(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])\n reindex_dict['copy'] = False\n for key, frame in compat.iteritems(adj_frames):\n if frame is not None:\n result[key] = frame.reindex(**reindex_dict)\n else:\n result[key] = None\n\n 
axes_dict['data'] = result\n return axes_dict\n\n @staticmethod\n def _extract_axis(self, data, axis=0, intersect=False):\n\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_frames = False\n\n for v in data.values():\n if isinstance(v, self._constructor_sliced):\n have_frames = True\n indexes.append(v._get_axis(axis))\n elif v is not None:\n have_raw_arrays = True\n raw_lengths.append(v.shape[axis])\n\n if have_frames:\n index = _get_combined_index(indexes, intersect=intersect)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError('ndarrays must match shape on axis %d' % axis)\n\n if have_frames:\n if lengths[0] != len(index):\n raise AssertionError('Length of data and index must match')\n else:\n index = Index(np.arange(lengths[0]))\n\n if index is None:\n index = Index([])\n\n return _ensure_index(index)\n\n @classmethod\n def _add_aggregate_operations(cls, use_numexpr=True):\n \"\"\" add the operations to the cls; evaluate the doc strings again \"\"\"\n\n # doc strings substitors\n _agg_doc = \"\"\"\nWrapper method for %%s\n\nParameters\n----------\nother : %s or %s\"\"\" % (cls._constructor_sliced.__name__, cls.__name__) + \"\"\"\naxis : {\"\"\" + ', '.join(cls._AXIS_ORDERS) + \"}\" + \"\"\"\nAxis to broadcast over\n\nReturns\n-------\n\"\"\" + cls.__name__ + \"\\n\"\n\n def _panel_arith_method(op, name, str_rep=None, default_axis=None,\n fill_zeros=None, **eval_kwargs):\n def na_op(x, y):\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n raise_on_error=True,\n **eval_kwargs)\n except TypeError:\n result = op(x, y)\n\n # handles discrepancy between numpy and numexpr on division/mod\n # by 0 though, given that these are generally (always?)\n # non-scalars, I'm not sure whether it's worth it at the moment\n result = com._fill_zeros(result, x, y, name, fill_zeros)\n return result\n\n @Substitution(name)\n @Appender(_agg_doc)\n def f(self, other, axis=0):\n return self._combine(other, na_op, axis=axis)\n f.__name__ = name\n return f\n\n # add `div`, `mul`, `pow`, etc..\n ops.add_flex_arithmetic_methods(\n cls, _panel_arith_method, use_numexpr=use_numexpr,\n flex_comp_method=ops._comp_method_PANEL)\n\nPanel._setup_axes(axes=['items', 'major_axis', 'minor_axis'],\n info_axis=0,\n stat_axis=1,\n aliases={'major': 'major_axis',\n 'minor': 'minor_axis'},\n slicers={'major_axis': 'index',\n 'minor_axis': 'columns'})\n\nops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)\nPanel._add_aggregate_operations()\nPanel._add_numeric_operations()\n\nWidePanel = Panel\nLongPanel = DataFrame\n" ]
[ [ "pandas.core.groupby.PanelGroupBy", "pandas.compat.iteritems", "pandas.core.generic.NDFrame._set_item", "pandas.compat.map", "pandas.core.common._default_index", "pandas.core.internals.create_block_manager_from_arrays", "numpy.asarray", "pandas.core.common._possibly_cast_item", "pandas.compat.zip", "pandas.core.categorical.Categorical.from_array", "numpy.apply_along_axis", "numpy.isscalar", "pandas.core.common._fill_zeros", "numpy.isfinite", "numpy.vstack", "pandas.util.decorators.deprecate_kwarg", "pandas.compat.itervalues", "pandas.core.common.PandasError", "pandas.compat.OrderedDefaultdict", "pandas.util.decorators.Appender", "pandas.core.index.MultiIndex", "pandas.core.sparse.SparsePanel", "pandas.core.common.is_list_like", "pandas.core.generic.NDFrame.__init__", "numpy.unique", "pandas.core.common._infer_dtype_from_scalar", "pandas.core.index._ensure_index", "pandas.core.ops.add_special_arithmetic_methods", "numpy.zeros", "pandas.core.common._try_sort", "pandas.core.indexing.maybe_droplevels", "pandas.util.decorators.deprecate", "numpy.arange", "numpy.prod", "pandas.core.common.notnull", "pandas.compat.u", "pandas.core.ops.add_flex_arithmetic_methods", "numpy.empty", "pandas.compat.OrderedDict", "pandas.core.series.Series", "pandas.util.decorators.Substitution", "pandas.core.index.Index", "pandas.core.index._get_combined_index", "pandas.computation.expressions.evaluate", "pandas.compat.range", "pandas.tools.util.cartesian_product", "numpy.array_equal", "numpy.array", "pandas.core.frame.DataFrame", "pandas.core.internals.create_block_manager_from_blocks", "pandas.io.excel.ExcelWriter", "pandas.core.common.pprint_thing" ] ]
DengSonic/PyFR
[ "dde524ed56f4a4feca376b51db4b21eb6fa4b113" ]
[ "pyfr/backends/openmp/base.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom pyfr.backends.base import BaseBackend\n\n\nclass OpenMPBackend(BaseBackend):\n name = 'openmp'\n\n def __init__(self, cfg):\n super().__init__(cfg)\n\n # Take the default alignment requirement to be 32-bytes\n self.alignb = cfg.getint('backend-openmp', 'alignb', 32)\n\n if self.alignb < 32 or (self.alignb & (self.alignb - 1)):\n raise ValueError('Alignment must be a power of 2 and >= 32')\n\n # Compute the SoA size\n self.soasz = self.alignb // np.dtype(self.fpdtype).itemsize\n\n from pyfr.backends.openmp import (blasext, cblas, gimmik, packing,\n provider, types, xsmm)\n\n # Register our data types\n self.base_matrix_cls = types.OpenMPMatrixBase\n self.const_matrix_cls = types.OpenMPConstMatrix\n self.matrix_cls = types.OpenMPMatrix\n self.matrix_bank_cls = types.OpenMPMatrixBank\n self.matrix_slice_cls = types.OpenMPMatrixSlice\n self.queue_cls = types.OpenMPQueue\n self.view_cls = types.OpenMPView\n self.xchg_matrix_cls = types.OpenMPXchgMatrix\n self.xchg_view_cls = types.OpenMPXchgView\n\n # Instantiate mandatory kernel provider classes\n kprovcls = [provider.OpenMPPointwiseKernelProvider,\n blasext.OpenMPBlasExtKernels,\n packing.OpenMPPackingKernels,\n gimmik.OpenMPGiMMiKKernels]\n self._providers = [k(self) for k in kprovcls]\n\n # Instantiate optional kernel provider classes\n for k in [xsmm.OpenMPXSMMKernels, cblas.OpenMPCBLASKernels]:\n try:\n self._providers.append(k(self))\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n pass\n\n # Pointwise kernels\n self.pointwise = self._providers[0]\n\n def _malloc_impl(self, nbytes):\n data = np.zeros(nbytes + self.alignb, dtype=np.uint8)\n offset = -data.ctypes.data % self.alignb\n\n return data[offset:nbytes + offset]\n" ]
[ [ "numpy.dtype", "numpy.zeros" ] ]
lartpang/ZoomNet
[ "1f329e80db5469eaf6a513ec384cd19bafdaece2" ]
[ "utils/pipeline/dataloader.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2021/5/29\n# @Author : Lart Pang\n# @GitHub : https://github.com/lartpang\n\nfrom functools import partial\n\nfrom torch.utils import data\n\nfrom utils import builder, misc\n\n\ndef get_tr_loader(cfg, shuffle=True, drop_last=True, pin_memory=True):\n dataset = builder.build_obj_from_registry(\n registry_name=\"DATASETS\",\n obj_name=cfg.datasets.train.dataset_type,\n obj_cfg=dict(\n root=[(name, path) for name, path in cfg.datasets.train.path.items()],\n shape=cfg.datasets.train.shape,\n extra_scales=cfg.train.ms.extra_scales if cfg.train.ms.enable else None,\n interp_cfg=cfg.datasets.train.get(\"interp_cfg\", None),\n ),\n )\n if cfg.use_ddp:\n train_sampler = data.distributed.DistributedSampler(dataset, shuffle=shuffle)\n shuffle = False\n else:\n train_sampler = None\n shuffle = shuffle\n\n if cfg.train.ms.enable:\n collate_fn = getattr(dataset, \"collate_fn\", None)\n assert collate_fn is not None\n else:\n collate_fn = None\n\n loader = data.DataLoader(\n dataset=dataset,\n batch_size=cfg.train.batch_size,\n sampler=train_sampler,\n shuffle=shuffle,\n num_workers=cfg.train.num_workers,\n drop_last=drop_last,\n pin_memory=pin_memory,\n collate_fn=collate_fn,\n worker_init_fn=partial(misc.customized_worker_init_fn, base_seed=cfg.base_seed)\n if cfg.use_custom_worker_init\n else None,\n )\n print(f\"Length of Trainset: {len(dataset)}\")\n return loader\n\n\ndef get_te_loader(cfg, shuffle=False, drop_last=False, pin_memory=True) -> list:\n for i, (te_data_name, te_data_path) in enumerate(cfg.datasets.test.path.items()):\n dataset = builder.build_obj_from_registry(\n registry_name=\"DATASETS\",\n obj_name=cfg.datasets.test.dataset_type,\n obj_cfg=dict(\n root=(te_data_name, te_data_path),\n shape=cfg.datasets.test.shape,\n interp_cfg=cfg.datasets.test.get(\"interp_cfg\", None),\n ),\n )\n\n loader = data.DataLoader(\n dataset=dataset,\n batch_size=cfg.test.batch_size,\n num_workers=cfg.test.num_workers,\n shuffle=shuffle,\n drop_last=drop_last,\n pin_memory=pin_memory,\n collate_fn=getattr(dataset, \"collate_fn\", None),\n worker_init_fn=partial(misc.customized_worker_init_fn, base_seed=cfg.base_seed)\n if cfg.use_custom_worker_init\n else None,\n )\n print(f\"Testing with testset: {te_data_name}: {len(dataset)}\")\n yield te_data_name, te_data_path, loader\n" ]
[ [ "torch.utils.data.distributed.DistributedSampler" ] ]
luca-morreale/semantic-segmentation-pytorch
[ "d823fb4115a7ef5c8d47b3e5995a498bbcd9a9b6" ]
[ "visualization/kitti_visualizer.py" ]
[ "import os\nimport numpy as np\nfrom lib.utils.utils import unique\nfrom visualization.utils_name_generation import generate_image_name\nimport cv2\n\ncolormap = {\n 0: (128, 128, 128), # Sky\n 1: (128, 0, 0), # Building\n 2: (128, 64, 128), # Road\n 3: (0, 0, 192), # Sidewalk\n 4: (64, 64, 128), # Fence\n 5: (128, 128, 0), # Vegetation\n 6: (192, 192, 128), # Pole\n 7: (64, 0, 128), # Car\n 8: (192, 128, 128), # Sign\n 9: (64, 64, 0), # Pedestrian\n 10: (0, 128, 192), # Cyclist\n 11: (0, 0, 0) # Void\n}\n\nconversion_list = {\n 1: 1, #wall\n 2: 1, #building;edifice\n 3: 0, #sky\n 4: 2, #floor;flooring\n 5: 5, #tree\n 6: 1, #ceiling\n 7: 2, #road;route\n 8: 11, #bed\n 9: 1, #windowpane;window\n 10: 5, #grass\n 11: 11, #cabinet\n 12: 3, #sidewalk;pavement\n 13: 9, #person;individual;someone;somebody;mortal;soul\n 14: 2, #earth;ground\n 15: 1, #door;double;door\n 16: 11, #table\n 17: 11, #mountain;mount\n 18: 5, #plant;flora;plant;life\n 19: 11, #curtain;drape;drapery;mantle;pall\n 20: 11, #chair\n 21: 7, #car;auto;automobile;machine;motorcar\n 22: 11, #water\n 23: 11, #painting;picture\n 24: 11, #sofa;couch;lounge\n 25: 11, #shelf\n 26: 1, #house\n 27: 11, #sea\n 28: 11, #mirror\n 29: 11, #rug;carpet;carpeting\n 30: 2, #field\n 31: 11, #armchair\n 32: 11, #seat\n 33: 4, #fence;fencing\n 34: 11, #desk\n 35: 11, #rock;stone\n 36: 11, #wardrobe;closet;press\n 37: 6, #lamp\n 38: 11, #bathtub;bathing;tub;bath;tub\n 39: 4, #railing;rail\n 40: 11, #,cushion\n 41: 11, #base;pedestal;stand\n 42: 11, #box\n 43: 6, #column;pillar\n 44: 8, #signboard;sign\n 45: 11, #chest;of;drawers;chest;bureau;dresser\n 46: 11, #counter\n 47: 2, #sand\n 48: 11, #sink\n 49: 1, #skyscraper\n 50: 11, #fireplace;hearth;open;fireplace\n 51: 11, #refrigerator;icebox\n 52: 11, #grandstand;covered;stand\n 53: 2, #,path\n 54: 4, #stairs;steps\n 55: 2, #runway\n 56: 1, #case;display;case;showcase;vitrine\n 57: 11, #pool;table;billiard;table;snooker;table\n 58: 11, #pillow\n 59: 11, #screen;door;screen\n 60: 4, #stairway;staircase\n 61: 11, #river\n 62: 11, #,bridge;span\n 63: 11, #bookcase\n 64: 11, #blind;screen\n 65: 11, #coffee;table;cocktail;table\n 66: 11, #toilet;can;commode;crapper;pot;potty;stool;throne\n 67: 11, #flower\n 68: 11, #book\n 69: 11, #hill\n 70: 11, #bench\n 71: 11, #countertop\n 72: 11, #stove;kitchen;stove;range;kitchen;range;cooking;stove\n 73: 11, #palm;palm;tree\n 74: 11, #kitchen;island\n 75: 11, #computer;computing;machine;computing;device;data;processor;electronic;computer;information;processing;system\n 76: 11, #swivel;chair\n 77: 11, #boat\n 78: 11, #bar\n 79: 11, #arcade;machine\n 80: 11, #hovel;hut;hutch;shack;shanty\n 81: 7, #bus;autobus;coach;charabanc;double-decker;jitney;motorbus;motorcoach;omnibus;passenger;vehicle\n 82: 11, #towel\n 83: 6, #light;light;source\n 84: 7, #truck;motortruck\n 85: 1, #tower\n 86: 11, #chandelier;pendant;pendent\n 87: 11, #awning;sunshade;sunblind\n 88: 6, #streetlight;street;lamp\n 89: 11, #booth;cubicle;stall;kiosk\n 90: 11, #television;television;receiver;television;set;tv;tv;set;idiot;box;boob;tube;telly;goggle;box\n 91: 11, #airplane;aeroplane;plane\n 92: 11, #dirt;track\n 93: 11, #apparel;wearing;apparel;dress;clothes\n 94: 6, #pole\n 95: 3, #land;ground;soil\n 96: 11, #bannister;banister;balustrade;balusters;handrail\n 97: 11, #escalator;moving;staircase;moving;stairway\n 98: 11, #ottoman;pouf;pouffe;puff;hassock\n 99: 11, #bottle\n 100: 11, #buffet;counter;sideboard\n 101: 11, #poster;posting;placard;notice;bill;card\n 102: 11, #stage\n 103: 7, #van\n 
104: 11, #ship\n 105: 11, #fountain\n 106: 11, #conveyer;belt;conveyor;belt;conveyer;conveyor;transporter\n 107: 11, #canopy\n 108: 11, #washer;automatic;washer;washing;machine\n 109: 11, #plaything;toy\n 110: 11, #swimming;pool;swimming;bath;natatorium\n 111: 11, #0,stool\n 112: 11, #barrel;cask\n 113: 11, #basket;handbasket\n 114: 11, #waterfall;falls\n 115: 11, #tent;collapsible;shelter\n 116: 11, #bag\n 117: 10, #minibike;motorbike\n 118: 11, #cradle\n 119: 11, #oven\n 120: 11, #ball\n 121: 11, #food;solid;food\n 122: 11, #step;stair\n 123: 7, #tank;storage;tank\n 124: 11, #trade;name;brand;name;brand;marque\n 125: 11, #microwave;microwave;oven\n 126: 11, #pot;flowerpot\n 127: 11, #animal;animate;being;beast;brute;creature;fauna\n 128: 10, #bicycle;bike;wheel;cycle\n 129: 11, #lake\n 130: 11, #dishwasher;dish;washer;dishwashing;machine\n 131: 11, #screen;silver;screen;projection;screen\n 132: 11, #blanket;cover\n 133: 11, #sculpture\n 134: 11, #hood;exhaust;hood\n 135: 11, #sconce\n 136: 11, #vase\n 137: 8, #traffic;light;traffic;signal;stoplight\n 138: 11, #tray\n 139: 11, #ashcan;trash;can;garbage;can;wastebin;ash;bin;ash-bin;ashbin;dustbin;trash;barrel;trash;bin\n 140: 11, #fan\n 141: 11, #pier;wharf;wharfage;dock\n 142: 11, #crt;screen\n 143: 11, #plate\n 144: 11, #monitor;monitoring;device\n 145: 11, #bulletin;board;notice;board\n 146: 11, #shower\n 147: 11, #radiator\n 148: 11, #glass;drinking;glass\n 149: 11, #clock\n 150: 11, #flag\n}\n\n\ndef convert_labels_to_kitti(predictions, mode='BGR'):\n predictions = predictions.astype('int')\n labelmap_kitti = np.zeros(predictions.shape, dtype=np.uint8)\n labelmap_rgb = np.zeros((predictions.shape[0], predictions.shape[1], 3),\n dtype=np.uint8)\n for label in unique(predictions):\n if label < 0:\n continue\n\n label_kitti = conversion_list[label + 1]\n\n labelmap_rgb += (predictions == label)[:, :, np.newaxis] * \\\n np.tile(np.uint8(colormap[label_kitti]),\n (predictions.shape[0], predictions.shape[1], 1))\n labelmap_kitti[predictions == label] = label_kitti\n\n if mode == 'BGR':\n return labelmap_kitti, labelmap_rgb[:, :, ::-1]\n else:\n return labelmap_kitti, labelmap_rgb\n\n\ndef visualize_result(data, preds, args):\n (img, info) = data\n\n kitti_pred, pred_color = convert_labels_to_kitti(preds)\n\n # aggregate images and save\n im_vis = pred_color.astype(np.uint8)\n\n img_name_rgb, img_name = generate_image_name(info)\n a = os.path.join(args.output_path, img_name_rgb)\n print(a)\n cv2.imwrite(os.path.join(args.output_path, img_name_rgb), im_vis)\n\n # aggregate images and save\n im_vis = kitti_pred.astype(np.uint8)\n cv2.imwrite(os.path.join(args.output_path, img_name), im_vis)\n\n" ]
[ [ "numpy.zeros", "numpy.uint8" ] ]
sofieditmer/deep_learning
[ "43f7f97f09aef1057e088356094d3e869cff5cba" ]
[ "utils/utils.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nThis script stores a plotting function.\n\"\"\"\n\n# Dependencies\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Function for plotting loss and accuracy learning curves.\ndef plot_history(H, epochs):\n \"\"\"\n Utility function for plotting model history using matplotlib\n \n H: model history \n epochs: number of epochs for which the model was trained\n \"\"\"\n plt.style.use(\"fivethirtyeight\")\n plt.figure()\n plt.plot(np.arange(0, epochs), H.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"accuracy\"], label=\"train_acc\")\n plt.plot(np.arange(0, epochs), H.history[\"val_accuracy\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.tight_layout()\n plt.show()" ]
[ [ "matplotlib.pyplot.style.use", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ] ]
something678/TodKat
[ "b26d9c617684e60cd25ff225a71adb6bfa3b0a6c" ]
[ "sentence_transformers/SentenceTransformer.py" ]
[ "import json\nimport logging\nimport os\nimport shutil\nfrom collections import OrderedDict\nfrom typing import List, Dict, Tuple, Iterable, Type\nfrom zipfile import ZipFile\nimport sys\n\nimport numpy as np\nimport transformers\nimport torch\nfrom numpy import ndarray\nfrom torch import nn, Tensor\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm, trange\n\nfrom . import __DOWNLOAD_SERVER__\nfrom .evaluation import SentenceEvaluator\nfrom .util import import_from_string, batch_to_device, http_get\nfrom . import __version__\n\n\nclass SentenceTransformer(nn.Sequential):\n def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):\n if modules is not None and not isinstance(modules, OrderedDict):\n modules = OrderedDict(\n [(str(idx), module) for idx, module in enumerate(modules)])\n\n if model_name_or_path is not None and model_name_or_path != \"\":\n logging.info(\"Load pretrained DialogTransformer: {}\".format(\n model_name_or_path))\n\n if '/' not in model_name_or_path and '\\\\' not in model_name_or_path and not os.path.isdir(model_name_or_path):\n logging.info(\"Did not find a / or \\\\ in the name. Assume to download model from server\")\n model_name_or_path = __DOWNLOAD_SERVER__ + model_name_or_path + '.zip'\n\n if model_name_or_path.startswith('http://') or model_name_or_path.startswith('https://'):\n model_url = model_name_or_path\n folder_name = model_url.replace(\"https://\", \"\").replace(\"http://\", \"\").replace(\"/\", \"_\")[:250]\n\n # print('===================')\n\n try:\n from torch.hub import _get_torch_home\n torch_cache_home = _get_torch_home()\n if torch_cache_home.startswith(\n 'C:\\\\Users\\\\something678/.cache\\\\torch'):\n torch_cache_home = torch_cache_home.replace(\n 'C:\\\\Users\\\\something678/.cache\\\\torch',\n ('G:\\\\KnowledgeBaseData'\n '\\\\sentenceTransformers_datasets'\n '\\\\downloaded_saved_model'))\n elif torch_cache_home.startswith(\n '/home/something678/.cache/torch'):\n torch_cache_home = torch_cache_home.replace(\n '/home/something678/.cache/torch',\n ('/media/Data1/something678/sentence-transformers-master'\n '/my_downloaded_saved_model'))\n\n # print('=================== didnt enter exception')\n except ImportError:\n torch_cache_home = os.path.expanduser(\n os.getenv('TORCH_HOME', os.path.join(\n os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))\n default_cache_path = os.path.join(torch_cache_home, 'sentence_transformers')\n model_path = os.path.join(default_cache_path, folder_name)\n os.makedirs(model_path, exist_ok=True)\n\n if not os.listdir(model_path):\n if model_url[-1] is \"/\":\n model_url = model_url[:-1]\n logging.info(\"Downloading sentence transformer model from {} and saving it at {}\".format(model_url, model_path))\n try:\n zip_save_path = os.path.join(model_path, 'model.zip')\n http_get(model_url, zip_save_path)\n with ZipFile(zip_save_path, 'r') as zip:\n zip.extractall(model_path)\n except Exception as e:\n shutil.rmtree(model_path)\n raise e\n else:\n model_path = model_name_or_path\n\n #### Load from disk\n if model_path is not None:\n logging.info(\"Load SentenceTransformer from folder: {}\".format(model_path))\n\n if os.path.exists(os.path.join(model_path, 'config.json')):\n with open(os.path.join(model_path, 'config.json')) as fIn:\n config = json.load(fIn)\n if config['__version__'] > __version__:\n logging.warning(\"You try to use a model that was created with version {}, however, your version is {}. 
This might cause unexpected behavior or errors. In that case, try to update to the latest version.\\n\\n\\n\".format(config['__version__'], __version__))\n\n with open(os.path.join(model_path, 'modules.json')) as fIn:\n contained_modules = json.load(fIn)\n\n # the modules are bert, LSTM and so-on\n modules = OrderedDict()\n for module_config in contained_modules:\n module_class = import_from_string(module_config['type'])\n module = module_class.load(os.path.join(model_path, module_config['path']))\n modules[module_config['name']] = module\n\n super().__init__(modules)\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n logging.info(\"Use pytorch device: {}\".format(device))\n self.device = torch.device(device)\n self.to(device)\n\n def encode(self, sentences: List[str], batch_size: int = 8, show_progress_bar: bool = None) -> List[ndarray]:\n \"\"\"\n :param sentences:\n the sentences to embed\n :param batch_size:\n the batch size used for the computation\n :param show_progress_bar:\n Output a progress bar when encode sentences\n :return:\n a list with ndarrays of the embeddings for each sentence\n \"\"\"\n if show_progress_bar is None:\n show_progress_bar = (logging.getLogger().getEffectiveLevel()==logging.INFO or logging.getLogger().getEffectiveLevel()==logging.DEBUG)\n\n all_embeddings = []\n length_sorted_idx = np.argsort([len(sen) for sen in sentences])\n\n iterator = range(0, len(sentences), batch_size)\n if show_progress_bar:\n iterator = tqdm(iterator, desc=\"Batches\")\n\n for batch_idx in iterator:\n batch_tokens = []\n\n batch_start = batch_idx\n batch_end = min(batch_start + batch_size, len(sentences))\n\n longest_seq = 0\n\n for idx in length_sorted_idx[batch_start: batch_end]:\n sentence = sentences[idx]\n tokens = self.tokenize(sentence)\n longest_seq = max(longest_seq, len(tokens))\n batch_tokens.append(tokens)\n\n features = {}\n for text in batch_tokens:\n sentence_features = self.get_sentence_features(text, longest_seq)\n\n for feature_name in sentence_features:\n if feature_name not in features:\n features[feature_name] = []\n features[feature_name].append(sentence_features[feature_name])\n\n for feature_name in features:\n features[feature_name] = torch.tensor(np.asarray(features[feature_name])).to(self.device)\n\n with torch.no_grad():\n embeddings = self.forward(features)\n embeddings = embeddings['sentence_embedding'].to('cpu').numpy()\n all_embeddings.extend(embeddings)\n\n reverting_order = np.argsort(length_sorted_idx)\n all_embeddings = [all_embeddings[idx] for idx in reverting_order]\n\n return all_embeddings\n\n def get_max_seq_length(self):\n if hasattr(self._first_module(), 'max_seq_length'):\n return self._first_module().max_seq_length\n\n return None\n\n def tokenize(self, text):\n return self._first_module().tokenize(text)\n\n def get_sentence_features(self, *features):\n return self._first_module().get_sentence_features(*features)\n\n def get_sentence_embedding_dimension(self):\n return self._last_module().get_sentence_embedding_dimension()\n\n def _first_module(self):\n \"\"\"Returns the first module of this sequential embedder\"\"\"\n return self._modules[next(iter(self._modules))]\n\n def _last_module(self):\n \"\"\"Returns the last module of this sequential embedder\"\"\"\n return self._modules[next(reversed(self._modules))]\n\n def save(self, path):\n \"\"\"\n Saves all elements for this seq. 
sentence embedder into different sub-folders\n \"\"\"\n if path is None:\n return\n\n logging.info(\"Save model to {}\".format(path))\n contained_modules = []\n\n for idx, name in enumerate(self._modules):\n module = self._modules[name]\n model_path = os.path.join(path, str(idx)+\"_\"+type(module).__name__)\n os.makedirs(model_path, exist_ok=True)\n module.save(model_path)\n contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})\n\n with open(os.path.join(path, 'modules.json'), 'w') as fOut:\n json.dump(contained_modules, fOut, indent=2)\n\n with open(os.path.join(path, 'config.json'), 'w') as fOut:\n json.dump({'__version__': __version__}, fOut, indent=2)\n\n def smart_batching_collate(self, batch):\n \"\"\"\n Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model\n batchsizes vary among the batches.\n the list of two-sentnce pairs are batched so that\n they can be fed to bert\n Actually it converts instances to the batches\n The dataloader has default collate_fn, that is, each batch is a list,\n and [0] is feature[0], [1] is feature[1], etc., see collate_fn in\n dataloader.py for detailed usages\n\n :param batch:\n a batch from a SmartBatchingDataset\n :return:\n a batch of tensors for the model\n \"\"\"\n num_texts = len(batch[0][0])\n\n labels = []\n paired_texts = [[] for _ in range(num_texts)]\n max_seq_len = [0] * num_texts\n for tokens, label in batch:\n labels.append(label)\n for i in range(num_texts):\n paired_texts[i].append(tokens[i])\n max_seq_len[i] = max(max_seq_len[i], len(tokens[i]))\n\n features = []\n for idx in range(num_texts):\n max_len = max_seq_len[idx]\n feature_lists = {}\n for text in paired_texts[idx]:\n sentence_features = self.get_sentence_features(text, max_len)\n\n for feature_name in sentence_features:\n if feature_name not in feature_lists:\n feature_lists[feature_name] = []\n feature_lists[feature_name].append(sentence_features[feature_name])\n\n for feature_name in feature_lists:\n feature_lists[feature_name] = torch.tensor(np.asarray(feature_lists[feature_name]))\n\n features.append(feature_lists)\n\n return {'features': features, 'labels': torch.stack(labels)}\n\n def fit(self,\n train_objectives: Iterable[Tuple[DataLoader, nn.Module]],\n evaluator: SentenceEvaluator,\n epochs: int = 1,\n steps_per_epoch = None,\n scheduler: str = 'WarmupLinear',\n warmup_steps: int = 10000,\n optimizer_class: Type[Optimizer] = transformers.AdamW,\n optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},\n weight_decay: float = 0.01,\n evaluation_steps: int = 0,\n output_path: str = None,\n save_best_model: bool = True,\n max_grad_norm: float = 1,\n fp16: bool = False,\n fp16_opt_level: str = 'O1',\n local_rank: int = -1\n ):\n \"\"\"\n :param weight_decay:\n :param scheduler:\n :param warmup_steps:\n :param optimizer:\n :param evaluation_steps:\n :param output_path:\n :param save_best_model:\n :param max_grad_norm:\n :param fp16:\n :param fp16_opt_level:\n :param local_rank:\n :param train_objectives:\n Tuples of DataLoader and LossConfig\n :param evaluator:\n :param epochs:\n :param steps_per_epoch: Train for x steps in each epoch. 
If set to None, the length of the dataset will be used\n \"\"\"\n if output_path is not None:\n os.makedirs(output_path, exist_ok=True)\n if os.listdir(output_path):\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(\n output_path))\n\n dataloaders = [dataloader for dataloader, _ in train_objectives]\n '''\n Each dataloader corresponds to a model, denoted as the train_objectives here\n '''\n\n # Use smart batching\n for dataloader in dataloaders:\n dataloader.collate_fn = self.smart_batching_collate\n\n '''\n '''\n loss_models = [loss for _, loss in train_objectives]\n # retrieve the loss_models\n device = self.device\n for loss_model in loss_models:\n loss_model.to(device)\n\n self.best_score = -9999999\n\n if steps_per_epoch is None or steps_per_epoch == 0:\n steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])\n # the smallerest dataset determines the steps_per_epoch, that is\n # the num_of_batches per epoch\n\n num_train_steps = int(steps_per_epoch * epochs)\n\n # Prepare optimizers\n optimizers = []\n schedulers = []\n # for each epoch\n # >>> lambda1 = lambda epoch: epoch // 30\n # >>> lambda2 = lambda epoch: 0.95 ** epoch\n # >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])\n for loss_model in loss_models:\n param_optimizer = list(loss_model.named_parameters())\n '''\n Choose parameters to optimize\n '''\n\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n t_total = num_train_steps\n if local_rank != -1:\n t_total = t_total // torch.distributed.get_world_size()\n\n optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)\n\n scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=t_total)\n\n optimizers.append(optimizer)\n schedulers.append(scheduler_obj)\n\n if fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n for train_idx in range(len(loss_models)):\n model, optimizer = amp.initialize(loss_models[train_idx], optimizers[train_idx], opt_level=fp16_opt_level)\n loss_models[train_idx] = model\n optimizers[train_idx] = optimizer\n\n global_step = 0\n # steps_per_epoch * number_of_loss_models\n data_iterators = [iter(dataloader) for dataloader in dataloaders]\n\n num_train_objectives = len(train_objectives)\n\n for epoch in trange(epochs, desc=\"Epoch\"):\n training_steps = 0\n\n for loss_model in loss_models:\n loss_model.zero_grad()\n loss_model.train()\n\n for _ in trange(steps_per_epoch, desc=\"Iteration\", smoothing=0.05):\n for train_idx in range(num_train_objectives):\n loss_model = loss_models[train_idx]\n optimizer = optimizers[train_idx]\n scheduler = schedulers[train_idx]\n data_iterator = data_iterators[train_idx]\n\n try:\n data = next(data_iterator)\n except StopIteration:\n # logging.info(\"Restart data_iterator\")\n data_iterator = iter(dataloaders[train_idx])\n data_iterators[train_idx] = data_iterator\n data = next(data_iterator)\n\n features, labels = batch_to_device(data, self.device)\n loss_value = loss_model(features, labels)\n\n if fp16:\n with amp.scale_loss(loss_value, optimizer) as scaled_loss:\n # scale the loss_value by the amplifier\n 
scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)\n else:\n loss_value.backward()\n torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)\n\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n\n training_steps += 1\n global_step += 1\n\n if evaluation_steps > 0 and training_steps % evaluation_steps == 0:\n self._eval_during_training(evaluator, output_path, save_best_model, epoch, training_steps)\n for loss_model in loss_models:\n loss_model.zero_grad()\n loss_model.train()\n self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1)\n\n def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):\n \"\"\"\n :param evaluator:\n the evaluator\n :param output_path:\n the evaluator can write the results to this path\n \"\"\"\n if output_path is not None:\n os.makedirs(output_path, exist_ok=True)\n return evaluator(self, output_path)\n\n def _eval_during_training(\n self, evaluator, output_path, save_best_model, epoch, steps):\n \"\"\"Runs evaluation during the training\"\"\"\n if evaluator is not None:\n score = evaluator(\n self, output_path=output_path, epoch=epoch, steps=steps)\n if score > self.best_score and save_best_model:\n self.save(output_path)\n self.best_score = score\n\n def _get_scheduler(\n self, optimizer, scheduler: str, warmup_steps: int, t_total: int):\n \"\"\"\n Returns the correct learning rate scheduler\n \"\"\"\n scheduler = scheduler.lower()\n if scheduler == 'constantlr':\n return transformers.get_constant_schedule(optimizer)\n elif scheduler == 'warmupconstant':\n # this uses warmup\n return transformers.get_constant_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps)\n elif scheduler == 'warmuplinear':\n return transformers.get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n elif scheduler == 'warmupcosine':\n return transformers.get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n elif scheduler == 'warmupcosinewithhardrestarts':\n return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n else:\n raise ValueError(\"Unknown scheduler {}\".format(scheduler))\n" ]
[ [ "torch.hub._get_torch_home", "torch.stack", "torch.distributed.get_world_size", "torch.no_grad", "numpy.argsort", "numpy.asarray", "torch.cuda.is_available", "torch.device" ] ]
cnakhl/quimb
[ "482a21ebdaa0e8236924dbbdc435e8de68d86719" ]
[ "quimb/tensor/drawing.py" ]
[ "\"\"\"Functionailty for drawing tensor networks.\n\"\"\"\nimport textwrap\nimport importlib\nimport collections\n\nimport numpy as np\n\nfrom ..utils import valmap\n\n\nHAS_FA2 = importlib.util.find_spec('fa2') is not None\n\n\ndef parse_dict_to_tids_or_inds(spec, tn, default='__NONE__'):\n \"\"\"Parse a dictionary possibly containing a mix of tags, tids and inds, to\n a dictionary with only sinlge tids and inds as keys. If a tag or set of\n tags are given as a key, all matching tensor tids will receive the value.\n \"\"\"\n #\n if (spec is not None) and (not isinstance(spec, dict)):\n # assume new default value for everything\n return collections.defaultdict(lambda: spec)\n\n # allow not specifying a default value\n if default != '__NONE__':\n new = collections.defaultdict(lambda: default)\n else:\n new = {}\n\n if spec is None:\n return new\n\n # parse the special values\n for k, v in spec.items():\n if (\n # given as tid\n (isinstance(k, int) and k in tn.tensor_map) or\n # given as ind\n (isinstance(k, str) and k in tn.ind_map)\n ):\n # already a tid\n new[k] = v\n continue\n\n for tid in tn._get_tids_from_tags(k):\n new[tid] = v\n\n return new\n\n\ndef _add_or_merge_edge(G, u, v, attrs):\n if not G.has_edge(u, v):\n G.add_edge(u, v, **attrs)\n else:\n # multibond - update attrs\n attrs0 = G.edges[u, v]\n # average colors\n attrs0['color'] = tuple(\n (x + y) / 2 for x, y in zip(attrs0['color'], attrs['color']))\n attrs0['ind'] += ' ' + attrs['ind']\n # hide original edge and instead track multiple bond sizes\n attrs0['multiedge_inds'].append(attrs['ind'])\n attrs0['multiedge_sizes'].append(attrs['edge_size'])\n attrs0['spring_weight'] /= (attrs['edge_size'] + 1)\n attrs0['edge_size'] = 0\n\n\ndef draw_tn(\n tn,\n color=None,\n *,\n output_inds=None,\n highlight_inds=(),\n highlight_tids=(),\n highlight_inds_color=(1.0, 0.2, 0.2, 1.0),\n highlight_tids_color=(1.0, 0.2, 0.2, 1.0),\n show_inds=None,\n show_tags=None,\n show_scalars=True,\n custom_colors=None,\n title=None,\n legend=True,\n fix=None,\n k=None,\n iterations=200,\n initial_layout='spectral',\n use_forceatlas2=1000,\n use_spring_weight=False,\n node_color=None,\n node_scale=1.0,\n node_size=None,\n node_shape='o',\n node_outline_size=None,\n node_outline_darkness=0.8,\n node_hatch='',\n edge_color=None,\n edge_scale=1.0,\n edge_alpha=1 / 2,\n multiedge_spread=0.1,\n show_left_inds=True,\n arrow_closeness=1.1,\n arrow_length=1.0,\n arrow_overhang=1.0,\n arrow_linewidth=1.0,\n label_color=None,\n font_size=10,\n font_size_inner=7,\n figsize=(6, 6),\n margin=None,\n xlims=None,\n ylims=None,\n get=None,\n return_fig=False,\n ax=None,\n):\n \"\"\"Plot this tensor network as a networkx graph using matplotlib,\n with edge width corresponding to bond dimension.\n\n Parameters\n ----------\n color : sequence of tags, optional\n If given, uniquely color any tensors which have each of the tags.\n If some tensors have more than of the tags, only one color will show.\n output_inds : sequence of str, optional\n For hyper tensor networks explicitly specify which indices should be\n drawn as outer indices. 
If not set, the outer indices are assumed to be\n those that only appear on a single tensor.\n highlight_inds : iterable, optional\n Highlight these edges.\n highlight_tids : iterable, optional\n Highlight these nodes.\n highlight_inds_color\n What color to use for ``highlight_inds`` nodes.\n highlight_tids_color : tuple[float], optional\n What color to use for ``highlight_tids`` nodes.\n show_inds : {None, False, True, 'all', 'bond-size'}, optional\n Explicitly turn on labels for each tensors indices.\n show_tags : {None, False, True}, optional\n Explicitly turn on labels for each tensors tags.\n show_scalars : bool, optional\n Whether to show scalar tensors (floating nodes with no edges).\n custom_colors : sequence of colors, optional\n Supply a custom sequence of colors to match the tags given\n in ``color``.\n title : str, optional\n Set a title for the axis.\n legend : bool, optional\n Whether to draw a legend for the colored tags.\n fix : dict[tags_ind_or_tid], (float, float)], optional\n Used to specify actual relative positions for each tensor node.\n Each key should be a sequence of tags that uniquely identifies a\n tensor, a ``tid``, or a ``ind``, and each value should be a ``(x, y)``\n coordinate tuple.\n k : float, optional\n The optimal distance between nodes.\n iterations : int, optional\n How many iterations to perform when when finding the best layout\n using node repulsion. Ramp this up if the graph is drawing messily.\n initial_layout : {'spectral', 'kamada_kawai', 'circular', 'planar', \\\\\n 'random', 'shell', 'bipartite', ...}, optional\n The name of a networkx layout to use before iterating with the\n spring layout. Set ``iterations=0`` if you just want to use this\n layout only.\n use_forceatlas2 : bool or int, optional\n Whether to try and use ``forceatlas2`` (``fa2``) for the spring layout\n relaxation instead of ``networkx``. If an integer, only try and use\n beyond that many nodes (it can give messier results on smaller graphs).\n use_spring_weight : bool, optional\n Whether to use inverse bond sizes as spring weights to the force\n repulsion layout algorithms.\n node_color : tuple[float], optional\n Default color of nodes.\n node_size : None, float or dict, optional\n How big to draw the tensors. Can be a global single value, or a dict\n containing values for specific tags or tids. This is in absolute\n figure units. See ``node_scale`` simply scale the node sizes up or\n down.\n node_scale : float, optional\n Scale the node sizes by this factor, in addition to the automatica\n scaling based on the number of tensors.\n node_shape : None, str or dict, optional\n What shape to draw the tensors. Should correspond to a matplotlib\n scatter marker. Can be a global single value, or a dict containing\n values for specific tags or tids.\n node_outline_size : None, float or dict, optional\n The width of the border of each node. 
Can be a global single value, or\n a dict containing values for specific tags or tids.\n node_outline_darkness : float, optional\n Darkening of nodes outlines.\n edge_color : tuple[float], optional\n Default color of edges.\n edge_scale : float, optional\n How much to scale the width of the edges.\n edge_alpha : float, optional\n Set the alpha (opacity) of the drawn edges.\n multiedge_spread : float, optional\n How much to spread the lines of multi-edges.\n show_left_inds : bool, optional\n Whether to show ``tensor.left_inds`` as incoming arrows.\n arrow_closeness : float, optional\n How close to draw the arrow to its target.\n arrow_length : float, optional\n The size of the arrow with respect to the edge.\n arrow_overhang : float, optional\n Varies the arrowhead between a triangle (0.0) and 'V' (1.0).\n label_color : tuple[float], optional\n Color to draw labels with.\n font_size : int, optional\n Font size for drawing tags and outer indices.\n font_size_inner : int, optional\n Font size for drawing inner indices.\n figsize : tuple of int\n The size of the drawing.\n margin : None or float, optional\n Specify an argument for ``ax.margin``, else the plot limits will try\n and be computed based on the node positions and node sizes.\n xlims : None or tuple, optional\n Explicitly set the x plot range.\n xlims : None or tuple, optional\n Explicitly set the y plot range.\n get : {None, 'pos', 'graph'}, optional\n If ``None`` then plot as normal, else if:\n\n - ``'pos'``, return the plotting positions of each ``tid`` and\n ``ind`` drawn as a node, this can supplied to subsequent calls as\n ``fix=pos`` to maintain positions, even as the graph structure\n changes.\n - ``'graph'``, return the ``networkx.Graph`` object. Note that this\n will potentially have extra nodes representing output and hyper\n indices.\n\n return_fig : bool, optional\n If True and ``ax is None`` then return the figure created rather than\n executing ``pyplot.show()``.\n ax : matplotlib.Axis, optional\n Draw the graph on this axis rather than creating a new figure.\n \"\"\"\n import networkx as nx\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n import matplotlib.patches as patches\n from matplotlib.colors import to_rgb\n import math\n\n if output_inds is None:\n output_inds = set(tn.outer_inds())\n elif isinstance(output_inds, str):\n output_inds = {output_inds}\n else:\n output_inds = set(output_inds)\n\n # automatically decide whether to show tags and inds\n if show_inds is None:\n show_inds = (tn.num_tensors <= 20)\n if show_tags is None:\n show_tags = (tn.num_tensors <= 20)\n\n isdark = sum(to_rgb(mpl.rcParams['figure.facecolor'])) / 3 < 0.5\n if isdark:\n draw_color = (0.75, 0.77, 0.80, 1.0)\n else:\n draw_color = (0.45, 0.47, 0.50, 1.0)\n\n if edge_color is None:\n edge_color = draw_color\n else:\n edge_color = mpl.colors.to_rgb(edge_color)\n\n if node_color is None:\n node_color = draw_color\n else:\n node_color = mpl.colors.to_rgb(node_color)\n\n # set the size of the nodes and their border\n node_size = parse_dict_to_tids_or_inds(\n node_size, tn,\n default=node_scale * 1000 / tn.num_tensors**0.7)\n node_outline_size = parse_dict_to_tids_or_inds(\n node_outline_size, tn,\n default=min(3, node_size.default_factory()**0.5 / 5))\n node_shape = parse_dict_to_tids_or_inds(\n node_shape, tn, default='o')\n node_hatch = parse_dict_to_tids_or_inds(\n node_hatch, tn, default='')\n\n if label_color is None:\n label_color = mpl.rcParams['axes.labelcolor']\n\n # build the graph\n G = nx.Graph()\n hyperedges = 
[]\n node_labels = dict()\n edge_labels = dict()\n\n for ix, tids in tn.ind_map.items():\n # general information for this index\n edge_attrs = {\n 'color': (highlight_inds_color if ix in highlight_inds else\n edge_color),\n 'ind': ix,\n 'edge_size': edge_scale * math.log2(tn.ind_size(ix)),\n }\n edge_attrs['multiedge_inds'] = [edge_attrs['ind']]\n edge_attrs['multiedge_sizes'] = [edge_attrs['edge_size']]\n edge_attrs['spring_weight'] = 1 / sum(t.ndim for t in tn._inds_get(ix))\n\n if (ix in output_inds) or (len(tids) != 2):\n # hyper or outer edge - needs dummy 'node' shown with zero size\n hyperedges.append(ix)\n for tid in tids:\n _add_or_merge_edge(G, tid, ix, edge_attrs)\n else:\n # standard edge\n _add_or_merge_edge(G, *tids, edge_attrs)\n if show_inds == 'all':\n edge_labels[tuple(tids)] = ix\n elif show_inds == 'bond-size':\n edge_labels[tuple(tids)] = tn.ind_size(ix)\n\n # color the nodes\n colors = get_colors(color, custom_colors)\n\n # set parameters for all the nodes\n for tid, t in tn.tensor_map.items():\n\n if tid not in G.nodes:\n # e.g. tensor is a scalar\n if show_scalars:\n G.add_node(tid)\n else:\n continue\n\n G.nodes[tid]['size'] = node_size[tid]\n G.nodes[tid]['outline_size'] = node_outline_size[tid]\n color = node_color\n for tag in colors:\n if tag in t.tags:\n color = colors[tag]\n if tid in highlight_tids:\n color = highlight_tids_color\n G.nodes[tid]['color'] = color\n G.nodes[tid]['outline_color'] = tuple(\n (1.0 if i == 3 else node_outline_darkness) * c\n for i, c in enumerate(color)\n )\n G.nodes[tid]['marker'] = node_shape[tid]\n G.nodes[tid]['hatch'] = node_hatch[tid]\n if show_tags:\n # make the tags appear with auto vertical extent\n node_label = '{' + str(list(t.tags))[1:-1] + '}'\n node_labels[tid] = \"\\n\".join(textwrap.wrap(\n node_label, max(2 * len(node_label) ** 0.5, 16)\n ))\n\n for hix in hyperedges:\n G.nodes[hix]['ind'] = hix\n G.nodes[hix]['color'] = (1.0, 1.0, 1.0, 1.0)\n G.nodes[hix]['size'] = 0.0\n G.nodes[hix]['outline_size'] = 0.0\n G.nodes[hix]['outline_color'] = (1.0, 1.0, 1.0, 1.0)\n G.nodes[hix]['marker'] = ''\n G.nodes[hix]['hatch'] = ''\n if show_inds == 'all':\n node_labels[hix] = hix\n elif show_inds == 'bond-size':\n node_labels[hix] = tn.ind_size(hix)\n\n if get == 'graph':\n return G\n\n if show_inds == 'bond-size':\n font_size = font_size_inner\n for oix in output_inds:\n node_labels[oix] = tn.ind_size(oix)\n elif show_inds:\n for oix in output_inds:\n node_labels[oix] = oix\n\n pos = get_positions(tn, G, fix, initial_layout, k, iterations,\n use_forceatlas2, use_spring_weight)\n\n if get == 'pos':\n return pos\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n ax.axis('off')\n ax.set_aspect('equal')\n if title is not None:\n ax.set_title(str(title))\n\n xmin = ymin = +float('inf')\n xmax = ymax = -float('inf')\n for xy in pos.values():\n xmin = min(xmin, xy[0])\n xmax = max(xmax, xy[0])\n ymin = min(ymin, xy[1])\n ymax = max(ymax, xy[1])\n\n if margin is None:\n # XXX: pad the plot range so that node circles are not clipped,\n # using the networkx node_size parameter, *which is in absolute\n # units* and so must be inverse transformed using matplotlib!\n inv = ax.transData.inverted()\n real_node_size = (abs(\n inv.transform((0, node_size.default_factory()))[1] -\n inv.transform((0, 0))[1]\n ) ** 0.5) / 4\n ax.set_xlim(xmin - real_node_size, xmax + real_node_size)\n ax.set_ylim(ymin - real_node_size, ymax + real_node_size)\n else:\n ax.margins(margin)\n\n created_fig = True\n else:\n 
created_fig = False\n\n nx.draw_networkx_edges(\n G, pos,\n width=tuple(x[2]['edge_size'] for x in G.edges(data=True)),\n edge_color=tuple(x[2]['color'] for x in G.edges(data=True)),\n alpha=edge_alpha,\n ax=ax,\n )\n\n # draw multiedges\n multiedge_centers = {}\n for i, j, attrs in G.edges(data=True):\n sizes = attrs['multiedge_sizes']\n multiplicity = len(sizes)\n if multiplicity > 1:\n rads = np.linspace(\n multiplicity * -multiedge_spread,\n multiplicity * +multiedge_spread,\n multiplicity\n )\n\n xa, ya = pos[i]\n xb, yb = pos[j]\n xab, yab = (xa + xb) / 2., (ya + yb) / 2.\n dx, dy = xb - xa, yb - ya\n\n inds = attrs['multiedge_inds']\n for sz, rad, ix in zip(sizes, rads, inds):\n\n # store the central point of the arc in case its needed by\n # the arrow drawing functionality\n cx, cy = xab + rad * dy * 0.5, yab - rad * dx * 0.5\n multiedge_centers[ix] = (cx, cy)\n\n ax.add_patch(patches.FancyArrowPatch(\n (xa, ya), (xb, yb),\n connectionstyle=patches.ConnectionStyle.Arc3(rad=rad),\n alpha=edge_alpha,\n linewidth=sz,\n color=attrs['color'],\n zorder=1,\n ))\n\n scatters = collections.defaultdict(lambda: collections.defaultdict(list))\n\n for node, attrs in G.nodes(data=True):\n # need to group by marker and hatch as matplotlib doesn't map these\n key = (attrs['marker'], attrs['hatch'])\n scatters[key]['x'].append(pos[node][0])\n scatters[key]['y'].append(pos[node][1])\n scatters[key]['s'].append(attrs['size'])\n scatters[key]['c'].append(attrs['color'])\n scatters[key]['linewidths'].append(attrs['outline_size'])\n scatters[key]['edgecolors'].append(attrs['outline_color'])\n\n # plot the nodes\n for (marker, hatch), data in scatters.items():\n ax.scatter(\n data['x'],\n data['y'],\n s=data['s'],\n c=data['c'],\n marker=marker,\n linewidths=data['linewidths'],\n edgecolors=data['edgecolors'],\n hatch=hatch,\n zorder=2,\n )\n\n # draw incomcing arrows for tensor left_inds\n if show_left_inds:\n for tid, t in tn.tensor_map.items():\n if t.left_inds is not None:\n for ind in t.left_inds:\n if ind in hyperedges:\n tida = ind\n else:\n tida, = (x for x in tn.ind_map[ind] if x != tid)\n tidb = tid\n (xa, ya), (xb, yb) = pos[tida], pos[tidb]\n\n edge_width = G.get_edge_data(tida, tidb)['edge_size']\n edge_length = ((xb - xa)**2 + (yb - ya)**2)**0.5\n arrow_scale = (\n 0.02 * arrow_length * edge_width / edge_length**0.5\n )\n\n # arrow start and change\n if ind in multiedge_centers:\n x, y = multiedge_centers[ind]\n else:\n x = (xa + arrow_closeness * xb) / (1 + arrow_closeness)\n y = (ya + arrow_closeness * yb) / (1 + arrow_closeness)\n\n dx = (xb - xa) * arrow_scale\n dy = (yb - ya) * arrow_scale\n\n ax.add_patch(patches.FancyArrow(\n x, y, dx, dy,\n width=0, # don't draw tail\n length_includes_head=True,\n head_width=(dx**2 + dy**2)**0.5,\n head_length=(dx**2 + dy**2)**0.5,\n linewidth=arrow_linewidth,\n color=(\n highlight_inds_color if ind in highlight_inds else\n edge_color\n ),\n alpha=edge_alpha,\n fill=True,\n shape='full',\n overhang=arrow_overhang,\n ))\n\n if show_inds in {'all', 'bond-size'}:\n nx.draw_networkx_edge_labels(\n G, pos,\n edge_labels=edge_labels,\n font_size=font_size_inner,\n font_color=label_color,\n ax=ax,\n )\n if show_tags or show_inds:\n nx.draw_networkx_labels(\n G, pos,\n labels=node_labels,\n font_size=font_size,\n font_color=label_color,\n ax=ax,\n )\n\n # create legend\n if colors and legend:\n handles = []\n for color in colors.values():\n handles += [plt.Line2D([0], [0], marker='o', color=color,\n linestyle='', markersize=10)]\n\n # needed in case '_' is 
the first character\n lbls = [f\" {lbl}\" for lbl in colors]\n\n plt.legend(handles, lbls, ncol=max(round(len(handles) / 20), 1),\n loc='center left', bbox_to_anchor=(1, 0.5))\n\n if not created_fig:\n # we added to axisting axes\n return\n\n if xlims is not None:\n ax.set_xlim(xlims)\n if ylims is not None:\n ax.set_ylim(ylims)\n\n if return_fig:\n return fig\n else:\n plt.show()\n plt.close(fig)\n\n\n# colorblind palettes by Bang Wong (https://www.nature.com/articles/nmeth.1618)\n\n_COLORS_DEFAULT = (\n '#56B4E9', # light blue\n '#E69F00', # orange\n '#009E73', # green\n '#D55E00', # red\n '#F0E442', # yellow\n '#CC79A7', # purple\n '#0072B2', # dark blue\n)\n\n_COLORS_SORTED = (\n '#0072B2', # dark blue\n '#56B4E9', # light blue\n '#009E73', # green\n '#F0E442', # yellow\n '#E69F00', # orange\n '#D55E00', # red\n '#CC79A7', # purple\n)\n\n\ndef mod_sat(c, mod):\n \"\"\"Modify the luminosity of rgb color ``c``.\n \"\"\"\n from matplotlib.colors import hsv_to_rgb, rgb_to_hsv\n\n h, s, v = rgb_to_hsv(c[:3])\n return (*hsv_to_rgb((h, mod * s, v)), 1.0)\n\n\ndef auto_colors(nc):\n import math\n from matplotlib.colors import LinearSegmentedColormap\n\n cmap = LinearSegmentedColormap.from_list('wong', _COLORS_SORTED)\n\n xs = list(map(cmap, np.linspace(0, 1.0, nc)))\n\n # modulate color saturation with sine to generate local distinguishability\n # ... but only turn on gradually for increasing number of nodes\n sat_mod_period = min(4, nc / 7)\n sat_mod_factor = max(0.0, 2 / 3 * math.tanh((nc - 7) / 4))\n\n return [\n mod_sat(\n c, 1 - sat_mod_factor * math.sin(math.pi * i / sat_mod_period)**2\n )\n for i, c in enumerate(xs)\n ]\n\n\ndef get_colors(color, custom_colors=None):\n \"\"\"Generate a sequence of rgbs for tag(s) ``color``.\n \"\"\"\n from matplotlib.colors import to_rgba\n\n if color is None:\n return dict()\n\n if isinstance(color, str):\n color = (color,)\n\n if custom_colors is not None:\n rgbs = list(map(to_rgba, custom_colors))\n return dict(zip(color, rgbs))\n\n nc = len(color)\n if nc <= 7:\n return dict(zip(color, list(map(to_rgba, _COLORS_DEFAULT))))\n\n rgbs = auto_colors(nc)\n return dict(zip(color, rgbs))\n\n\ndef _rotate(xy, theta):\n \"\"\"Return a rotated set of points.\n \"\"\"\n s = np.sin(theta)\n c = np.cos(theta)\n\n xyr = np.empty_like(xy)\n xyr[:, 0] = c * xy[:, 0] - s * xy[:, 1]\n xyr[:, 1] = s * xy[:, 0] + c * xy[:, 1]\n\n return xyr\n\n\ndef _span(xy):\n \"\"\"Return the vertical span of the points.\n \"\"\"\n return xy[:, 1].max() - xy[:, 1].min()\n\n\ndef _massage_pos(pos, nangles=360, flatten=False):\n \"\"\"Rotate a position dict's points to cover a small vertical span\n \"\"\"\n xy = np.empty((len(pos), 2))\n for i, (x, y) in enumerate(pos.values()):\n xy[i, 0] = x\n xy[i, 1] = y\n\n thetas = np.linspace(0, 2 * np.pi, nangles, endpoint=False)\n rxys = (_rotate(xy, theta) for theta in thetas)\n rxy0 = min(rxys, key=lambda rxy: _span(rxy))\n\n if flatten:\n rxy0[:, 1] /= 2\n\n return dict(zip(pos, rxy0))\n\n\ndef get_positions(\n tn,\n G,\n fix=None,\n initial_layout='spectral',\n k=None,\n iterations=200,\n use_forceatlas2=False,\n use_spring_weight=False,\n):\n import networkx as nx\n\n if fix is None:\n fix = dict()\n else:\n fix = parse_dict_to_tids_or_inds(fix, tn)\n # find range with which to scale spectral points with\n xmin, xmax, ymin, ymax = (\n f(fix.values(), key=lambda xy: xy[i])[i]\n for f, i in [(min, 0), (max, 0), (min, 1), (max, 1)])\n if xmin == xmax:\n xmin, xmax = xmin - 1, xmax + 1\n if ymin == ymax:\n ymin, ymax = ymin - 1, ymax + 1\n 
xymin, xymax = min(xmin, ymin), max(xmax, ymax)\n\n if all(node in fix for node in G.nodes):\n # everything is already fixed\n return fix\n\n # use spectral or other layout as starting point\n pos0 = getattr(nx, initial_layout + '_layout')(G)\n\n # scale points to fit with specified positions\n if fix:\n # but update with fixed positions\n pos0.update(valmap(lambda xy: np.array(\n (2 * (xy[0] - xymin) / (xymax - xymin) - 1,\n 2 * (xy[1] - xymin) / (xymax - xymin) - 1)), fix))\n fixed = fix.keys()\n else:\n fixed = None\n\n # and then relax remaining using spring layout\n if iterations:\n\n if use_forceatlas2 is True:\n use_forceatlas2 = 1\n elif use_forceatlas2 in (0, False):\n use_forceatlas2 = float('inf')\n\n should_use_fa2 = (\n (fixed is None) and HAS_FA2 and (len(G) > use_forceatlas2)\n )\n\n weight = 'spring_weight' if use_spring_weight else None\n\n if should_use_fa2:\n from fa2 import ForceAtlas2\n pos = ForceAtlas2(verbose=False).forceatlas2_networkx_layout(\n G, pos=pos0, iterations=iterations, weight_attr=weight)\n else:\n pos = nx.spring_layout(\n G, pos=pos0, fixed=fixed, k=k, iterations=iterations,\n weight=weight)\n else:\n pos = pos0\n\n if not fix:\n # finally rotate them to cover a small vertical span\n pos = _massage_pos(pos)\n\n return pos\n" ]
[ [ "matplotlib.colors.rgb_to_hsv", "matplotlib.colors.hsv_to_rgb", "numpy.cos", "matplotlib.pyplot.subplots", "matplotlib.colors.LinearSegmentedColormap.from_list", "numpy.empty_like", "matplotlib.patches.ConnectionStyle.Arc3", "matplotlib.pyplot.show", "matplotlib.pyplot.close", "numpy.array", "numpy.sin", "matplotlib.pyplot.Line2D", "numpy.linspace", "matplotlib.patches.FancyArrow", "matplotlib.colors.to_rgb" ] ]
mpsonntag/nixpy
[ "fd6addf137e22dad5fc1b1a95bfc4ca2bd84da5d" ]
[ "nixio/test/test_data_array.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright © 2014, German Neuroinformatics Node (G-Node)\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\nimport os\nimport time\nfrom six import string_types\nimport sys\nimport unittest\nimport numpy as np\nimport nixio as nix\nfrom nixio.exceptions import IncompatibleDimensions\nfrom .tmp import TempDir\n\n\nclass TestDataArray(unittest.TestCase):\n\n def setUp(self):\n self.tmpdir = TempDir(\"dataarraytest\")\n self.testfilename = os.path.join(self.tmpdir.path, \"dataarraytest.nix\")\n self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite)\n self.block = self.file.create_block(\"test block\", \"recordingsession\")\n self.array = self.block.create_data_array(\"test array\", \"signal\",\n nix.DataType.Double, (100, ))\n self.other = self.block.create_data_array(\"other array\", \"signal\",\n nix.DataType.Double, (100, ))\n\n def tearDown(self):\n del self.file.blocks[self.block.id]\n self.file.close()\n self.tmpdir.cleanup()\n\n def test_data_array_eq(self):\n assert self.array == self.array\n assert not self.array == self.other\n assert self.array is not None\n\n def test_data_array_id(self):\n assert self.array.id is not None\n\n def test_data_array_name(self):\n assert self.array.name is not None\n\n def test_data_array_type(self):\n def set_none():\n self.array.type = None\n\n assert self.array.type is not None\n self.assertRaises(Exception, set_none)\n\n self.array.type = \"foo type\"\n assert self.array.type == \"foo type\"\n\n def test_data_array_definition(self):\n assert self.array.definition is None\n\n self.array.definition = \"definition\"\n assert self.array.definition == \"definition\"\n\n self.array.definition = None\n assert self.array.definition is None\n\n def test_data_array_timestamps(self):\n created_at = self.array.created_at\n assert created_at > 0\n\n updated_at = self.array.updated_at\n assert updated_at > 0\n\n self.array.force_created_at(1403530068)\n assert self.array.created_at == 1403530068\n\n def test_data_array_label(self):\n assert self.array.label is None\n\n self.array.label = \"label\"\n assert self.array.label == \"label\"\n\n self.array.label = None\n assert self.array.label is None\n\n def test_data_array_unit(self):\n assert self.array.unit is None\n\n self.array.unit = \"mV\"\n assert self.array.unit == \"mV\"\n\n self.array.unit = \"0.5*ms\"\n assert self.array.unit == \"0.5*ms\"\n\n self.array.unit = None\n assert self.array.unit is None\n\n def test_data_array_exp_origin(self):\n assert self.array.expansion_origin is None\n\n data = [10, 29, 33]\n intarray = self.block.create_data_array(\"intarray\", \"array\", nix.DataType.Int64, data=data)\n\n intarray.expansion_origin = 10.2\n assert intarray.expansion_origin == 10.2\n np.testing.assert_almost_equal(intarray[:], np.array(data) - 10.2)\n\n # single value retrieval\n np.testing.assert_almost_equal(intarray[1], data[1] - 10.2)\n\n intarray.expansion_origin = None\n assert intarray.expansion_origin is None\n np.testing.assert_almost_equal(intarray[:], np.array(data))\n\n def test_data_array_coefficients(self):\n assert self.array.polynom_coefficients == ()\n\n self.array.polynom_coefficients = (1.1, 2.2)\n assert self.array.polynom_coefficients == (1.1, 2.2)\n\n data = [10, 29, 33]\n intarray = self.block.create_data_array(\"intarray\", \"array\", nix.DataType.Int64, data=data)\n 
intarray.polynom_coefficients = (0.0, 0.1)\n np.testing.assert_almost_equal(intarray[:], np.array(data) * 0.1)\n\n # single value retrieval\n np.testing.assert_almost_equal(intarray[1], data[1] * 0.1)\n\n # Coefficient deletion\n intarray.polynom_coefficients = None\n np.testing.assert_almost_equal(intarray[:], np.array(data))\n\n def test_data_array_data(self):\n assert self.array.polynom_coefficients == ()\n\n data = np.array([float(i) for i in range(100)])\n dout = np.empty_like(data)\n self.array.write_direct(data)\n assert self.array.dtype == np.dtype(float)\n self.array.read_direct(dout)\n assert np.array_equal(data, dout)\n dout = np.array(self.array)\n assert np.array_equal(data, dout)\n assert self.array.data_extent == data.shape\n assert self.array.data_extent == self.array.shape\n assert self.array.size == data.size\n\n assert len(self.array) == len(data)\n\n dout = np.array(range(100))\n assert np.array_equal(data, dout)\n\n dout = self.array[...]\n assert np.array_equal(data, dout)\n\n # indexed writing (1-d)\n data = np.array([float(-i) for i in range(100)])\n self.array[()] = data\n assert np.array_equal(self.array[...], data)\n\n self.array[...] = [float(-i) for i in range(100)]\n assert np.array_equal(self.array[()], data)\n assert np.array_equal(self.array[0:-10], data[0:-10])\n assert np.array_equal(self.array[-10], np.array([data[-10]]))\n\n self.array[0] = 42\n assert self.array[0] == 42.0\n\n # changing shape via data_extent property\n self.array.data_extent = (200, )\n assert self.array.data_extent == (200, )\n\n data = np.eye(123)\n da1 = self.block.create_data_array(\"double array\", \"signal\", nix.DataType.Double, (123, 123))\n dset = da1\n dset.write_direct(data)\n dout = np.empty_like(data)\n dset.read_direct(dout)\n assert np.array_equal(data, dout)\n\n # indexing support in 2-d arrays\n with self.assertRaises(IndexError):\n _ = self.array[[], [1, 2]]\n\n dout = dset[12]\n assert dout.shape == data[12].shape\n assert np.array_equal(dout, data[12])\n assert np.array_equal(dset[()], data)\n assert np.array_equal(dset[...], data)\n assert np.array_equal(dset[12, ...], data[12, ...])\n assert np.array_equal(dset[..., 12], data[..., 12])\n assert np.array_equal(dset[1:], data[1:])\n assert np.array_equal(dset[-20:, -20:], data[123-20:, 123-20:])\n assert np.array_equal(dset[:1], data[:1])\n assert np.array_equal(dset[:-1, :-1], data[1:123, 1:123])\n assert np.array_equal(dset[1:10, 1:10], data[1:10, 1:10])\n assert np.array_equal(dset[1:-2, 1:-2], data[1:121, 1:121])\n\n da3 = self.block.create_data_array(\"int identity array\", \"signal\",\n nix.DataType.Int32, (123, 123))\n assert da3.shape == (123, 123)\n assert da3.dtype == np.dtype('i4')\n\n data = np.random.rand(3, 4, 5)\n da4 = self.block.create_data_array(\"3d array\", \"signal\",\n nix.DataType.Double, (3, 4, 5))\n dset = da4\n dset.write_direct(data)\n assert dset.shape == data.shape\n assert len(dset) == len(data)\n assert dset.size == data.size\n assert np.array_equal(dset[2, ...], data[2, ...])\n assert np.array_equal(dset[-1, ...], data[2, ...])\n assert np.array_equal(dset[..., 3], data[..., 3])\n assert np.array_equal(dset[..., -2], data[..., 3])\n assert np.array_equal(dset[2, ..., 3], data[2, ..., 3])\n assert np.array_equal(dset[2, ..., -2], data[2, ..., 3])\n assert np.array_equal(dset[1:2, ..., 3:5], data[1:2, ..., 3:5])\n assert np.array_equal(dset[1:2, ..., 3:-1], data[1:2, ..., 3:4])\n\n # indexed writing (n-d)\n data = np.random.rand(2, 2)\n dset[1, 0:2, 0:2] = data\n assert 
np.array_equal(dset[1, 0:2, 0:2], data)\n\n # test inferring shape & dtype from data, and writing the data\n test_ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n test_data = np.array(test_ten, dtype=int)\n da = self.block.create_data_array('created_from_data', 'b',\n data=test_data)\n assert da.shape == test_data.shape\n assert np.array_equal(test_data, da[:])\n assert test_ten == [x for x in da]\n\n # test for exceptions\n self.assertRaises(ValueError, self.block.create_data_array, 'x', 'y')\n self.assertRaises(ValueError, self.block.create_data_array,\n 'x', 'y', data=test_data, shape=(1, 1, 1))\n\n # test appending\n data = np.zeros((10, 5))\n da = self.block.create_data_array('append', 'double', data=data)\n to_append = np.zeros((2, 5))\n\n da.append(to_append)\n assert da.shape == (12, 5)\n\n to_append = np.zeros((12, 2))\n da.append(to_append, axis=1)\n assert da.shape == (12, 7)\n\n self.assertRaises(ValueError, da.append, np.zeros((3, 3, 3)))\n self.assertRaises(ValueError, da.append, np.zeros((5, 5)))\n\n def test_data_array_dtype(self):\n da = self.block.create_data_array('dtype_f8', 'b', 'f8', (10, 10))\n assert da.dtype == np.dtype('f8')\n\n da = self.block.create_data_array('dtype_i16', 'b', np.int16, (10, 10))\n data = da[:]\n assert da.dtype == np.int16\n assert data.dtype == np.int16\n\n da = self.block.create_data_array('dtype_int', 'b', int, (10, 10))\n assert da.dtype == np.dtype(int)\n\n da = self.block.create_data_array('dtype_ndouble', 'b',\n nix.DataType.Double, (10, 10))\n assert da.dtype == np.dtype('f8')\n\n da = self.block.create_data_array('dtype_auto', 'b', None, (10, 10))\n assert da.dtype == np.dtype('f8')\n\n test_data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], dtype=int)\n da = self.block.create_data_array('dtype_int_from_data', 'b',\n data=test_data)\n assert da.dtype == test_data.dtype\n\n bdata = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\n if sys.version_info[0] == 3:\n bdata = [bytes(x, 'UTF-8') for x in bdata]\n\n void_data = np.array(bdata, dtype='V1')\n da = self.block.create_data_array('dtype_opaque', 'b', data=void_data)\n assert da.dtype == np.dtype('V1')\n assert np.array_equal(void_data, da[:])\n\n def test_array_unicode(self):\n da = self.block.create_data_array(\"unicode\", \"lotsatext\",\n nix.DataType.String, shape=(4,))\n data = [\"Καφές\", \"Café\", \"咖啡\", \"☕\"]\n da.write_direct(data)\n\n assert data == list(da[:])\n\n def test_data_array_dimensions(self):\n assert len(self.array.dimensions) == 0\n\n self.array.append_set_dimension()\n self.array.append_range_dimension(range(10))\n self.array.append_sampled_dimension(0.1)\n\n assert len(self.array.dimensions) == 3\n\n self.assertRaises(KeyError, lambda: self.array.dimensions[\"notexist\"])\n self.assertRaises(IndexError, lambda: self.array.dimensions[-4])\n self.assertRaises(IndexError, lambda: self.array.dimensions[3])\n\n assert isinstance(str(self.array.dimensions), string_types)\n assert isinstance(repr(self.array.dimensions), string_types)\n\n dims = list(self.array.dimensions)\n for i in range(3):\n assert dims[i].index == self.array.dimensions[i].index\n assert(dims[i].dimension_type ==\n self.array.dimensions[i].dimension_type)\n\n assert(self.array.dimensions[i].index ==\n self.array.dimensions[i-3].index)\n\n self.array.delete_dimensions()\n\n def test_data_array_sources(self):\n source1 = self.block.create_source(\"source1\", \"channel\")\n source2 = self.block.create_source(\"source2\", \"electrode\")\n\n assert len(self.array.sources) == 0\n\n 
self.array.sources.append(source1)\n self.array.sources.append(source2)\n\n self.assertRaises(TypeError, self.array.sources.append, 100)\n\n assert len(self.array.sources) == 2\n assert source1 in self.array.sources\n assert source2 in self.array.sources\n\n del self.array.sources[source2]\n assert self.array.sources[0] == source1\n\n del self.array.sources[source1]\n assert len(self.array.sources) == 0\n\n def test_data_array_indexing(self):\n data = np.random.rand(50)\n da = self.block.create_data_array(\"random\", \"DataArray\",\n data=data)\n\n np.testing.assert_almost_equal(data[:], da[:])\n\n def check_idx(idx):\n np.testing.assert_almost_equal(da[idx], data[idx])\n\n check_idx(10)\n check_idx(Ellipsis)\n check_idx(slice(10, 15))\n\n def test_data_array_multi_slicing(self):\n shape = (5, 10, 15, 20)\n da = self.block.create_data_array(\n 'test', 'test',\n data=np.random.randint(65000, size=shape)\n )\n self.assertEqual(da[0, 0, 0, 0].shape, (1,))\n self.assertEqual(da[0, 0, 0, :].shape, (20,))\n self.assertEqual(da[0, 0, :, 0].shape, (15,))\n self.assertEqual(da[0, 0, :, :].shape, (15, 20))\n self.assertEqual(da[0, :, 0, 0].shape, (10,))\n self.assertEqual(da[0, :, 0, :].shape, (10, 20))\n self.assertEqual(da[0, :, :, 0].shape, (10, 15))\n self.assertEqual(da[0, :, :, :].shape, (10, 15, 20))\n self.assertEqual(da[:, 0, 0, 0].shape, (5,))\n self.assertEqual(da[:, 0, 0, :].shape, (5, 20))\n self.assertEqual(da[:, 0, :, 0].shape, (5, 15))\n self.assertEqual(da[:, 0, :, :].shape, (5, 15, 20))\n self.assertEqual(da[:, :, 0, 0].shape, (5, 10))\n self.assertEqual(da[:, :, 0, :].shape, (5, 10, 20))\n self.assertEqual(da[:, :, :, 0].shape, (5, 10, 15))\n self.assertEqual(da[:, :, :, :].shape, shape)\n\n def test_outofbounds_indexing(self):\n # test out of bounds IndexError exception\n oobtestda = self.block.create_data_array(\"oobdatatest\",\n \"data\", data=[1, 2, 10])\n with self.assertRaises(IndexError):\n _ = oobtestda[3]\n with self.assertRaises(IndexError):\n _ = oobtestda[10]\n with self.assertRaises(IndexError):\n _ = oobtestda[-7]\n\n def test_data_array_numpy_indexing(self):\n data = np.random.rand(50)\n da = self.block.create_data_array(\"random\", \"DataArray\",\n data=data)\n\n def check_idx(idx):\n np.testing.assert_almost_equal(da[idx], data[idx])\n\n check_idx(np.int8(10))\n check_idx(np.int16(20))\n check_idx(np.int32(42))\n check_idx(np.int64(9))\n\n def test_get_slice(self):\n data2d = np.random.random_sample((100, 2))\n da2d = self.block.create_data_array(\"get_slice 2d\", \"Data\",\n data=data2d)\n da2d.append_range_dimension(np.linspace(10, 19.8, 50))\n da2d.append_set_dimension()\n data = da2d[10:30, 1:2]\n islice = da2d.get_slice((10, 1), (20, 1),\n mode=nix.DataSliceMode.Index)\n np.testing.assert_almost_equal(data, islice)\n dslice = da2d.get_slice((12.0, 1), (4.0, 1),\n mode=nix.DataSliceMode.Data)\n np.testing.assert_almost_equal(data, dslice)\n dslice2 = da2d.get_slice((0.0, 1), (16.0, 1),\n mode=nix.DataSliceMode.Data)\n np.testing.assert_almost_equal(da2d[0:30, 1:2], dslice2)\n\n data3d = np.random.random_sample((30, 30, 5))\n da3d = self.block.create_data_array(\"get_slice 3d\", \"Data\",\n data=data3d)\n sdim = da3d.append_sampled_dimension(0.1)\n sdim.offset = 0.5\n da3d.append_sampled_dimension(2.0)\n da3d.append_set_dimension()\n\n data = data3d[5:15, 20:25, 3:5]\n islice = da3d.get_slice((5, 20, 3), (10, 5, 2),\n mode=nix.DataSliceMode.Index)\n np.testing.assert_almost_equal(data, islice)\n dslice = da3d.get_slice((1.0, 40.0, 3), (1.0, 10.0, 2),\n 
mode=nix.DataSliceMode.Data)\n np.testing.assert_almost_equal(data, dslice)\n\n with self.assertRaises(IncompatibleDimensions):\n da2d.get_slice((0, 0, 0), (10, 10, 10))\n\n with self.assertRaises(IncompatibleDimensions):\n da2d.get_slice((0, 0), (10,))\n\n with self.assertRaises(IncompatibleDimensions):\n da3d.get_slice((0, 0, 0), (3, 9, 40, 1))\n\n def test_dim_one_based(self):\n self.array.append_set_dimension()\n self.array.append_range_dimension(range(10))\n self.array.append_sampled_dimension(0.1)\n dim_container_one_based = self.array.iter_dimensions()\n for idx, dim in dim_container_one_based:\n assert self.array.dimensions[idx-1].dimension_type ==\\\n dim.dimension_type\n\n def test_timestamp_autoupdate(self):\n array = self.block.create_data_array(\"array.time\", \"signal\",\n nix.DataType.Double, (100, ))\n # Append dimensions and check time\n datime = array.updated_at\n time.sleep(1)\n array.append_set_dimension()\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.append_sampled_dimension(sampling_interval=0.1)\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.append_range_dimension(ticks=[0.1])\n self.assertNotEqual(datime, array.updated_at)\n\n # other properties\n datime = array.updated_at\n time.sleep(1)\n array.polynom_coefficients = [1.1, 2.2]\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.expansion_origin = -1\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.label = \"lbl\"\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.unit = \"Ms\"\n self.assertNotEqual(datime, array.updated_at)\n\n def test_timestamp_noautoupdate(self):\n self.file.auto_update_timestamps = False\n array = self.block.create_data_array(\"array.time\", \"signal\",\n nix.DataType.Double, (100, ))\n # Append dimensions and check time\n datime = array.updated_at\n time.sleep(1)\n array.append_set_dimension()\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.append_sampled_dimension(sampling_interval=0.1)\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.append_range_dimension(ticks=[0.1])\n self.assertEqual(datime, array.updated_at)\n\n # other properties\n datime = array.updated_at\n time.sleep(1)\n array.polynom_coefficients = [1.1, 2.2]\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.expansion_origin = -1\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.label = \"lbl\"\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.unit = \"Ms\"\n self.assertEqual(datime, array.updated_at)\n\n def test_data_deletion(self):\n data = [42.1337, 720.3, 190.0009]\n array = self.block.create_data_array(\"del.test\", \"test\", data=data)\n np.testing.assert_almost_equal(data, array[:])\n\n array[:] = None\n np.testing.assert_almost_equal([np.nan]*len(data), array[:])\n\n nda = len(self.block.data_arrays)\n del self.block.data_arrays[\"del.test\"]\n assert len(self.block.data_arrays) == nda-1\n assert \"del.test\" not in self.block.data_arrays\n\n def test_single_value_retrieval(self):\n assert self.array[1].shape == (1,)\n self.array.expansion_origin = 0.3\n assert self.array[1].shape == (1,)\n 
self.array.expansion_origin = None\n\n assert self.array[1].shape == (1,)\n self.array.polynom_coefficients = (1.2, 3.4)\n assert self.array[1].shape == (1,)\n self.array.polynom_coefficients = None\n\n assert self.array[1].shape == (1,)\n self.array.expansion_origin = 0.9\n self.array.polynom_coefficients = (1.2, 3.4)\n assert self.array[1].shape == (1,)\n self.array.expansion_origin = None\n self.array.polynom_coefficients = None\n\n assert self.array[1].shape == (1,)\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.eye", "numpy.random.random_sample", "numpy.int8", "numpy.zeros", "numpy.dtype", "numpy.int64", "numpy.empty_like", "numpy.int32", "numpy.random.rand", "numpy.array_equal", "numpy.array", "numpy.int16", "numpy.linspace", "numpy.random.randint" ] ]
iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer
[ "57904544c6d6b43dcd5937afeb474c0a47456d98" ]
[ "models/model_unet.py" ]
[ "import tensorflow as tf\nfrom tensorflow.python.ops import control_flow_ops\nfrom six.moves import cPickle\nimport unet\nimport simplified_unet\n\narg_scope = tf.contrib.framework.arg_scope\n\n\nclass UnetModel(object):\n\n def __init__(self, number_class=3, is_training=True, is_simplified = False, dropout = True):\n\n \"\"\"Create the model\"\"\"\n self.n_classes = number_class\n self.is_training = is_training\n self.is_simplified = is_simplified\n self.dropout = dropout\n\n def _create_network(self, input_batch, dropout = False, is_training = True):\n\n \"\"\"\n Args:\n input_batch: batch of pre-processed images.\n keep_prob: probability of keeping neurons intact.\n\n Returns:\n A downsampled segmentation mask.\n \"\"\"\n if not self.is_simplified:\n net, _ = unet.unet(input_batch, self.n_classes, is_training = is_training, dropout = dropout, weight_decay=0.0005)\n else:\n net, _ = simplified_unet.unet(input_batch, self.n_classes, is_training = is_training, dropout = dropout, weight_decay=0.0005)\n\n return net\n\n def prepare_label(self, input_batch, new_size):\n \"\"\"Resize masks and perform one-hot encoding.\n Args:\n input_batch: input tensor of shape [batch_size H W 1].\n new_size: a tensor with new height and width.\n Returns:\n Outputs a tensor of shape [batch_size h w 21]\n with last dimension comprised of 0's and 1's only.\n \"\"\"\n with tf.name_scope('label_encode'):\n input_batch = tf.image.resize_nearest_neighbor(input_batch,\n new_size) # As labels are integer numbers, need to use NN interp.\n input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.\n input_batch = tf.one_hot(input_batch, depth=self.n_classes)\n return input_batch\n\n def preds(self, input_batch):\n \"\"\"Create the network and run inference on the input batch.\n\n Args:\n input_batch: batch of pre-processed images.\n\n Returns:\n Argmax over the predictions of the network of the same shape as the input.\n \"\"\"\n raw_output = self._create_network(tf.cast(input_batch, tf.float32), dropout = self.dropout, is_training = self.is_training)\n raw_output = tf.image.resize_bilinear(raw_output, tf.shape(input_batch)[1:3, ])\n raw_output = tf.argmax(raw_output, axis=3)\n raw_output = tf.expand_dims(raw_output, axis=3) # Create 4D-tensor.\n return tf.cast(raw_output, tf.uint8)\n\n def loss(self, img_batch, label_batch, mask_batch):\n \"\"\"Create the network, run inference on the input batch and compute loss.\n\n Args:\n input_batch: batch of pre-processed images.\n\n Returns:\n Pixel-wise softmax loss.\n \"\"\"\n raw_output = self._create_network(tf.cast(img_batch, tf.float32), dropout = self.dropout, is_training = self.is_training)\n\n # Get prediction output\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img_batch)[1:3, ])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n raw_output_up = tf.expand_dims(raw_output_up, axis=3) # Create 4D-tensor.\n pred = tf.cast(raw_output_up, tf.uint8)\n prediction = tf.reshape(raw_output, [-1, self.n_classes])\n\n # Prepare ground truth output\n label_batch = tf.image.resize_nearest_neighbor(label_batch, tf.stack(raw_output.get_shape()[1:3]))\n gt = tf.expand_dims(tf.cast(tf.reshape(label_batch, [-1]), tf.int32), axis=1)\n\n # Prepare mask\n if mask_batch != None:\n resized_mask_batch = tf.image.resize_nearest_neighbor(mask_batch, tf.stack(raw_output.get_shape()[1:3]))\n resized_mask_batch = tf.cast(tf.reshape(resized_mask_batch, [-1]), tf.float32)\n mask = tf.reshape(resized_mask_batch, gt.get_shape())\n\n # Calculate the masked 
loss \n epsilon = 0.00001 * tf.ones(prediction.get_shape(), tf.float32)\n if mask_batch != None:\n loss = tf.losses.sparse_softmax_cross_entropy(logits=prediction+epsilon, labels=gt, weights=mask)\n else:\n loss = tf.losses.sparse_softmax_cross_entropy(logits=prediction+epsilon, labels=gt)\n reduced_loss = tf.reduce_mean(loss)\n print(loss)\n\n\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if update_ops:\n updates = tf.group(*update_ops)\n reduced_loss = control_flow_ops.with_dependencies([updates], reduced_loss)\n\n return pred, reduced_loss" ]
[ [ "tensorflow.shape", "tensorflow.reshape", "tensorflow.get_collection", "tensorflow.expand_dims", "tensorflow.reduce_mean", "tensorflow.squeeze", "tensorflow.cast", "tensorflow.name_scope", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.one_hot", "tensorflow.argmax", "tensorflow.image.resize_nearest_neighbor", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.group" ] ]
paolorampazzo/mypySOT
[ "c22a0f297576aa6db79c7f0752d97445195dd9b4" ]
[ "pySOT/experimental_design.py" ]
[ "\"\"\"\n.. module:: experimental_design\n :synopsis: Methods for generating an experimental design.\n\n.. moduleauthor:: David Eriksson <[email protected]>,\n Yi Shen <[email protected]>\n\n:Module: experimental_design\n:Author: David Eriksson <[email protected]>\n Yi Shen <[email protected]>\n\"\"\"\n\nimport numpy as np\nimport pyDOE2 as pydoe\nimport abc\nimport six\nimport itertools\nfrom pySOT.utils import from_unit_box, round_vars\nfrom numpy.linalg import matrix_rank as rank\nfrom scipy.spatial.distance import cdist\nimport warnings\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass ExperimentalDesign(object):\n \"\"\"Base class for experimental designs.\n\n :ivar dim: Number of dimensions\n :ivar num_pts: Number of points in the experimental design\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self): # pragma: no cover\n self.dim = None\n self.num_pts = None\n\n @abc.abstractmethod\n def generate_points(self, lb=None,\n ub=None, int_var=None): # pragma: no cover\n pass\n\n\ndef _expdes_dist(gen, iterations, lb, ub, int_var):\n \"\"\"Helper method for picking the best experimental design.\n\n We generate iterations designs and picks the one the maximizes the\n minimum distance between points. This isn't a perfect criterion, but\n it will help avoid rank-defficient designs such as y=x.\n\n :param lb: Lower bounds\n :type lb: numpy.array\n :param ub: Upper bounds\n :type ub: numpy.array\n :param int_var: Indices of integer variables.\n :type int_var: numpy.array\n\n :return: Experimental design of size num_pts x dim\n :rtype: numpy.ndarray\n \"\"\"\n\n X = None\n best_score = 0\n for _ in range(iterations):\n cand = gen() # Generate a new design\n if all([x is not None for x in [lb, ub]]): # Map and round\n cand = round_vars(from_unit_box(cand, lb, ub), int_var, lb, ub)\n\n dists = cdist(cand, cand)\n np.fill_diagonal(dists, np.inf) # Since these are zero\n score = dists.min().min()\n\n if score > best_score and rank(cand) == cand.shape[1]:\n best_score = score\n X = cand.copy()\n\n if X is None:\n raise ValueError(\"No valid design found, increase num_pts?\")\n return X\n\n\n \nclass LatinHypercube(ExperimentalDesign):\n \"\"\"Latin Hypercube experimental design.\n\n :param dim: Number of dimensions\n :type dim: int\n :param num_pts: Number of desired sampling points\n :type num_pts: int\n :param criterion: Previously passed to pyDOE, now deprecated\n :type criterion: string\n :param iterations: Number of designs to choose from\n :type iterations: int\n\n :ivar dim: Number of dimensions\n :ivar num_pts: Number of points in the experimental design\n :ivar iterations: Number of points in the experimental design\n \"\"\"\n def __init__(self, dim, num_pts, criterion=None, iterations=1000):\n if criterion is not None:\n warnings.warn(\"Criterion is deprecated and will be removed.\")\n self.dim = dim\n self.num_pts = num_pts\n self.iterations = iterations\n\n def generate_points(self, lb=None, ub=None, int_var=None):\n \"\"\"Generate a new experimental design.\n\n You can specify lb, ub, int_var to have the design mapped to a\n specific domain. These inputs are ignored if one of lb\n or ub is None. The design is generated in [0, 1]^d in this case.\n\n :param lb: Lower bounds\n :type lb: numpy.array\n :param ub: Upper bounds\n :type ub: numpy.array\n :param int_var: Indices of integer variables. 
If None, [], or\n np.array([]) we assume all variables are continuous.\n :type int_var: numpy.array\n\n :return: Experimental design of size num_pts x dim\n :rtype: numpy.ndarray\n \"\"\"\n if int_var is None or len(int_var) == 0:\n int_var = np.array([])\n\n def wrapper():\n return pydoe.lhs(self.dim, self.num_pts, iterations=1)\n return _expdes_dist(wrapper, self.iterations, lb, ub, int_var)\n\n\nclass SymmetricLatinHypercube(ExperimentalDesign):\n \"\"\"Symmetric Latin hypercube experimental design.\n\n :param dim: Number of dimensions\n :type dim: int\n :param num_pts: Number of desired sampling points\n :type num_pts: int\n :param iterations: Number of designs to generate and pick the best from\n :type iterations: int\n\n :ivar dim: Number of dimensions\n :ivar num_pts: Number of points in the experimental design\n :ivar iterations: Number of points in the experimental design\n \"\"\"\n def __init__(self, dim, num_pts, iterations=1000):\n self.dim = dim\n self.num_pts = num_pts\n self.iterations = iterations\n\n def generate_points(self, lb=None, ub=None, int_var=None):\n \"\"\"Generate a new experimental design.\n\n You can specify lb, ub, int_var to have the design mapped to a\n specific domain. These inputs are ignored if one of lb\n or ub is None. The design is generated in [0, 1]^d in this case.\n\n :param lb: Lower bounds\n :type lb: numpy.array\n :param ub: Upper bounds\n :type ub: numpy.array\n :param int_var: Indices of integer variables. If None, [], or\n np.array([]) we assume all variables are continuous.\n :type int_var: numpy.array\n\n :return: Experimental design of size num_pts x dim\n :rtype: numpy.ndarray\n \"\"\"\n if int_var is None or len(int_var) == 0:\n int_var = np.array([])\n\n def wrapper():\n return self._slhd()\n return _expdes_dist(wrapper, self.iterations, lb, ub, int_var)\n\n def _slhd(self):\n \"\"\"Generate a symmetric Latin hypercube design in the unit hypercube.\n\n :return: Symmetric Latin hypercube design in the unit hypercube\n of size num_pts x dim\n :rtype: numpy.ndarray\n \"\"\"\n # Generate a one-dimensional array based on sample number\n points = np.zeros([self.num_pts, self.dim])\n points[:, 0] = np.arange(1, self.num_pts+1)\n\n # Get the last index of the row in the top half of the hypercube\n middleind = self.num_pts // 2\n\n # special manipulation if odd number of rows\n if self.num_pts % 2 == 1:\n points[middleind, :] = middleind + 1\n\n # Generate the top half of the hypercube matrix\n for j in range(1, self.dim):\n for i in range(middleind):\n if np.random.random() < 0.5:\n points[i, j] = self.num_pts - i\n else:\n points[i, j] = i + 1\n np.random.shuffle(points[:middleind, j])\n\n # Generate the bottom half of the hypercube matrix\n for i in range(middleind, self.num_pts):\n points[i, :] = self.num_pts + 1 - points[self.num_pts - 1 - i, :]\n\n return (points - 1) / (self.num_pts - 1) # Map to [0, 1]^d\n\n\nclass TwoFactorial(ExperimentalDesign):\n \"\"\"Two-factorial experimental design.\n\n The two-factorial experimental design consists of the corners\n of the unit hypercube, and hence :math:`2^{dim}` points.\n\n :param dim: Number of dimensions\n :type dim: int\n\n :ivar dim: Number of dimensions\n :ivar num_pts: Number of points in the experimental design\n\n :raises ValueError: If dim >= 15\n \"\"\"\n def __init__(self, dim):\n if dim >= 15:\n raise ValueError(\"Refusing to use >= 2^15 points.\")\n self.dim = dim\n self.num_pts = 2 ** dim\n\n def generate_points(self, lb=None, ub=None, int_var=None):\n \"\"\"Generate a two 
factorial design in the unit hypercube.\n\n You can specify lb, ub, int_var to have the design mapped to a\n specific domain. These inputs are ignored if one of lb\n or ub is None. The design is generated in [0, 1]^d in this case.\n\n :param lb: Lower bounds\n :type lb: numpy.array\n :param ub: Upper bounds\n :type ub: numpy.array\n :param int_var: Indices of integer variables. If None, [], or\n np.array([]) we assume all variables are continuous.\n :type int_var: numpy.array\n\n :return: Two factorial design in unit hypercube of size num_pts x dim\n :rtype: numpy.array\n \"\"\"\n if int_var is None or len(int_var) == 0:\n int_var = np.array([])\n\n X = np.array(list(itertools.product([0, 1], repeat=self.dim)))\n if all([x is not None for x in [lb, ub]]): # Map and round\n X = round_vars(from_unit_box(X, lb, ub), int_var, lb, ub)\n return X\n" ]
[ [ "numpy.random.shuffle", "scipy.spatial.distance.cdist", "numpy.zeros", "numpy.arange", "numpy.random.random", "numpy.linalg.matrix_rank", "numpy.fill_diagonal", "numpy.array" ] ]
aliborji/ShapeDefence
[ "92da19bb195b5161d997f6ee1cc777b07a714f6f" ]
[ "pix2pix-pytorch/networks.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.optim import lr_scheduler\n\n\ndef get_norm_layer(norm_type='instance'):\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n elif norm_type == 'switchable':\n norm_layer = SwitchNorm2d\n elif norm_type == 'none':\n norm_layer = None\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\ndef get_scheduler(optimizer, opt):\n if opt.lr_policy == 'lambda':\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)\n return lr_l\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n elif opt.lr_policy == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)\n elif opt.lr_policy == 'plateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n elif opt.lr_policy == 'cosine':\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n return scheduler\n\n\n# update learning rate (called once every epoch)\ndef update_learning_rate(scheduler, optimizer):\n scheduler.step()\n lr = optimizer.param_groups[0]['lr']\n print('learning rate = %.7f' % lr)\n\n\ndef init_weights(net, init_type='normal', gain=0.02):\n def init_func(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, gain)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=gain)\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, gain)\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network with %s' % init_type)\n net.apply(init_func)\n\n\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_id='cuda:0'):\n net.to(gpu_id)\n init_weights(net, init_type, gain=init_gain)\n return net\n\n\ndef define_G(input_nc, output_nc, ngf, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_id='cuda:0'):\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)\n \n return init_net(net, init_type, init_gain, gpu_id)\n\n\n# Defines the generator that consists of Resnet blocks between a few\n# downsampling/upsampling operations.\nclass ResnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=9, padding_type='reflect'):\n assert(n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n self.input_nc = input_nc\n self.output_nc = output_nc\n self.ngf = ngf\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n 
self.inc = Inconv(input_nc, ngf, norm_layer, use_bias)\n self.down1 = Down(ngf, ngf * 2, norm_layer, use_bias)\n self.down2 = Down(ngf * 2, ngf * 4, norm_layer, use_bias)\n\n model = []\n for i in range(n_blocks):\n model += [ResBlock(ngf * 4, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n self.resblocks = nn.Sequential(*model)\n\n self.up1 = Up(ngf * 4, ngf * 2, norm_layer, use_bias)\n self.up2 = Up(ngf * 2, ngf, norm_layer, use_bias)\n\n self.outc = Outconv(ngf, output_nc)\n\n def forward(self, input):\n out = {}\n out['in'] = self.inc(input)\n out['d1'] = self.down1(out['in'])\n out['d2'] = self.down2(out['d1'])\n out['bottle'] = self.resblocks(out['d2'])\n out['u1'] = self.up1(out['bottle'])\n out['u2'] = self.up2(out['u1'])\n\n return self.outc(out['u2'])\n\n\nclass Inconv(nn.Module):\n def __init__(self, in_ch, out_ch, norm_layer, use_bias):\n super(Inconv, self).__init__()\n self.inconv = nn.Sequential(\n nn.ReflectionPad2d(3),\n nn.Conv2d(in_ch, out_ch, kernel_size=7, padding=0,\n bias=use_bias),\n norm_layer(out_ch),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n x = self.inconv(x)\n return x\n\n\nclass Down(nn.Module):\n def __init__(self, in_ch, out_ch, norm_layer, use_bias):\n super(Down, self).__init__()\n self.down = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, kernel_size=3,\n stride=2, padding=1, bias=use_bias),\n norm_layer(out_ch),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n x = self.down(x)\n return x\n\n\n# Define a Resnet block\nclass ResBlock(nn.Module):\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n super(ResBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n norm_layer(dim),\n nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return nn.ReLU(True)(out)\n\n\nclass Up(nn.Module):\n def __init__(self, in_ch, out_ch, norm_layer, use_bias):\n super(Up, self).__init__()\n self.up = nn.Sequential(\n # nn.Upsample(scale_factor=2, mode='nearest'),\n # nn.Conv2d(in_ch, out_ch,\n # kernel_size=3, stride=1,\n # padding=1, bias=use_bias),\n nn.ConvTranspose2d(in_ch, out_ch,\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(out_ch),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n x = self.up(x)\n return x\n\n\nclass Outconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(Outconv, self).__init__()\n self.outconv = nn.Sequential(\n nn.ReflectionPad2d(3),\n nn.Conv2d(in_ch, out_ch, kernel_size=7, padding=0),\n nn.Tanh()\n )\n\n def 
forward(self, x):\n x = self.outconv(x)\n return x\n\n\ndef define_D(input_nc, ndf, netD,\n n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', init_gain=0.02, gpu_id='cuda:0'):\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n if netD == 'basic':\n net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n elif netD == 'n_layers':\n net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n elif netD == 'pixel':\n net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n else:\n raise NotImplementedError('Discriminator model name [%s] is not recognized' % net)\n\n return init_net(net, init_type, init_gain, gpu_id)\n\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2**n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2**n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n\n if use_sigmoid:\n sequence += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n return self.model(input)\n\n\nclass PixelDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):\n super(PixelDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.net = [\n nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * 2),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\n if use_sigmoid:\n self.net.append(nn.Sigmoid())\n\n self.net = nn.Sequential(*self.net)\n\n def forward(self, input):\n return self.net(input)\n\n\nclass GANLoss(nn.Module):\n def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n if use_lsgan:\n self.loss = nn.MSELoss()\n else:\n self.loss = nn.BCELoss()\n\n def get_target_tensor(self, input, target_is_real):\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(input)\n\n def __call__(self, input, target_is_real):\n target_tensor = self.get_target_tensor(input, target_is_real)\n return self.loss(input, 
target_tensor)\n" ]
[ [ "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.nn.Conv2d", "torch.nn.ReflectionPad2d", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.nn.Sigmoid", "torch.nn.Dropout", "torch.nn.ConvTranspose2d", "torch.nn.init.kaiming_normal_", "torch.nn.init.xavier_normal_", "torch.nn.init.normal_", "torch.optim.lr_scheduler.LambdaLR", "torch.tensor", "torch.nn.ReplicationPad2d", "torch.optim.lr_scheduler.StepLR", "torch.nn.MSELoss", "torch.nn.init.constant_", "torch.nn.Tanh", "torch.nn.Sequential", "torch.nn.BCELoss", "torch.nn.ReLU", "torch.nn.init.orthogonal_", "torch.nn.LeakyReLU" ] ]
MaksSieve/CourseProject_2nd_Year
[ "ecbe77aa33d0e87231784cdc460c24ce99278928" ]
[ "engine_tests/PiImageSearch/ball_tracking69.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# import the necessary packages\nfrom collections import deque\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport time\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport RPi.GPIO as GPIO\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-v\", \"--video\",\n help=\"path to the (optional) video file\")\nap.add_argument(\"-b\", \"--buffer\", type=int, default=64,\n help=\"max buffer size\")\nargs = vars(ap.parse_args())\n\n# define the lower and upper boundaries of the \"green\"\n# ball in the HSV color space, then initialize the\n# list of tracked points\ngreenUpper = (195, 100, 153)\ngreenLower = (101, 56, 27) \npts = deque(maxlen=args[\"buffer\"])\n\n# if a video path was not supplied, grab the reference\n# to the webcam\nif not args.get(\"video\", False):\n camera = cv2.VideoCapture(0)\n\n# otherwise, grab a reference to the video file\nelse:\n camera = cv2.VideoCapture(args[\"video\"])\n\n#Creating a Pandas DataFrame To Store Data Point\nData_Features = ['x', 'y', 'time']\nData_Points = pd.DataFrame(data = None, columns = Data_Features , dtype = float)\n\n\n#Reading the time in the begining of the video.\nstart = time.time()\n\n# keep looping\nwhile True:\n # grab the current frame\n (grabbed, frame) = camera.read()\n \n #Reading The Current Time\n current_time = time.time() - start\n\n # if we are viewing a video and we did not grab a frame,\n # then we have reached the end of the video\n if args.get(\"video\") and not grabbed:\n break\n\n # resize the frame, blur it, and convert it to the HSV\n # color space\n frame = imutils.resize(frame, width=600)\n # blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # construct a mask for the color \"green\", then perform\n # a series of dilations and erosions to remove any small\n # blobs left in the mask\n mask = cv2.inRange(hsv, greenLower, greenUpper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n # find contours in the mask and initialize the current\n # (x, y) center of the ball\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n \n\n # only proceed if the radius meets a minimum size\n if (radius < 300) & (radius > 10 ) : \n # draw the circle and centroid on the frame,\n # then update the list of tracked points\n cv2.circle(frame, (int(x), int(y)), int(radius),\n (0, 255, 255), 2)\n cv2.circle(frame, center, 5, (0, 0, 255), -1)\n \n #Save The Data Points\n Data_Points.loc[Data_Points.size/3] = [x, y, current_time]\n\n # update the points queue\n pts.appendleft(center)\n\n # loop over the set of tracked points\n for i in range(1, len(pts)):\n # if either of the tracked points are None, ignore them\n if pts[i - 1] is None or pts[i] is None:\n continue\n dY = pts[i][1] - pts[i-1][1]\n GPIO.setmode(GPIO.BCM)\n GPIO.output(4, GPIO.LOW)\n GPIO.output(17, GPIO.LOW)\n GPIO.output(22, GPIO.LOW)\n GPIO.output(27, GPIO.LOW)\n if np.sign(dY) == 1:\n GPIO.output(4, GPIO.HIGH)\n GPIO.output(27, GPIO.HIGH)\n 
else:\n GPIO.output(17, GPIO.HIGH)\n GPIO.output(22, GPIO.HIGH)\n GPIO.cleanup()\n # otherwise, compute the thickness of the line and\n # draw the connecting lines\n thickness = int(np.sqrt(args[\"buffer\"] / float(i + 1)) * 2.5)\n cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)\n\n # show the frame to our screen\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the 'q' key is pressed, stop the loop\n if key == ord(\"q\"):\n break\n#'h' is the focal length of the camera\n#'X0' is the correction term of shifting of x-axis\n#'Y0' is the correction term of shifting of y-axis\n#'time0' is the correction term for correction of starting of time\nh = 0.2\nX0 = -3\nY0 = 20\ntime0 = 0\ntheta0 = 0.3\n\n#Applying the correction terms to obtain actual experimental data\nData_Points['x'] = Data_Points['x']- X0\nData_Points['y'] = Data_Points['y'] - Y0\nData_Points['time'] = Data_Points['time'] - time0\n\n#Calulataion of theta value\nData_Points['theta'] = 2 * np.arctan(Data_Points['y']*0.0000762/h)#the factor correspons to pixel length in real life\nData_Points['theta'] = Data_Points['theta'] - theta0\n\n#Creating the 'Theta' vs 'Time' plot\nplt.plot(Data_Points['theta'], Data_Points['time'])\nplt.xlabel('Theta')\nplt.ylabel('Time')\n\n#Export The Data Points As cvs File and plot\nData_Points.to_csv('Data_Set.csv', sep=\",\")\nplt.savefig('Time_vs_Theta_Graph.svg', transparent= True)\n\n# cleanup the camera and close any open windows\ncamera.release()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.sign", "matplotlib.pyplot.savefig", "numpy.arctan", "pandas.DataFrame", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
NeveIsa/geneal
[ "064b0409912088886bf56fe9a729d74dac92a235" ]
[ "geneal/applications/fitness_functions/continuous.py" ]
[ "import numpy as np\n\n\ndef fitness_functions_continuous(function_number):\n\n if function_number == 1:\n return lambda chromosome: -(np.abs(chromosome[0]) + np.cos(chromosome[0]))\n elif function_number == 2:\n return lambda chromosome: -(np.abs(chromosome[0]) + np.sin(chromosome[0]))\n elif function_number == 3:\n return lambda chromosome: -(chromosome ** 2).sum()\n elif function_number == 4:\n return lambda chromosome: -np.sum(\n np.abs(chromosome) - 10 * np.cos(np.sqrt(np.abs(10 * chromosome)))\n )\n elif function_number == 5:\n return lambda chromosome: -(chromosome[0] ** 2 + chromosome[0]) * np.cos(\n chromosome[0]\n )\n elif function_number == 6:\n return lambda chromosome: -(\n chromosome[0] * np.sin(4 * chromosome[0])\n + 1.1 * chromosome[1] * np.sin(2 * chromosome[1])\n )\n" ]
[ [ "numpy.sin", "numpy.abs", "numpy.cos" ] ]
giorgiovaccarino/CSSR
[ "e62d936445abcd0e34844b93db6505e9a59bec04" ]
[ "model/modeling/resnet.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Dequan Wang and Xingyi Zhou\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom .DCNv2.dcn_v2 import DCN\nimport torch.utils.model_zoo as model_zoo\n\nfrom model.utils.misc import _sigmoid\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n\n}\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\ndef fill_up_weights(up):\n w = up.weight.data\n f = math.ceil(w.size(2) / 2)\n c = (2 * f - 1 - f % 2) / (2. 
* f)\n for i in range(w.size(2)):\n for j in range(w.size(3)):\n w[0, 0, i, j] = \\\n (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))\n for c in range(1, w.size(0)):\n w[c, 0, :, :] = w[0, 0, :, :] \n\ndef fill_fc_weights(layers):\n for m in layers.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n # torch.nn.init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, block, layers, heads, head_conv):\n self.inplanes = 64\n self.heads = heads\n self.deconv_with_bias = False\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(\n 3,\n [256, 128, 64],\n [4, 4, 4],\n )\n\n for head in self.heads:\n classes = self.heads[head]\n if head_conv > 0:\n fc = nn.Sequential(\n nn.Conv2d(64, head_conv,\n kernel_size=3, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(head_conv, classes, \n kernel_size=1, stride=1, \n padding=0, bias=True))\n if 'hm' in head:\n fc[-1].bias.data.fill_(-2.19)\n else:\n fill_fc_weights(fc)\n else:\n fc = nn.Conv2d(64, classes, \n kernel_size=1, stride=1, \n padding=0, bias=True)\n if 'hm' in head:\n fc.bias.data.fill_(-2.19)\n else:\n fill_fc_weights(fc)\n self.__setattr__(head, fc)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n fc = DCN(self.inplanes, planes, \n kernel_size=(3,3), stride=1,\n padding=1, dilation=1, deformable_groups=1)\n # fc = nn.Conv2d(self.inplanes, planes,\n # kernel_size=3, stride=1, \n # padding=1, dilation=1, bias=False)\n # fill_fc_weights(fc)\n up = nn.ConvTranspose2d(\n in_channels=planes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n 
padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias)\n fill_up_weights(up)\n\n layers.append(fc)\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n layers.append(up)\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.deconv_layers(x)\n ret = {}\n for head in self.heads:\n ret[head] = self.__getattr__(head)(x)\n ret['hm'] = _sigmoid(ret['hm'])\n return [ret]\n\n def init_weights(self, num_layers):\n if 1:\n url = model_urls['resnet{}'.format(num_layers)]\n pretrained_state_dict = model_zoo.load_url(url)\n print('=> loading pretrained model {}'.format(url))\n self.load_state_dict(pretrained_state_dict, strict=False)\n print('=> init deconv weights from normal distribution')\n for name, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\nresnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])}\n\n\ndef get_pose_net():\n num_layers = 18\n heads = {'hm': 80, 'wh': 2, 'reg': 2}\n head_conv = 64\n block_class, layers = resnet_spec[num_layers]\n\n model = PoseResNet(block_class, layers, heads, head_conv=head_conv)\n model.init_weights(num_layers)\n return model\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.init.constant_", "torch.nn.init.normal_", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.utils.model_zoo.load_url", "torch.nn.ReLU", "torch.nn.ConvTranspose2d" ] ]
Ojas-Singh/oOo
[ "ef3be64693c7698d0d34022a1b93cb8dab5c766c" ]
[ "xemia.py" ]
[ "from threading import stack_size\nfrom ximea import xiapi\nfrom imutils.video import FPS\nimport cv2\nimport numpy as np\nimport time\nimport multiprocessing\nfrom multiprocessing import Pool, Queue\nimport sys,os\nimport pickle\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import leastsq\nfrom numba import jit\nmatplotlib.use(\"Qt5agg\")\n\n@jit(nopython=True)\ndef correlation_coefficient( patch1, patch2):\n product = np.mean((patch1 - patch1.mean()) * (patch2 - patch2.mean()))\n stds = patch1.std() * patch2.std()\n if stds == 0:\n return 0\n else:\n product /= stds\n return product\n@jit(nopython=True)\ndef gauss_erf(p,x,y):#p = [height, mean, sigma]\n\treturn y - p[0] * np.exp(-(x-p[1])**2 /(2.0 * p[2]**2))\n@jit(nopython=True)\ndef gauss_eval(x,p):\n\treturn p[0] * np.exp(-(x-p[1])**2 /(2.0 * p[2]**2))\n\n\ndef gaussianFit(X,Y):\n\tsize = len(X)\n\tmaxy = max(Y)\n\thalfmaxy = maxy / 2.0\n\tmean = sum(X*Y)/sum(Y)\n\n\thalfmaxima = X[int(len(X)/2)]\n\tfor k in range(size):\n\t\tif abs(Y[k] - halfmaxy) < halfmaxy/10:\n\t\t\thalfmaxima = X[k]\n\t\t\tbreak\n\tsigma = mean - halfmaxima\n\tpar = [maxy, mean, sigma] # Amplitude, mean, sigma\t\t\t\t\n\ttry:\n\t\tplsq = leastsq(gauss_erf, par,args=(X,Y))\n\texcept:\n\t\treturn None\n\tif plsq[1] > 4:\n\t\tprint('fit failed')\n\t\treturn None\n\n\tpar = plsq[0]\n\tXmore = np.linspace(X[0],X[-1],100)\n\tY = gauss_eval(Xmore, par)\n\n\treturn par[1],Xmore,Y\n\ndef worker(input_q, output_q,stack):\n RESIZE = 128\n while True:\n frameinfo = input_q.get() \n\n\n f = np.fft.fft2(frameinfo[1])\n fshift = np.fft.fftshift(f)\n magnitude_spectrum = 20*np.log(np.abs(fshift))\n magnitude_spectrum = np.asarray(magnitude_spectrum, dtype=np.uint8)\n centroid = None\n R = 4 * RESIZE / 10\n corr = []\n\n for img in stack:\n # corr.append(correlation_coefficient(img, comp_roi.getArrayRegion(magnitude_spectrum)))\n corr.append(correlation_coefficient(img, magnitude_spectrum))\n\n X = np.array(range(len(stack)))\n corr = np.array(corr)\n corr -= min(corr)\n #self.extracted_view.setData(X, corr)\n try:\n centroid, X, corr = gaussianFit(X, corr)\n #self.fitted_view.setData(X, corr)\n output_q.put([frameinfo[0],centroid])\n except Exception as error:\n print(error)\n \n \n\ndef graphdisplayworker(graph_q):\n fig = plt.figure()\n data = [[],[]]\n ax = fig.add_subplot(111)\n fig.show()\n timestart = time.time()\n while True:\n \n if quit:\n break\n for j in range(graph_q.qsize()):\n timestamp,centroid = graph_q.get()\n data[0].append(timestamp-timestart)\n data[1].append(centroid)\n timenowplot = time.time()\n ax.plot(data[0], data[1], color='b')\n plt.pause(0.02)\n ax.set_xlim(left=max(0, timenowplot-timestart-3), right=timenowplot-timestart+1)\n # plt.pause(0.05)\n plt.show(block=False)\n time.sleep(.005)\n cv2.waitKey(1)\n \ndef record(display_q):\n results = []\n quit_state = False\n while not quit_state:\n data = display_q.get()\n timestamp,centroid = data[1]\n results.append((timestamp,centroid))\n graph_q.put((timestamp,centroid))\n quit_state = data[0]\n with open('results.pkl', 'wb') as f:\n pickle.dump(results, f)\n print(\"written to file results.pkl !\")\n\n\nif __name__ == '__main__':\n cam = xiapi.Camera()\n print('Opening first camera...')\n cam.open_device()\n cam.set_exposure(1000)\n cam.set_param('width',128)\n cam.set_param('height',128)\n cam.set_param('downsampling_type', 'XI_SKIPPING')\n cam.set_acq_timing_mode('XI_ACQ_TIMING_MODE_FREE_RUN')\n qu_limit = 10\n workers = 12\n threadn = cv2.getNumberOfCPUs() \n 
print(\"Threads : \", threadn)\n print(\"Workers Spawned : \", workers)\n input_q = Queue(qu_limit) # fps is better if queue is higher but then more lags\n frame_count = 0\n stacksize = 200\n stack=[]\n output_q = Queue()\n display_q = Queue()\n graph_q = Queue()\n quit = False\n all_processes = []\n \n D = multiprocessing.Process(target=graphdisplayworker, args=[graph_q],daemon = False)\n R = multiprocessing.Process(target=record, args=[display_q],daemon = False)\n \n\n \n \n img = xiapi.Image()\n print('Starting data acquisition...')\n cam.start_acquisition()\n fps = FPS().start()\n cam.get_image(img)\n frame = 20*img.get_image_data_numpy()\n roi=cv2.selectROI(frame)\n cv2.destroyAllWindows()\n for i in range(stacksize):\n cam.get_image(img)\n frame = 20*img.get_image_data_numpy()\n stack.append(frame[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])])\n cv2.waitKey(1)\n for i in range(workers):\n p = multiprocessing.Process(target=worker, args=[input_q, output_q,stack],daemon = True)\n p.start()\n all_processes.append(p)\n cv2.waitKey(2)\n R.start()\n D.start()\n \n while quit == False and frame_count <500:\n cam.get_image(img)\n frame = 20*img.get_image_data_numpy()\n input_q.put([time.time(),frame[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])]])\n \n \n if output_q.empty():\n pass # fill up queue\n else:\n frame_count += 1\n dummylist=[]\n for i in range(output_q.qsize()):\n dummylist.append((quit,output_q.get()))\n dummylist.sort()\n for i in dummylist:\n display_q.put(i)\n fps.update() \n \n \n fps.stop() \n quit = True\n \n print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))\n print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))\n display_q.put((quit,output_q.get()))\n time.sleep(4)\n D.terminate()\n R.terminate()\n\n for process in all_processes:\n process.terminate()\n cam.stop_acquisition()\n cam.close_device() \n os._exit(1)\n # sys.exit()\n \n \n \n \n\n \n \n" ]
[ [ "numpy.fft.fftshift", "matplotlib.pyplot.pause", "numpy.array", "numpy.fft.fft2", "matplotlib.pyplot.figure", "numpy.abs", "scipy.optimize.leastsq", "numpy.exp", "numpy.asarray", "matplotlib.pyplot.show", "matplotlib.use", "numpy.linspace" ] ]
SiggiGue/sigfeat
[ "86bb94200dcd4b33c21de1abc01814bf85f97b38" ]
[ "examples/example2.py" ]
[ "from sigfeat import Extractor\nfrom sigfeat import feature as fts\n\n\nextractor = Extractor(\n fts.SpectralFlux(),\n fts.SpectralCentroid(),\n fts.SpectralFlatness(),\n fts.SpectralRolloff(),\n fts.SpectralCrestFactor(),\n fts.CrestFactor(),\n fts.ZeroCrossingRate(),\n fts.RootMeanSquare(),\n fts.Peak(),\n)\n\n\nif __name__ == '__main__':\n from pylab import plt\n import pandas as pd\n from pandas.tools.plotting import scatter_matrix\n\n from sigfeat.source.soundfile import SoundFileSource\n from sigfeat.preprocess import MeanMix\n from sigfeat.sink import DefaultDictSink\n\n src = MeanMix(SoundFileSource(\n 'Test.wav',\n blocksize=4096,\n overlap=2048))\n sink = DefaultDictSink()\n extractor.extract(src, sink)\n\n plt.figure(src.source.name)\n for l, r in sink['results'].items():\n plt.plot(r, 'o-', label=str(l))\n plt.legend()\n\n df = pd.DataFrame(sink['results'])\n scatter_matrix(df)\n plt.show()\n" ]
[ [ "pandas.DataFrame", "pandas.tools.plotting.scatter_matrix" ] ]
bsburnham/striplog
[ "0c68f63d645c5bb7a5cc73b9bdaa197c4fb3cc33" ]
[ "striplog/striplog.py" ]
[ "\"\"\"\nA striplog is a sequence of intervals.\n\n:copyright: 2019 Agile Geoscience\n:license: Apache 2.0\n\"\"\"\nimport re\nfrom io import StringIO\nimport csv\nimport operator\nimport warnings\nfrom collections import defaultdict\nfrom collections import OrderedDict\nfrom functools import reduce\nfrom copy import deepcopy\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport requests\nimport json\n\nfrom .interval import Interval, IntervalError\nfrom .component import Component\nfrom .legend import Legend\nfrom .canstrat import parse_canstrat\nfrom .markov import Markov_chain\nfrom . import utils\nfrom . import templates\n\n\nclass StriplogError(Exception):\n \"\"\"\n Generic error class.\n \"\"\"\n pass\n\n\nclass Striplog(object):\n \"\"\"\n A Striplog is a sequence of intervals.\n\n We will build them from LAS files or CSVs.\n\n Args:\n list_of_Intervals (list): A list of Interval objects.\n source (str): A source for the data. Default None.\n order (str): 'auto', 'depth', 'elevation', or 'none'. Please refer to\n the documentation for details. Best idea is to let the default\n work. Default: 'auto'.\n \"\"\"\n def __init__(self, list_of_Intervals, source=None, order='auto'):\n\n list_of_Intervals = deepcopy(list_of_Intervals)\n\n if not list_of_Intervals:\n m = \"Cannot create an empty Striplog.\"\n raise StriplogError(m)\n\n if order.lower()[0] == 'a': # Auto\n # If bases == tops, then this is a bunch of 'points'.\n if all([iv.base.z == iv.top.z for iv in list_of_Intervals]):\n order = 'none'\n self.order = 'none'\n # We will tolerate zero-thickness intervals mixed in.\n elif all([iv.base.z >= iv.top.z for iv in list_of_Intervals]):\n order = 'depth'\n self.order = 'depth'\n elif all([iv.base.z <= iv.top.z for iv in list_of_Intervals]):\n order = 'elevation'\n self.order = 'elevation'\n else:\n m = \"Could not determine order from tops and bases.\"\n raise StriplogError(m)\n\n if order.lower()[0] == 'n':\n self.order = 'none'\n # Sanity check\n fail = any([iv.base.z != iv.top.z for iv in list_of_Intervals])\n if fail:\n m = \"'None' order specified but tops != bases.\"\n raise StriplogError(m)\n # Order force\n list_of_Intervals.sort(key=operator.attrgetter('top'))\n\n elif order.lower()[0] == 'd':\n self.order = 'depth'\n # Sanity check\n fail = any([iv.base.z < iv.top.z for iv in list_of_Intervals])\n if fail:\n m = \"Depth order specified but base above top.\"\n raise StriplogError(m)\n # Order force\n list_of_Intervals.sort(key=operator.attrgetter('top'))\n\n else:\n self.order = 'elevation'\n fail = any([iv.base.z > iv.top.z for iv in list_of_Intervals])\n if fail:\n m = \"Elevation order specified but base above top.\"\n raise StriplogError(m)\n # Order force\n r = True\n list_of_Intervals.sort(key=operator.attrgetter('top'), reverse=r)\n\n self.source = source\n\n self.__list = list_of_Intervals\n self.__index = 0 # Set up iterable.\n\n def __repr__(self):\n length = len(self.__list)\n details = \"start={}, stop={}\".format(self.start.z, self.stop.z)\n return \"Striplog({0} Intervals, {1})\".format(length, details)\n\n def __str__(self):\n s = [str(i) for i in self.__list]\n return '\\n'.join(s)\n\n def __getitem__(self, key):\n if type(key) is slice:\n i = key.indices(len(self.__list))\n result = [self.__list[n] for n in range(*i)]\n if result:\n return Striplog(result)\n else:\n return None\n elif type(key) is list:\n result = []\n for j in key:\n result.append(self.__list[j])\n if result:\n return Striplog(result)\n else:\n return 
None\n else:\n return self.__list[key]\n\n def __delitem__(self, key):\n if (type(key) is list) or (type(key) is tuple):\n # Have to compute what the indices *will* be as\n # the initial ones are deleted.\n indices = [x-i for i, x in enumerate(key)]\n for k in indices:\n del self.__list[k]\n else:\n del self.__list[key]\n return\n\n def __len__(self):\n return len(self.__list)\n\n def __setitem__(self, key, value):\n if not key:\n return\n try:\n for i, j in enumerate(key):\n self.__list[j] = value[i]\n except TypeError:\n self.__list[key] = value\n except IndexError:\n raise StriplogError(\"There must be one Interval for each index.\")\n\n def __iter__(self):\n return iter(self.__list)\n\n def __next__(self):\n \"\"\"\n Supports iterable.\n \"\"\"\n try:\n result = self.__list[self.__index]\n except IndexError:\n self.__index = 0\n raise StopIteration\n self.__index += 1\n return result\n\n def next(self):\n \"\"\"\n For Python 2 compatibility.\n \"\"\"\n return self.__next__()\n\n def __contains__(self, item):\n for r in self.__list:\n if item in r.components:\n return True\n return False\n\n def __reversed__(self):\n return Striplog(self.__list[::-1])\n\n def __add__(self, other):\n if isinstance(other, self.__class__):\n result = self.__list + other.__list\n return Striplog(result)\n elif isinstance(other, Interval):\n result = self.__list + [other]\n return Striplog(result)\n else:\n raise StriplogError(\"You can only add striplogs or intervals.\")\n\n def insert(self, index, item):\n if isinstance(item, self.__class__):\n for i, iv in enumerate(item):\n self.__list.insert(index+i, iv)\n elif isinstance(item, Interval):\n self.__list.insert(index, item)\n return\n else:\n raise StriplogError(\"You can only insert striplogs or intervals.\")\n\n def append(self, item):\n \"\"\"\n Implements list-like `append()` method.\n \"\"\"\n if isinstance(item, Interval):\n self.__list.append(item)\n return\n else:\n m = \"You can only append an Interval to a Striplog.\"\n raise StriplogError(m)\n\n def extend(self, item):\n \"\"\"\n Implements list-like `extend()` method.\n \"\"\"\n if isinstance(item, self.__class__):\n self.__list += item\n return\n else:\n m = \"You can only extend a Striplog with another Striplog.\"\n raise StriplogError(m)\n\n def pop(self, index):\n \"\"\"\n Implements list-like `pop()` method.\n \"\"\"\n self.__list.pop(index)\n\n @property\n def start(self):\n \"\"\"\n Property. The closest Position to the datum.\n\n Returns:\n Position.\n \"\"\"\n if self.order == 'depth':\n # Too naive if intervals can overlap:\n # return self[0].top\n return min(i.top for i in self)\n else:\n return min(i.base for i in self)\n\n @property\n def stop(self):\n \"\"\"\n Property. The furthest Position from the datum.\n\n Returns:\n Position.\n \"\"\"\n if self.order == 'depth':\n return max(i.base for i in self)\n else:\n return max(i.top for i in self)\n\n def __sort(self):\n \"\"\"\n Private method. Sorts into 'natural' order: top-down for depth-ordered\n striplogs; bottom-up for elevation-ordered.\n\n Sorts in place.\n\n Returns:\n None.\n \"\"\"\n self.__list.sort(key=operator.attrgetter('top'))\n return\n\n def __strict(self):\n \"\"\"\n Private method. Checks if striplog is monotonically increasing in\n depth.\n\n Returns:\n Bool.\n \"\"\"\n def conc(a, b):\n return a + b\n\n # Check boundaries, b\n b = np.array(reduce(conc, [[i.top.z, i.base.z] for i in self]))\n\n return all(np.diff(b) >= 0)\n\n @property\n def cum(self):\n \"\"\"\n Property. 
Gives the cumulative thickness of all filled intervals.\n\n It would be nice to use sum() for this (by defining __radd__),\n but I quite like the ability to add striplogs and get a striplog\n and I don't think we can have both, it's too confusing.\n\n Not calling it sum, because that's a keyword.\n\n Returns:\n Float. The cumulative thickness.\n \"\"\"\n total = 0.0\n for i in self:\n total += i.thickness\n return total\n\n @property\n def mean(self):\n \"\"\"\n Property. Returns the mean thickness of all filled intervals.\n\n Returns:\n Float. The mean average of interval thickness.\n \"\"\"\n return self.cum / len(self)\n\n @property\n def components(self):\n \"\"\"\n Property. Returns the list of compenents in the striplog.\n\n Returns:\n List. A list of the unique components.\n \"\"\"\n return [i[0] for i in self.unique if i[0]]\n\n @property\n def unique(self):\n \"\"\"\n Property. Summarize a Striplog with some statistics.\n\n Returns:\n List. A list of (Component, total thickness thickness) tuples.\n \"\"\"\n all_rx = set([iv.primary for iv in self])\n table = {r: 0 for r in all_rx}\n for iv in self:\n table[iv.primary] += iv.thickness\n\n return sorted(table.items(), key=operator.itemgetter(1), reverse=True)\n\n @property\n def top(self):\n \"\"\"\n Property.\n \"\"\"\n # For backwards compatibility.\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"Striplog.top is deprecated; please use Striplog.unique\"\n warnings.warn(w, DeprecationWarning, stacklevel=2)\n return self.unique\n\n @classmethod\n def __intervals_from_tops(self,\n tops,\n values,\n basis,\n components,\n field=None,\n ignore_nan=True):\n \"\"\"\n Private method. Take a sequence of tops in an arbitrary dimension,\n and provide a list of intervals from which a striplog can be made.\n\n This is only intended to be used by ``from_image()``.\n\n Args:\n tops (iterable). A list of floats.\n values (iterable). A list of values to look up.\n basis (iterable). A list of components.\n components (iterable). A list of Components.\n\n Returns:\n List. A list of Intervals.\n \"\"\"\n # Scale tops to actual depths.\n length = float(basis.size)\n start, stop = basis[0], basis[-1]\n tops = [start + (p/(length-1)) * (stop-start) for p in tops]\n bases = tops[1:] + [stop]\n\n list_of_Intervals = []\n for i, t in enumerate(tops):\n\n v, c, d = values[i], [], {}\n\n if ignore_nan and np.isnan(v):\n continue\n\n if (field is not None):\n d = {field: v}\n\n if components is not None:\n try:\n c = [deepcopy(components[int(v)])]\n except IndexError:\n c = []\n\n if c and (c[0] is None):\n c = []\n\n interval = Interval(t, bases[i], data=d, components=c)\n list_of_Intervals.append(interval)\n\n return list_of_Intervals\n\n @classmethod\n def _clean_longitudinal_data(cls, data, null=None):\n \"\"\"\n Private function. 
Make sure we have what we need to make a striplog.\n \"\"\"\n\n # Rename 'depth' or 'MD'\n if ('top' not in data.keys()):\n data['top'] = data.pop('depth', data.pop('MD', None))\n\n # Sort everything\n idx = list(data.keys()).index('top')\n values = sorted(zip(*data.values()), key=lambda x: x[idx])\n data = {k: list(v) for k, v in zip(data.keys(), zip(*values))}\n\n if data['top'] is None:\n raise StriplogError('Could not get tops.')\n\n # Get rid of null-like values if specified.\n if null is not None:\n for k, v in data.items():\n data[k] = [i if i != null else None for i in v]\n\n return data\n\n @classmethod\n def from_petrel(cls, filename,\n stop=None,\n points=False,\n null=None,\n function=None,\n include=None,\n exclude=None,\n remap=None,\n ignore=None):\n\n \"\"\"\n Makes a striplog from a Petrel text file.\n\n Returns:\n striplog.\n \"\"\"\n result = utils.read_petrel(filename,\n function=function,\n remap=remap,\n )\n\n data = cls._clean_longitudinal_data(result,\n null=null\n )\n\n list_of_Intervals = cls._build_list_of_Intervals(data,\n stop=stop,\n points=points,\n include=include,\n exclude=exclude,\n ignore=ignore\n )\n if list_of_Intervals:\n return cls(list_of_Intervals)\n return None\n\n @classmethod\n def _build_list_of_Intervals(cls,\n data_dict,\n stop=None,\n points=False,\n include=None,\n exclude=None,\n ignore=None,\n lexicon=None):\n \"\"\"\n Private function. Takes a data dictionary and constructs a list\n of Intervals from it.\n\n Args:\n data_dict (dict)\n stop (float): Where to end the last interval.\n points (bool)\n include (dict)\n exclude (dict)\n ignore (list)\n lexicon (Lexicon)\n\n Returns:\n list.\n \"\"\"\n\n include = include or {}\n exclude = exclude or {}\n ignore = ignore or []\n\n # Reassemble as list of dicts\n all_data = []\n for data in zip(*data_dict.values()):\n all_data.append({k: v for k, v in zip(data_dict.keys(), data)})\n\n # Sort\n all_data = sorted(all_data, key=lambda x: x['top'])\n\n # Filter down:\n wanted_data = []\n for dictionary in all_data:\n keep = True\n delete = []\n for k, v in dictionary.items():\n incl = include.get(k, utils.null_default(True))\n excl = exclude.get(k, utils.null_default(False))\n if k in ignore:\n delete.append(k)\n if not incl(v):\n keep = False\n if excl(v):\n keep = False\n if delete:\n for key in delete:\n _ = dictionary.pop(key, None)\n if keep:\n wanted_data.append(dictionary)\n\n # Fill in\n if not points:\n for i, iv in enumerate(wanted_data):\n if iv.get('base', None) is None:\n try: # To set from next interval\n iv['base'] = wanted_data[i+1]['top']\n except (IndexError, KeyError):\n # It's the last interval\n if stop is not None:\n thick = stop - iv['top']\n else:\n thick = 1\n iv['base'] = iv['top'] + thick\n\n # Build the list of intervals to pass to __init__()\n list_of_Intervals = []\n for iv in wanted_data:\n top = iv.pop('top')\n base = iv.pop('base', None)\n descr = iv.pop('description', '')\n if iv:\n c, d = {}, {}\n for k, v in iv.items():\n match1 = (k[:9].lower() == 'component')\n match2 = (k[:5].lower() == 'comp ')\n if match1 or match2:\n k = re.sub(r'comp(?:onent)? 
', '', k, flags=re.I)\n c[k] = v # It's a component\n else:\n if v is not None:\n d[k] = v # It's data\n comp = [Component(c)] if c else None\n this = Interval(**{'top': top,\n 'base': base,\n 'description': descr,\n 'data': d,\n 'components': comp})\n else:\n this = Interval(**{'top': top,\n 'base': base,\n 'description': descr,\n 'lexicon': lexicon})\n list_of_Intervals.append(this)\n\n return list_of_Intervals\n\n @classmethod\n def from_csv(cls, filename=None,\n text=None,\n dlm=',',\n lexicon=None,\n points=False,\n include=None,\n exclude=None,\n remap=None,\n function=None,\n null=None,\n ignore=None,\n source=None,\n stop=None,\n fieldnames=None):\n \"\"\"\n Load from a CSV file or text.\n\n Args\n filename (str): The filename, or use `text`.\n text (str): CSV data as a string, or use `filename`.\n dlm (str): The delimiter, default ','.\n lexicon (Lexicon): The lexicon to use, optional. Only needed if \\\n parsing descriptions (e.g. cuttings).\n points (bool): Whether to make a point dataset (as opposed to \\\n ordinary intervals with top and base. Default is False.\n include: Default is None.\n exclude: Default is None.\n remap: Default is None.\n function: Default is None.\n null: Default is None.\n ignore: Default is None.\n source: Default is None.\n stop: Default is None.\n fieldnames: Default is None.\n\n Returns\n Striplog. A new instance.\n \"\"\"\n if (filename is None) and (text is None):\n raise StriplogError(\"You must provide a filename or CSV text.\")\n\n if (filename is not None):\n if source is None:\n source = filename\n with open(filename, 'r') as f:\n text = f.read()\n\n source = source or 'CSV'\n\n # Deal with multiple spaces in space delimited file.\n if dlm == ' ':\n text = re.sub(r'[ \\t]+', ' ', text)\n\n if fieldnames is not None:\n text = dlm.join(fieldnames) + '\\n' + text\n\n try:\n f = StringIO(text) # Python 3\n except TypeError:\n f = StringIO(unicode(text)) # Python 2\n\n reader = csv.DictReader(f, delimiter=dlm)\n\n # Reorganize the data to make fixing it easier.\n reorg = {k.strip().lower(): []\n for k in reader.fieldnames\n if k is not None}\n t = f.tell()\n for key in reorg:\n f.seek(t)\n for r in reader:\n s = {k.strip().lower(): v.strip() for k, v in r.items()}\n try:\n reorg[key].append(float(s[key]))\n except ValueError:\n reorg[key].append(s[key])\n\n f.close()\n\n remap = remap or {}\n for k, v in remap.items():\n reorg[v] = reorg.pop(k)\n\n data = cls._clean_longitudinal_data(reorg, null=null)\n\n list_of_Intervals = cls._build_list_of_Intervals(data,\n points=points,\n lexicon=lexicon,\n include=include,\n exclude=exclude,\n ignore=ignore,\n stop=stop)\n\n return cls(list_of_Intervals, source=source)\n\n @classmethod\n def from_dict(cls, dictionary):\n \"\"\"\n Take a dictionary of the form name:depth and return a striplog of\n complete intervals.\n \"\"\"\n d_sorted = sorted(dictionary.items(), key=lambda i: i[1])\n names = [i[0] for i in d_sorted]\n tops_ = [i[1] for i in d_sorted]\n bases_ = tops_[1:] + [tops_[-1]+1]\n comps_ = [Component({'formation': name}) for name in names]\n\n list_of_Intervals = []\n for top, base, comp in zip(tops_, bases_, comps_):\n iv = Interval(top=top, base=base, components=[comp])\n list_of_Intervals.append(iv)\n\n return cls(list_of_Intervals)\n\n @classmethod\n def from_descriptions(cls, text,\n lexicon=None,\n source='CSV',\n dlm=',',\n points=False,\n abbreviations=False,\n complete=False,\n order='depth',\n columns=None,\n ):\n \"\"\"\n Convert a CSV string into a striplog. 
Expects 2 or 3 fields:\n top, description\n OR\n top, base, description\n\n Args:\n text (str): The input text, given by ``well.other``.\n lexicon (Lexicon): A lexicon, required to extract components.\n source (str): A source. Default: 'CSV'.\n dlm (str): The delimiter, given by ``well.dlm``. Default: ','\n points (bool): Whether to treat as points or as intervals.\n abbreviations (bool): Whether to expand abbreviations in the\n description. Default: False.\n complete (bool): Whether to make 'blank' intervals, or just leave\n gaps. Default: False.\n order (str): The order, 'depth' or 'elevation'. Default: 'depth'.\n columns (tuple or list): The names of the columns.\n\n Returns:\n Striplog: A ``striplog`` object.\n\n Example:\n # TOP BOT LITH\n 312.34, 459.61, Sandstone\n 459.71, 589.61, Limestone\n 589.71, 827.50, Green shale\n 827.60, 1010.84, Fine sandstone\n \"\"\"\n\n text = re.sub(r'(\\n+|\\r\\n|\\r)', '\\n', text.strip())\n\n as_strings = []\n try:\n f = StringIO(text) # Python 3\n except TypeError:\n f = StringIO(unicode(text)) # Python 2\n reader = csv.reader(f, delimiter=dlm, skipinitialspace=True)\n for row in reader:\n as_strings.append(row)\n f.close()\n\n if not columns:\n if order[0].lower() == 'e':\n columns = ('base', 'top', 'description')\n else:\n columns = ('top', 'base', 'description')\n\n result = {k: [] for k in columns}\n\n # Set the indices for the fields.\n tix = columns.index('top')\n bix = columns.index('base')\n dix = columns.index('description')\n\n for i, row in enumerate(as_strings):\n\n # THIS ONLY WORKS FOR MISSING TOPS!\n if len(row) == 2:\n row = [row[0], None, row[1]]\n\n # TOP\n this_top = float(row[tix])\n\n # THIS ONLY WORKS FOR MISSING TOPS!\n # BASE\n # Base is null: use next top if this isn't the end.\n if row[1] is None:\n if i < len(as_strings)-1:\n this_base = float(as_strings[i+1][0]) # Next top.\n else:\n this_base = this_top + 1 # Default to 1 m thick at end.\n else:\n this_base = float(row[bix])\n\n # DESCRIPTION\n this_descr = row[dix].strip()\n\n # Deal with making intervals or points...\n if not points:\n # Insert intervals where needed.\n if complete and (i > 0) and (this_top != result['base'][-1]):\n result['top'].append(result['base'][-1])\n result['base'].append(this_top)\n result['description'].append('')\n else:\n this_base = None # Gets set to Top in striplog creation\n\n # ASSIGN\n result['top'].append(this_top)\n result['base'].append(this_base)\n result['description'].append(this_descr)\n\n # Build the list.\n list_of_Intervals = []\n for i, t in enumerate(result['top']):\n b = result['base'][i]\n d = result['description'][i]\n interval = Interval(t, b, description=d,\n lexicon=lexicon,\n abbreviations=abbreviations)\n list_of_Intervals.append(interval)\n\n return cls(list_of_Intervals, source=source)\n\n @classmethod\n def from_image(cls, filename, start, stop, legend,\n source=\"Image\",\n col_offset=0.1,\n row_offset=2,\n tolerance=0,\n background=None):\n \"\"\"\n Read an image and generate Striplog.\n\n Args:\n filename (str): An image file, preferably high-res PNG.\n start (float or int): The depth at the top of the image.\n stop (float or int): The depth at the bottom of the image.\n legend (Legend): A legend to look up the components in.\n source (str): A source for the data. Default: 'Image'.\n col_offset (Number): The proportion of the way across the image\n from which to extract the pixel column. Default: 0.1 (ie 10%).\n row_offset (int): The number of pixels to skip at the top of\n each change in colour. 
Default: 2.\n tolerance (float): The Euclidean distance between hex colours,\n which has a maximum (black to white) of 441.67 in base 10.\n Default: 0.\n background (array): A background colour (as hex) to ignore.\n\n Returns:\n Striplog: The ``striplog`` object.\n \"\"\"\n if background is None:\n bg = \"#xxxxxx\"\n else:\n bg = background\n rgb = utils.loglike_from_image(filename, col_offset)\n loglike = np.array([utils.rgb_to_hex(t) for t in rgb if utils.rgb_to_hex(t) != bg])\n\n # Get the pixels and colour values at 'tops' (i.e. changes).\n tops, hexes = utils.tops_from_loglike(loglike, offset=row_offset)\n\n # If there are consecutive tops, we assume it's because there is a\n # single-pixel row that we don't want. So take the second one only.\n # We used to do this reduction in ``utils.tops_from_loglike()`` but\n # it was preventing us from making intervals only one sample thick.\n nonconsecutive = np.append(np.diff(tops), 2)\n tops = tops[nonconsecutive > 1]\n hexes = hexes[nonconsecutive > 1]\n\n # Get the set of unique colours.\n hexes_reduced = list(set(hexes))\n\n # Get the components corresponding to the colours.\n components = [legend.get_component(h, tolerance=tolerance)\n for h in hexes_reduced]\n\n # Turn them into integers.\n values = [hexes_reduced.index(i) for i in hexes]\n\n basis = np.linspace(start, stop, loglike.size)\n\n list_of_Intervals = cls.__intervals_from_tops(tops,\n values,\n basis,\n components)\n\n list_of_Intervals = [iv for iv in list_of_Intervals\n if isinstance(iv.primary, Component)]\n\n return cls(list_of_Intervals, source=\"Image\")\n\n @classmethod\n def from_img(cls, *args, **kwargs):\n \"\"\"\n For backwards compatibility.\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"from_img() is deprecated; please use from_image()\"\n warnings.warn(w)\n return cls.from_image(*args, **kwargs)\n\n @classmethod\n def _from_array(cls, a,\n lexicon=None,\n source=\"\",\n points=False,\n abbreviations=False):\n \"\"\"\n DEPRECATING.\n\n Turn an array-like into a Striplog. It should have the following\n format (where ``base`` is optional):\n\n [(top, base, description),\n (top, base, description),\n ...\n ]\n\n Args:\n a (array-like): A list of lists or of tuples, or an array.\n lexicon (Lexicon): A language dictionary to extract structured\n objects from the descriptions.\n source (str): The source of the data. Default: ''.\n points (bool): Whether to treat as point data. Default: False.\n\n Returns:\n Striplog: The ``striplog`` object.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"from_array() is deprecated.\"\n warnings.warn(w, DeprecationWarning, stacklevel=2)\n\n csv_text = ''\n for interval in a:\n interval = [str(i) for i in interval]\n if (len(interval) < 2) or (len(interval) > 3):\n raise StriplogError('Elements must have 2 or 3 items')\n descr = interval[-1].strip('\" ')\n interval[-1] = '\"' + descr + '\"'\n csv_text += ', '.join(interval) + '\\n'\n\n return cls.from_descriptions(csv_text,\n lexicon,\n source=source,\n points=points,\n abbreviations=abbreviations)\n\n @classmethod\n def from_log(cls, log,\n cutoff=None,\n components=None,\n legend=None,\n legend_field=None,\n field=None,\n right=False,\n basis=None,\n source='Log'):\n \"\"\"\n Turn a 1D array into a striplog, given a cutoff.\n\n Args:\n log (array-like): A 1D array or a list of integers.\n cutoff (number or array-like): The log value(s) at which to bin\n the log. 
Optional.\n components (array-like): A list of components. Use this or\n ``legend``.\n legend (``Legend``): A legend object. Use this or ``components``.\n legend_field ('str'): If you're not trying to match against\n components, then you can match the log values to this field in\n the Decors.\n field (str): The field in the Interval's ``data`` to store the log\n values as.\n right (bool): Which side of the cutoff to send things that are\n equal to, i.e. right on, the cutoff.\n basis (array-like): A depth basis for the log, so striplog knows\n where to put the boundaries.\n source (str): The source of the data. Default 'Log'.\n\n Returns:\n Striplog: The ``striplog`` object.\n \"\"\"\n if (components is None) and (legend is None) and (field is None):\n m = 'You must provide a list of components and legend, or a field.'\n raise StriplogError(m)\n\n if (legend is not None) and (legend_field is None):\n try: # To treat it like a legend.\n components = [deepcopy(decor.component) for decor in legend]\n except AttributeError: # It's just a list of components.\n pass\n\n if legend_field is not None:\n field_values = [getattr(d, legend_field, 0) for d in legend]\n components = [Component() for i in range(int(max(field_values)+1))]\n for i, decor in enumerate(legend):\n components[i] = deepcopy(decor.component)\n\n if cutoff is not None:\n\n # First make sure we have enough components.\n try:\n n = len(cutoff)\n except TypeError:\n n = 1\n if len(components) < n+1:\n m = 'For n cutoffs, you need to provide at least'\n m += 'n+1 components.'\n raise StriplogError(m)\n\n # Digitize.\n try: # To use cutoff as a list.\n a = np.digitize(log, cutoff, right)\n except ValueError: # It's just a number.\n a = np.digitize(log, [cutoff], right)\n\n else:\n a = np.copy(log)\n\n tops, values = utils.tops_from_loglike(a)\n\n if basis is None:\n m = 'You must provide a depth or elevation basis.'\n raise StriplogError(m)\n\n list_of_Intervals = cls.__intervals_from_tops(tops,\n values,\n basis,\n components,\n field=field\n )\n\n return cls(list_of_Intervals, source=source)\n\n @classmethod\n def from_las3(cls, string, lexicon=None,\n source=\"LAS\",\n dlm=',',\n abbreviations=False):\n \"\"\"\n Turn LAS3 'lithology' section into a Striplog.\n\n Args:\n string (str): A section from an LAS3 file.\n lexicon (Lexicon): The language for conversion to components.\n source (str): A source for the data.\n dlm (str): The delimiter.\n abbreviations (bool): Whether to expand abbreviations.\n\n Returns:\n Striplog: The ``striplog`` object.\n\n Note:\n Handles multiple 'Data' sections. It would be smarter for it\n to handle one at a time, and to deal with parsing the multiple\n sections in the Well object.\n\n Does not read an actual LAS file. 
Use the Well object for that.\n \"\"\"\n f = re.DOTALL | re.IGNORECASE\n regex = r'\\~\\w+?_Data.+?\\n(.+?)(?:\\n\\n+|\\n*\\~|\\n*$)'\n pattern = re.compile(regex, flags=f)\n text = pattern.search(string).group(1)\n\n s = re.search(r'\\.(.+?)\\: ?.+?source', string)\n if s:\n source = s.group(1).strip()\n\n return cls.from_descriptions(text, lexicon,\n source=source,\n dlm=dlm,\n abbreviations=abbreviations)\n\n @classmethod\n def from_canstrat(cls, filename, source='canstrat'):\n \"\"\"\n Eat a Canstrat DAT file and make a striplog.\n \"\"\"\n with open(filename) as f:\n dat = f.read()\n\n data = parse_canstrat(dat)\n\n list_of_Intervals = []\n for d in data[7]: # 7 is the 'card type' for lithology info.\n if d.pop('skip'):\n continue\n top = d.pop('top')\n base = d.pop('base')\n comps = [Component({'lithology': d['rtc'],\n 'colour': d['colour_name']\n })]\n iv = Interval(top=top, base=base, components=comps, data=d)\n list_of_Intervals.append(iv)\n\n return cls(list_of_Intervals, source=source)\n\n def copy(self):\n \"\"\"Returns a shallow copy.\"\"\"\n return Striplog([i.copy() for i in self],\n order=self.order,\n source=self.source)\n\n \n\n\n\n # Outputter\n def to_canstrat(self, filename, params):\n \"\"\"\n Write a Canstrat ASCII file.\n\n Args:\n filename (str)\n params (dict): The well details. You can use a ``welly`` header\n object.\n\n Returns:\n\n \"\"\"\n\n return None\n\n # Outputter\n def to_csv(self,\n filename=None,\n as_text=True,\n use_descriptions=False,\n dlm=\",\",\n header=True):\n \"\"\"\n Returns a CSV string built from the summaries of the Intervals.\n\n Args:\n use_descriptions (bool): Whether to use descriptions instead\n of summaries, if available.\n dlm (str): The delimiter.\n header (bool): Whether to form a header row.\n\n Returns:\n str: A string of comma-separated values.\n \"\"\"\n if (filename is None):\n if (not as_text):\n m = \"You must provide a filename or set as_text to True.\"\n raise StriplogError(m)\n else:\n as_text = False\n\n if as_text:\n output = StringIO()\n else:\n output = open(filename, 'w')\n\n fieldnames = ['Top', 'Base', 'Component']\n writer = csv.DictWriter(output,\n delimiter=dlm,\n fieldnames=fieldnames,\n quoting=csv.QUOTE_MINIMAL)\n\n if header:\n writer.writeheader()\n\n for i in self.__list:\n if use_descriptions and i.description:\n text = i.description\n elif i.primary:\n text = i.primary.summary()\n else:\n text = ''\n d = {j: k for j, k in zip(fieldnames, [i.top.z, i.base.z, text])}\n writer.writerow(d)\n\n if as_text:\n return output.getvalue()\n else:\n output.close\n return None\n\n # Outputter\n def to_las3(self, use_descriptions=False, dlm=\",\", source=\"Striplog\"):\n \"\"\"\n Returns an LAS 3.0 section string.\n\n Args:\n use_descriptions (bool): Whether to use descriptions instead\n of summaries, if available.\n dlm (str): The delimiter.\n source (str): The sourse of the data.\n\n Returns:\n str: A string forming Lithology section of an LAS3 file.\n \"\"\"\n data = self.to_csv(use_descriptions=use_descriptions,\n dlm=dlm,\n header=False)\n\n return templates.section.format(name='Lithology',\n short=\"LITH\",\n source=source,\n data=data)\n\n # Outputter\n def to_log(self,\n step=1.0,\n start=None,\n stop=None,\n basis=None,\n field=None,\n field_function=None,\n bins=True,\n dtype='float',\n table=None,\n sort_table=False,\n legend=None,\n legend_field=None,\n match_only=None,\n undefined=0,\n return_meta=False\n ):\n \"\"\"\n Return a fully sampled log from a striplog. 
Useful for crossplotting\n with log data, for example.\n\n Args:\n step (float): The step size. Default: 1.0.\n start (float): The start depth of the new log. You will want to\n match the logs, so use the start depth from the LAS file.\n Default: The basis if provided, else the start of the striplog.\n stop (float): The stop depth of the new log. Use the stop depth\n of the LAS file. Default: The basis if provided, else the stop\n depth of the striplog.\n field (str): If you want the data to come from one of the\n attributes of the components in the striplog, provide it.\n field_function (function): Provide a function to apply to the field\n you are asking for. It's up to you to make sure the function\n does what you want.\n bins (bool): Whether to return the index of the items from the\n lookup table. If False, then the item itself will be returned. \n dtype (str): The NumPy dtype string for the output log.\n table (list): Provide a look-up table of values if you want. If you\n don't, then it will be constructed from the data.\n sort_table (bool): Whether to sort the table or not. Default: False.\n legend (Legend): If you want the codes to come from a legend,\n provide one. Otherwise the codes come from the log, using\n integers in the order of prevalence. If you use a legend,\n they are assigned in the order of the legend.\n legend_field (str): If you want to get a log representing one of\n the fields in the legend, such as 'width' or 'grainsize'.\n match_only (list): If you only want to match some attributes of\n the Components (e.g. lithology), provide a list of those\n you want to match.\n undefined (number): What to fill in where no value can be\n determined, e.g. ``-999.25`` or ``np.nan``. Default 0.\n return_meta (bool): If ``True``, also return the depth basis\n (np.linspace), and the component table.\n\n Returns:\n ndarray: If ``return_meta`` was ``True``, you get:\n\n * The log data as an array of ints.\n * The depth basis as an array of floats.\n * A list of the components in the order matching the ints.\n\n If ``return_meta`` was ``False`` (the default), you only get\n the log data.\n \"\"\"\n # Make the preparations.\n if basis is not None:\n start, stop = basis[0], basis[-1]\n step = basis[1] - start\n else:\n start = start or self.start.z\n stop = stop or self.stop.z\n pts = np.ceil((stop - start)/step) + 1\n basis = np.linspace(start, stop, int(pts))\n\n if (field is not None) or (legend_field is not None):\n result = np.zeros_like(basis, dtype=dtype)\n else:\n result = np.zeros_like(basis, dtype=np.int)\n\n if np.isnan(undefined):\n try:\n result[:] = np.nan\n except:\n pass # array type is int\n\n # If needed, make a look-up table for the log values.\n if table is None:\n if legend:\n table = [j.component for j in legend]\n elif field:\n s = set([iv.data.get(field) for iv in self])\n table = list(filter(None, s))\n else:\n table = [j[0] for j in self.unique]\n\n # Adjust the table if necessary. Go over all the components in the\n # table list, and remove elements that are not in the match list.\n # Careful! 
This results in a new table, with components that may not\n # be in the original list of components.\n if match_only is not None:\n if not isinstance(match_only, (list, tuple, set,)):\n raise StriplogError(\"match_only should be type list not str.\")\n table_new = []\n for c in table:\n if c == '':\n continue # No idea why sometimes there's a ''\n c_new = Component({k: v for k, v in c.__dict__.items()\n if k in match_only})\n # Only add unique, and preserve order.\n if c_new not in table_new:\n table_new.append(c_new)\n table = table_new\n else:\n match_only = []\n\n if sort_table:\n table.sort()\n\n start_ix = self.read_at(start, index=True)\n stop_ix = self.read_at(stop, index=True)\n if stop_ix is not None:\n stop_ix += 1\n\n # Assign the values.\n for i in self[start_ix:stop_ix]:\n c = i.primary\n if match_only:\n c = Component({k: getattr(c, k, None)\n for k in match_only})\n\n if legend and legend_field: # Use the legend field.\n try:\n key = legend.getattr(c, legend_field, undefined)\n key = key or undefined\n except ValueError:\n key = undefined\n elif field: # Get data directly from that field in iv.data.\n f = field_function or utils.null\n try:\n v = f(i.data.get(field, undefined)) or undefined\n if bins:\n # Then return the bin we're in...\n key = (table.index(v) + 1) or undefined\n else:\n # ...else return the actual value.\n key = v\n except ValueError:\n key = undefined\n else: # Use the lookup table.\n try:\n key = (table.index(c) + 1) or undefined\n except ValueError:\n key = undefined\n\n top_index = int(np.ceil((max(start, i.top.z)-start)/step))\n base_index = int(np.ceil((min(stop, i.base.z)-start)/step))\n\n try:\n result[top_index:base_index+1] = key\n except: # Have a list or array or something.\n result[top_index:base_index+1] = key[0]\n\n if return_meta:\n return result, basis, table\n else:\n return result\n\n def to_flag(self, **kwargs):\n \"\"\"\n A wrapper for ``to_log()`` that returns a boolean array.\n Useful for masking. 
Has the same interface as ``to_log()``.\n \"\"\"\n return self.to_log(**kwargs).astype(bool)\n\n def plot_points(self, ax,\n legend=None,\n field=None,\n field_function=None,\n undefined=0,\n **kwargs):\n \"\"\"\n Plotting, but only for points (as opposed to intervals).\n \"\"\"\n\n ys = [iv.top.z for iv in self]\n\n if field is not None:\n f = field_function or utils.null\n xs = [f(iv.data.get(field, undefined)) for iv in self]\n else:\n xs = [1 for iv in self]\n\n ax.set_xlim((min(xs), max(xs)))\n for x, y in zip(xs, ys):\n ax.axhline(y, color='lightgray', zorder=0)\n\n ax.scatter(xs, ys, clip_on=False, **kwargs)\n\n return ax\n\n def plot_tops(self, ax, legend=None, field=None, **kwargs):\n \"\"\"\n Plotting, but only for tops (as opposed to intervals).\n \"\"\"\n if field is None:\n raise StriplogError('You must provide a field to plot.')\n\n ys = [iv.top.z for iv in self]\n\n try:\n try:\n ts = [getattr(iv.primary, field) for iv in self]\n except:\n ts = [iv.data.get(field) for iv in self]\n except:\n raise StriplogError('Could not retrieve field.')\n\n for y, t in zip(ys, ts):\n ax.axhline(y, color='lightblue', lw=3, zorder=0)\n ax.text(0.1, y-max(ys)/200, t, ha='left')\n\n return ax\n\n def plot_field(self, ax, legend=None, field=None, **kwargs):\n \"\"\"\n Plotting, but only for tops (as opposed to intervals).\n \"\"\"\n if field is None:\n raise StriplogError('You must provide a field to plot.')\n\n try:\n try:\n xs = [getattr(iv.primary, field) for iv in self]\n except:\n xs = [iv.data.get(field) for iv in self]\n except:\n raise StriplogError('Could not retrieve field.')\n\n for iv, x in zip(self.__list, xs):\n _, ymin = utils.axis_transform(ax, 0, iv.base.z, ylim=(self.start.z, self.stop.z), inverse=True)\n _, ymax = utils.axis_transform(ax, 0, iv.top.z, ylim=(self.start.z, self.stop.z), inverse=True)\n ax.axvline(x, ymin=ymin, ymax=ymax)\n\n return ax\n\n def max_field(self, field):\n return max(filter(None, [iv.data.get(field) for iv in self]))\n\n def plot_axis(self,\n ax,\n legend,\n ladder=False,\n default_width=1,\n match_only=None,\n colour=None,\n colour_function=None,\n cmap=None,\n default=None,\n width_field=None,\n **kwargs\n ):\n \"\"\"\n Plotting, but only the Rectangles. You have to set up the figure.\n Returns a matplotlib axis object.\n\n Args:\n ax (axis): The matplotlib axis to plot into.\n legend (Legend): The Legend to use for colours, etc.\n ladder (bool): Whether to use widths or not. Default False.\n default_width (int): A width for the plot if not using widths.\n Default 1.\n match_only (list): A list of strings matching the attributes you\n want to compare when plotting.\n colour (str): Which data field to use for colours.\n cmap (cmap): Matplotlib colourmap. 
Default ``viridis``.\n default (float): The default (null) value.\n width_field (str): The field to use for the width of the patches.\n **kwargs are passed through to matplotlib's ``patches.Rectangle``.\n\n Returns:\n axis: The matplotlib.pyplot axis.\n \"\"\"\n default_c = None\n patches = []\n for iv in self.__list:\n origin = (0, iv.top.z)\n d = legend.get_decor(iv.primary, match_only=match_only)\n thick = iv.base.z - iv.top.z\n\n if ladder:\n if width_field is not None:\n w = iv.data.get(width_field, 1)\n w = default_width * w/self.max_field(width_field)\n default_c = 'gray'\n elif legend is not None:\n w = d.width or default_width\n try:\n w = default_width * w/legend.max_width\n except:\n w = default_width\n else:\n w = default_width\n\n # Allow override of lw\n this_patch_kwargs = kwargs.copy()\n lw = this_patch_kwargs.pop('lw', 0)\n ec = this_patch_kwargs.pop('ec', 'k')\n fc = this_patch_kwargs.pop('fc', None) or default_c or d.colour\n\n if colour is None:\n rect = mpl.patches.Rectangle(origin,\n w,\n thick,\n fc=fc,\n lw=lw,\n hatch=d.hatch,\n ec=ec, # edgecolour for hatching\n **this_patch_kwargs)\n ax.add_patch(rect)\n else:\n rect = mpl.patches.Rectangle(origin,\n w,\n thick,\n lw=lw,\n ec=ec, # edgecolour for hatching\n **this_patch_kwargs)\n patches.append(rect)\n\n if colour is not None:\n cmap = cmap or 'viridis'\n p = mpl.collections.PatchCollection(patches, cmap=cmap, lw=lw)\n p.set_array(self.get_data(colour,\n colour_function,\n default=default\n ))\n ax.add_collection(p)\n cb = plt.colorbar(p)\n cb.outline.set_linewidth(0)\n\n return ax\n\n def get_data(self, field, function=None, default=None):\n \"\"\"\n Get data from the striplog.\n \"\"\"\n f = function or utils.null\n data = []\n for iv in self:\n d = iv.data.get(field)\n if d is None:\n if default is not None:\n d = default\n else:\n d = np.nan\n data.append(f(d))\n\n return np.array(data)\n\n # Outputter\n def plot(self,\n legend=None,\n width=1.5,\n ladder=True,\n aspect=10,\n ticks=(1, 10),\n match_only=None,\n ax=None,\n return_fig=False,\n colour=None,\n cmap='viridis',\n default=None,\n style='intervals',\n field=None,\n label=None,\n **kwargs):\n \"\"\"\n Hands-free plotting.\n\n Args:\n legend (Legend): The Legend to use for colours, etc.\n width (int): The width of the plot, in inches. Default 1.\n ladder (bool): Whether to use widths or not. Default False.\n aspect (int): The aspect ratio of the plot. Default 10.\n ticks (int or tuple): The (minor,major) tick interval for depth.\n Only the major interval is labeled. Default (1,10).\n match_only (list): A list of strings matching the attributes you\n want to compare when plotting.\n ax (ax): A maplotlib axis to plot onto. If you pass this, it will\n be returned. Optional.\n return_fig (bool): Whether or not to return the maplotlib ``fig``\n object. Default False.\n colour (str): Which data field to use for colours.\n cmap (cmap): Matplotlib colourmap. Default ``viridis``.\n **kwargs are passed through to matplotlib's ``patches.Rectangle``.\n\n Returns:\n None. 
Unless you specify ``return_fig=True`` or pass in an ``ax``.\n \"\"\"\n if legend is None:\n legend = Legend.random(self.components)\n\n if style.lower() == 'tops':\n # Make sure width is at least 3 for 'tops' style\n width = max([3, width])\n\n if ax is None:\n return_ax = False\n fig = plt.figure(figsize=(width, aspect*width))\n ax = fig.add_axes([0.35, 0.05, 0.6, 0.95])\n else:\n return_ax = True\n\n if (self.order == 'none') or (style.lower() == 'points'):\n # Then this is a set of points.\n ax = self.plot_points(ax=ax, legend=legend, field=field, **kwargs)\n elif style.lower() == 'field':\n if field is None:\n raise StriplogError('You must provide a field to plot.')\n ax = self.plot_field(ax=ax, legend=legend, field=field)\n elif style.lower() == 'tops':\n ax = self.plot_tops(ax=ax, legend=legend, field=field)\n ax.set_xticks([])\n else:\n ax = self.plot_axis(ax=ax,\n legend=legend,\n ladder=ladder,\n default_width=width,\n match_only=kwargs.get('match_only',\n match_only),\n colour=colour,\n cmap=cmap,\n default=default,\n width_field=field,\n **kwargs\n )\n\n ax.set_xlim([0, width])\n ax.set_xticks([])\n\n # Rely on interval order.\n if self.order == 'depth':\n upper, lower = self.start.z, self.stop.z\n else:\n upper, lower = self.stop.z, self.start.z\n rng = abs(upper - lower)\n\n ax.set_ylim([lower, upper])\n\n if label is not None:\n for iv in self.__list:\n plt.text(1.6, iv.middle, iv.primary[label], ha='left', va='center', size=10)\n\n # Make sure ticks is a tuple.\n try:\n ticks = tuple(ticks)\n except TypeError:\n ticks = (1, ticks)\n\n # Avoid MAXTICKS error.\n while rng/ticks[0] > 250:\n mi, ma = 10*ticks[0], ticks[1]\n if ma <= mi:\n ma = 10 * mi\n ticks = (mi, ma)\n\n # Carry on plotting...\n minorLocator = mpl.ticker.MultipleLocator(ticks[0])\n ax.yaxis.set_minor_locator(minorLocator)\n\n majorLocator = mpl.ticker.MultipleLocator(ticks[1])\n majorFormatter = mpl.ticker.FormatStrFormatter('%d')\n ax.yaxis.set_major_locator(majorLocator)\n ax.yaxis.set_major_formatter(majorFormatter)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.get_yaxis().set_tick_params(which='both', direction='out')\n\n # Optional title.\n title = getattr(self, 'title', None)\n if title is not None:\n ax.set_title(title)\n\n ax.patch.set_alpha(0)\n\n if return_ax:\n return ax\n elif return_fig:\n return fig\n else:\n return\n\n def shift(self, delta=None, start=None):\n \"\"\"\n Shift all the intervals by `delta` (negative numbers are 'up')\n or by setting a new start depth.\n\n Returns a copy of the striplog.\n \"\"\"\n new_strip = self.copy()\n if delta is None:\n if start is None:\n raise StriplogError(\"You must provide a delta or a new start.\")\n delta = start - self.start.z\n for iv in new_strip:\n iv.top = iv.top.z + delta\n iv.base = iv.base.z + delta\n return new_strip\n\n def read_at(self, d, index=False):\n \"\"\"\n Get the index of the interval at a particular 'depth' (though this\n might be an elevation or age or anything).\n\n Args:\n d (Number): The 'depth' to query.\n index (bool): Whether to return the index instead of the interval.\n\n Returns:\n Interval: The interval, or if ``index==True`` the index of the\n interval, at the specified 'depth', or ``None`` if the depth is\n outside the striplog's range.\n \"\"\"\n for i, iv in enumerate(self):\n if iv.spans(d):\n return i if index else iv\n return None\n\n def depth(self, d):\n \"\"\"\n For backwards compatibility.\n 
\"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"depth() is deprecated; please use read_at()\"\n warnings.warn(w)\n return self.read_at(d)\n\n def extract(self, log, basis, name, function=None):\n \"\"\"\n 'Extract' a log into the components of a striplog.\n\n Args:\n log (array_like). A log or other 1D data.\n basis (array_like). The depths or elevations of the log samples.\n name (str). The name of the attribute to store in the components.\n function (function). A function that takes an array as the only\n input, and returns whatever you want to store in the 'name'\n attribute of the primary component.\n\n Returns:\n A copy of the striplog.\n \"\"\"\n # Build a dict of {index: [log values]} to keep track.\n intervals = {}\n previous_ix = -1\n for i, z in enumerate(basis):\n ix = self.read_at(z, index=True)\n if ix is None:\n continue\n if ix == previous_ix:\n intervals[ix].append(log[i])\n else:\n intervals[ix] = [log[i]]\n previous_ix = ix\n\n # Set the requested attribute in the primary comp of each interval.\n new_strip = self.copy()\n for ix, data in intervals.items():\n f = function or utils.null\n d = f(np.array(data))\n new_strip[ix].data[name] = d\n\n return new_strip\n\n def find(self, search_term, index=False):\n \"\"\"\n Look for a regex expression in the descriptions of the striplog.\n If there's no description, it looks in the summaries.\n\n If you pass a Component, then it will search the components, not the\n descriptions or summaries.\n\n Case insensitive.\n\n Args:\n search_term (string or Component): The thing you want to search\n for. Strings are treated as regular expressions.\n index (bool): Whether to return the index instead of the interval.\n\n Returns:\n Striplog: A striplog that contains only the 'hit' Intervals.\n However, if ``index`` was ``True``, then that's what you get.\n \"\"\"\n hits = []\n for i, iv in enumerate(self):\n try:\n search_text = iv.description or iv.primary.summary()\n pattern = re.compile(search_term, flags=re.IGNORECASE)\n if pattern.search(search_text):\n hits.append(i)\n except TypeError:\n if search_term in iv.components:\n hits.append(i)\n if hits and index:\n return hits\n elif hits:\n return self[hits]\n else:\n return\n\n def __find_incongruities(self, op, index):\n \"\"\"\n Private method. Finds gaps and overlaps in a striplog. Called by\n find_gaps() and find_overlaps().\n\n Args:\n op (operator): ``operator.gt`` or ``operator.lt``\n index (bool): If ``True``, returns indices of intervals with\n gaps after them.\n\n Returns:\n Striplog: A striplog of all the gaps. 
A sort of anti-striplog.\n \"\"\"\n if len(self) == 1:\n return\n\n hits = []\n intervals = []\n\n if self.order == 'depth':\n one, two = 'base', 'top'\n else:\n one, two = 'top', 'base'\n\n for i, iv in enumerate(self[:-1]):\n next_iv = self[i+1]\n if op(getattr(iv, one), getattr(next_iv, two)):\n hits.append(i)\n\n top = getattr(iv, one)\n base = getattr(next_iv, two)\n iv_gap = Interval(top, base)\n intervals.append(iv_gap)\n\n if index and hits:\n return hits\n elif intervals:\n return Striplog(intervals)\n else:\n return\n\n def find_overlaps(self, index=False):\n \"\"\"\n Find overlaps in a striplog.\n\n Args:\n index (bool): If True, returns indices of intervals with\n gaps after them.\n\n Returns:\n Striplog: A striplog of all the overlaps as intervals.\n \"\"\"\n return self.__find_incongruities(op=operator.gt, index=index)\n\n def find_gaps(self, index=False):\n \"\"\"\n Finds gaps in a striplog.\n\n Args:\n index (bool): If True, returns indices of intervals with\n gaps after them.\n\n Returns:\n Striplog: A striplog of all the gaps. A sort of anti-striplog.\n \"\"\"\n return self.__find_incongruities(op=operator.lt, index=index)\n\n def prune(self, limit=None, n=None, percentile=None, keep_ends=False):\n \"\"\"\n Remove intervals below a certain limit thickness. In place.\n\n Args:\n limit (float): Anything thinner than this will be pruned.\n n (int): The n thinnest beds will be pruned.\n percentile (float): The thinnest specified percentile will be\n pruned.\n keep_ends (bool): Whether to keep the first and last, regardless\n of whether they meet the pruning criteria.\n \"\"\"\n strip = self.copy()\n\n if not (limit or n or percentile):\n m = \"You must provide a limit or n or percentile for pruning.\"\n raise StriplogError(m)\n if limit:\n prune = [i for i, iv in enumerate(strip) if iv.thickness < limit]\n if n:\n prune = strip.thinnest(n=n, index=True)\n if percentile:\n n = np.floor(len(strip)*percentile/100)\n prune = strip.thinnest(n=n, index=True)\n\n if keep_ends:\n first, last = 0, len(strip) - 1\n if first in prune:\n prune.remove(first)\n if last in prune:\n prune.remove(last)\n\n del strip[prune]\n\n return strip\n\n def anneal(self, mode='middle'):\n \"\"\"\n Fill in empty intervals by growing from top and base.\n\n Note that this operation happens in-place and destroys any information\n about the ``Position`` (e.g. metadata associated with the top or base).\n See GitHub issue #54.\n\n If there are overlaps in your striplog, then this method may have\n unexpected results.\n\n Args\n mode (str): One of ['down', 'middle', 'up']. Which way to 'flood'\n into the gaps.\n\n Returns\n Striplog. 
A new instance of the Striplog class.\n \"\"\"\n strip = deepcopy(self)\n\n gaps = strip.find_gaps(index=True)\n\n if not gaps:\n return\n\n for gap in gaps:\n before = strip[gap]\n after = strip[gap + 1]\n\n if mode == 'middle':\n if strip.order == 'depth':\n t = (after.top.z-before.base.z)/2\n before.base = before.base.z + t\n after.top = after.top.z - t\n else:\n t = (after.base-before.top)/2\n before.top = before.top.z + t\n after.base = after.base.z - t\n\n elif mode == 'down':\n if strip.order == 'depth':\n before.base = after.top.z\n else:\n before.top = after.base.z\n\n elif mode == 'up':\n if strip.order == 'depth':\n after.top = before.base.z\n else:\n after.base = before.top.z\n\n return strip\n\n def fill(self, component=None):\n \"\"\"\n Fill gaps with the component provided.\n\n Example\n t = s.fill(Component({'lithology': 'cheese'}))\n \"\"\"\n c = [component] if component is not None else []\n\n # Make the intervals to go in the gaps.\n gaps = self.find_gaps()\n if not gaps:\n return self\n for iv in gaps:\n iv.components = c\n\n return deepcopy(self) + gaps\n\n def union(self, other):\n \"\"\"\n Makes a striplog of all unions.\n\n Args:\n Striplog. The striplog instance to union with.\n\n Returns:\n Striplog. The result of the union.\n \"\"\"\n if not isinstance(other, self.__class__):\n m = \"You can only union striplogs with each other.\"\n raise StriplogError(m)\n\n result = []\n for iv in deepcopy(self):\n for jv in other:\n if iv.any_overlaps(jv):\n iv = iv.union(jv)\n result.append(iv)\n return Striplog(result)\n\n def intersect(self, other):\n \"\"\"\n Makes a striplog of all intersections.\n\n Args:\n Striplog. The striplog instance to intersect with.\n\n Returns:\n Striplog. The result of the intersection.\n \"\"\"\n if not isinstance(other, self.__class__):\n m = \"You can only intersect striplogs with each other.\"\n raise StriplogError(m)\n\n result = []\n for iv in self:\n for jv in other:\n try:\n result.append(iv.intersect(jv))\n except IntervalError:\n # The intervals don't overlap\n pass\n return Striplog(result)\n\n def merge_overlaps(self):\n \"\"\"\n Merges overlaps by merging overlapping Intervals.\n\n The function takes no arguments and returns ``None``. It operates on\n the striplog 'in place'\n\n TODO: This function will not work if any interval overlaps more than\n one other intervals at either its base or top.\n \"\"\"\n overlaps = np.array(self.find_overlaps(index=True))\n\n if not overlaps.any():\n return\n\n for overlap in overlaps:\n before = self[overlap].copy()\n after = self[overlap + 1].copy()\n\n # Get rid of the before and after pieces.\n del self[overlap]\n del self[overlap]\n\n # Make the new piece.\n new_segment = before.merge(after)\n\n # Insert it.\n self.insert(overlap, new_segment)\n\n overlaps += 1\n\n return\n\n def merge_neighbours(self, strict=True):\n \"\"\"\n Makes a new striplog in which matching neighbours (for which the\n components are the same) are unioned. That is, they are replaced by\n a new Interval with the same top as the uppermost and the same bottom\n as the lowermost.\n\n Args\n strict (bool): If True, then all of the components must match.\n If False, then only the primary must match.\n\n Returns:\n Striplog. 
A new striplog.\n\n TODO:\n Might need to be tweaked to deal with 'binary striplogs' if those\n aren't implemented with components.\n \"\"\"\n new_strip = [self[0].copy()]\n\n for lower in self[1:]:\n\n # Determine if touching.\n touching = new_strip[-1].touches(lower)\n\n # Decide if match.\n if strict:\n similar = new_strip[-1].components == lower.components\n else:\n similar = new_strip[-1].primary == lower.primary\n\n # Union if both criteria met.\n if touching and similar:\n new_strip[-1] = new_strip[-1].union(lower)\n else:\n new_strip.append(lower.copy())\n\n return Striplog(new_strip)\n\n def thickest(self, n=1, index=False):\n \"\"\"\n Returns the thickest interval(s) as a striplog.\n\n Args:\n n (int): The number of thickest intervals to return. Default: 1.\n index (bool): If True, only the indices of the intervals are\n returned. You can use this to index into the striplog.\n\n Returns:\n Interval. The thickest interval. Or, if ``index`` was ``True``,\n the index of the thickest interval.\n \"\"\"\n s = sorted(range(len(self)), key=lambda k: self[k].thickness)\n indices = s[-n:]\n if index:\n return indices\n else:\n if n == 1:\n # Then return an interval.\n i = indices[0]\n return self[i]\n else:\n return self[indices]\n\n def thinnest(self, n=1, index=False):\n \"\"\"\n Returns the thinnest interval(s) as a striplog.\n\n Args:\n n (int): The number of thickest intervals to return. Default: 1.\n index (bool): If True, only the indices of the intervals are\n returned. You can use this to index into the striplog.\n\n Returns:\n Interval. The thickest interval. Or, if ``index`` was ``True``,\n the index of the thickest interval.\n\n TODO:\n If you ask for the thinnest bed and there's a tie, you will\n get the last in the ordered list.\n \"\"\"\n s = sorted(range(len(self)), key=lambda k: self[k].thickness)\n indices = s[:n]\n if index:\n return indices\n else:\n if n == 1:\n i = indices[0]\n return self[i]\n else:\n return self[indices]\n\n def hist(self,\n lumping=None,\n summary=False,\n sort=True,\n plot=True,\n legend=None,\n ax=None,\n rotation=0,\n ha='center',\n ):\n \"\"\"\n Plots a histogram and returns the data for it.\n\n Args:\n lumping (str): If given, the bins will be lumped based on this\n attribute of the primary components of the intervals\n encountered.\n summary (bool): If True, the summaries of the components are\n returned as the bins. 
Otherwise, the default behaviour is to\n return the Components themselves.\n sort (bool): If True (default), the histogram is sorted by value,\n starting with the largest.\n plot (bool): If True (default), produce a bar plot.\n legend (Legend): The legend with which to colour the bars.\n ax (axis): An axis object, which will be returned if provided.\n If you don't provide one, it will be created but not returned.\n rotation (int): The rotation angle of the x-axis tick labels.\n Default is 0 but -45 is useful.\n ha (str): The horizontal alignment of the x-axis tick labels.\n Default is 'center' but 'left' is good for -ve rotation.\n\n Returns:\n Tuple: A tuple of tuples of entities and counts.\n\n TODO:\n Deal with numeric properties, so I can histogram 'Vp' values, say.\n \"\"\"\n # This seems like overkill, but collecting all this stuff gives\n # the user some choice about what they get back.\n entries = OrderedDict()\n for i in self:\n if lumping:\n k = i.primary[lumping]\n else:\n if summary:\n k = i.primary.summary()\n else:\n k = i.primary\n v = entries.get(k, {'thick': 0}).get('thick', 0)\n \n entries[k] = {\n 'label': i.primary.summary(),\n 'colour': legend.get_colour(i.primary) if legend else None,\n 'thick': v + i.thickness,\n }\n\n if sort:\n allitems = sorted(entries.items(),\n key=lambda i: i[1]['thick'],\n reverse=True\n )\n ents, data = zip(*allitems)\n else:\n ents, data = tuple(entries.keys()), tuple(entries.values())\n \n counts = [d['thick'] for d in data]\n\n # Make plot.\n if plot:\n if ax is None:\n fig, ax = plt.subplots()\n return_ax = False\n else:\n return_ax = True\n\n ind = np.arange(len(ents))\n bars = ax.bar(ind, counts, align='center')\n ax.set_xticks(ind)\n ax.set_xticklabels([d['label'] for d in data],\n rotation=rotation,\n ha=ha)\n if legend:\n colours = [d['colour'] for d in data]\n for b, c in zip(bars, colours):\n b.set_color(c)\n ax.set_ylabel('Thickness [m]')\n else:\n bars = []\n\n if plot and return_ax:\n return counts, ents, ax\n\n return counts, ents, bars\n\n histogram = hist\n\n def bar(self, height='thickness', sort=False, reverse=False,\n legend=None, ax=None, figsize=None, **kwargs):\n \"\"\"\n Make a bar plot of thickness per interval.\n\n Args:\n height (str): The property of the primary component to plot.\n sort (bool or function): Either pass a boolean indicating whether\n to reverse sort by thickness, or pass a function to be used as\n the sort key.\n reverse (bool): Reverses the sort order.\n legend (Legend): The legend to plot with.\n ax (axis): Optional axis to plot to.\n figsize (tuple): A figure size, (width, height), optional.\n **kwargs: passed to the matplotlib bar plot command, ax.bar().\n\n Returns:\n axis: If you sent an axis in, you get it back.\n \"\"\"\n if sort:\n if sort is True:\n def func(x): return x.thickness\n reverse = True\n data = sorted(self, key=func, reverse=reverse)\n else:\n data = self[:]\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n heights = [getattr(i, height) for i in data]\n\n comps = [i[0] for i in self.unique]\n\n if legend is None:\n legend = Legend.random(comps)\n\n colors = [legend.get_colour(i.primary) for i in data]\n\n bars = ax.bar(range(len(data)), height=heights, color=colors, **kwargs)\n\n # Legend.\n colourables = [i.primary.summary() for i in data]\n unique_bars = dict(zip(colourables, bars))\n ax.legend(list(unique_bars.values()), list(unique_bars.keys()))\n\n ax.set_ylabel(height.title())\n\n return ax\n\n def invert(self, copy=False):\n \"\"\"\n Inverts the striplog, 
changing its order and the order of its contents.\n\n Operates in place by default.\n\n Args:\n copy (bool): Whether to operate in place or make a copy.\n\n Returns:\n None if operating in-place, or an inverted copy of the striplog\n if not.\n \"\"\"\n if copy:\n return Striplog([i.invert(copy=True) for i in self])\n else:\n for i in self:\n i.invert()\n self.__sort()\n o = self.order\n self.order = {'depth': 'elevation', 'elevation': 'depth'}[o]\n return\n\n def crop(self, extent, copy=False):\n \"\"\"\n Crop to a new depth range.\n\n Args:\n extent (tuple): The new start and stop depth. Must be 'inside'\n existing striplog.\n copy (bool): Whether to operate in place or make a copy.\n\n Returns:\n Operates in place by deault; if copy is True, returns a striplog.\n \"\"\"\n try:\n if extent[0] is None:\n extent = (self.start.z, extent[1])\n if extent[1] is None:\n extent = (extent[0], self.stop.z)\n except:\n m = \"You must provide a 2-tuple for the new extents. Use None for\"\n m += \" the existing start or stop.\"\n raise StriplogError(m)\n\n first_ix = self.read_at(extent[0], index=True)\n last_ix = self.read_at(extent[1], index=True)\n\n first = self[first_ix].split_at(extent[0])[1]\n last = self[last_ix].split_at(extent[1])[0]\n\n new_list = self.__list[first_ix:last_ix+1].copy()\n new_list[0] = first\n new_list[-1] = last\n\n if copy:\n return Striplog(new_list)\n else:\n self.__list = new_list\n return\n\n def net_to_gross(strip, attr):\n \"\"\"\n Compute the ratio of intervals having that attribute as `True` to the\n total thickness.\n\n TODO\n Allow user to give a cut-off value to apply to the attribute,\n if it's a continuous scalar and not boolean.\n\n Args\n attr (str): Which attribute to use. Must have boolean values.\n\n Returns\n float. The net:gross ratio.\n \"\"\"\n net = non = 0\n for c, x in strip.unique:\n if getattr(c, attr):\n net = x\n else:\n non = x\n return net / (net + non)\n\n def quality(self, tests, alias=None):\n \"\"\"\n Run a series of tests and return the corresponding results.\n\n Based on curve testing for ``welly``.\n\n Args:\n tests (list): a list of functions.\n\n Returns:\n list. The results. Stick to booleans (True = pass) or ints.\n \"\"\"\n # This is hacky... striplog should probably merge with welly...\n\n # Ignore aliases\n alias = alias or {}\n alias = alias.get('striplog', alias.get('Striplog', []))\n\n # Gather the tests.\n # First, anything called 'all', 'All', or 'ALL'.\n # Second, anything with the name of the curve we're in now.\n # Third, anything that the alias list has for this curve.\n # (This requires a reverse look-up so it's a bit messy.)\n this_tests =\\\n tests.get('all', [])+tests.get('All', [])+tests.get('ALL', [])\\\n + tests.get('striplog', tests.get('Striplog', []))\\\n + utils.flatten_list([tests.get(a) for a in alias])\n this_tests = filter(None, this_tests)\n\n # If we explicitly set zero tests for a particular key, then this\n # overrides the 'all' tests.\n if not tests.get('striplog', tests.get('Striplog', 1)):\n this_tests = []\n\n return {test.__name__: test(self) for test in this_tests}\n\n @property\n def _table(self):\n \"\"\"\n A table (list of tuples) of the tops and bases we encounter, starting\n at the top. 
We will need to know 3 things: whether it's a top or a\n base, the depth it's at, and which interval in the striplog it\n corresponds to.\n \"\"\"\n table = []\n for i, interval in enumerate(self):\n table.append(('T', interval.top.middle, i))\n table.append(('B', interval.base.middle, i))\n table = sorted(table, key=lambda x: x[1])\n return table\n\n def _merge_table(self, attr, reverse=False):\n \"\"\"\n Do the merge operation on a table, and return a new table with\n no nesting / overlaps.\n\n Args\n attr (str): The attribute of the component you want to use. You\n must provide an attribute.\n reverse (bool): Whether to reverse the condition.\n\n Returns\n list: The merged table.\n \"\"\"\n merged, stack = [], []\n op = operator.le if reverse else operator.ge\n\n for interface in self._table:\n\n tb, depth, idx = interface\n\n if stack:\n # 'this' is the top or base we're on in this loop iteration.\n try:\n this = getattr(self[idx], attr)\n except AttributeError:\n this = getattr(self[idx].primary, attr)\n\n # 'current' is the highest priority unit in the stack.\n try:\n current = getattr(self[stack[-1]], attr)\n except AttributeError:\n current = getattr(self[stack[-1]].primary, attr)\n\n # Compare 'this' to 'current' to decide what to do.\n merge = op(this, current)\n else:\n merge = True\n\n if tb == 'T':\n\n # If this one meets the condition, merge it.\n if merge:\n # End the current unit, if any.\n if stack:\n merged.append(('B', depth, stack[-1]))\n # Start the new top.\n merged.append(interface)\n\n # Insert this unit into stack and re-sort.\n # (This is easier than trying to insert in the right place.)\n stack.append(idx)\n try:\n stack = sorted(stack,\n key=lambda i: getattr(self[i], attr),\n reverse=reverse)\n except AttributeError:\n stack = sorted(stack,\n key=lambda i: getattr(self[i].primary, attr),\n reverse=reverse)\n\n elif tb == 'B':\n have_merged = False\n\n # If this is the current unit's base, append it to the merge.\n if idx == stack[-1]:\n merged.append(interface)\n have_merged = True\n\n # End this unit in the stack.\n stack.remove(idx)\n\n # Add a top for the new current unit, if any, but only if we\n # did a merge.\n if stack and have_merged:\n merged.append(('T', depth, stack[-1]))\n\n return merged\n\n def _striplog_from_merge_table(self, table):\n \"\"\"\n Make a merge table into a Striplog instance.\n\n Args\n table (list). The table of tops and bases, represented as tuples.\n\n Returns\n Striplog. A new Striplog instance.\n \"\"\"\n m = []\n for top, bot in zip(table[::2], table[1::2]):\n\n # If zero thickness, discard.\n if top[1] == bot[1]:\n continue\n\n i = self[top[2]].copy()\n i.top = top[1]\n i.base = bot[1]\n m.append(i)\n\n return Striplog(m)\n\n def merge(self, attr, reverse=False):\n \"\"\"\n Merge the intervals in a striplog, using an attribute of the primary\n component for priority ordering.\n\n Args\n attr (str): The attribute of the component you want to use. 
You \\\n must provide an attribute.\n reverse (bool): Whether to reverse the condition.\n\n Returns\n Striplog: The merged striplog.\n \"\"\"\n m = self._merge_table(attr, reverse=reverse)\n return self._striplog_from_merge_table(m)\n\n def is_binary(self, attr=None):\n \"\"\"\n Determine if `attr`, which must be an attribute of every primary\n component, allows this striplog to be interpreted as a binary striplog.\n If no `attr` is provided, the first attribute of the primary comp-\n onent is used.\n \"\"\"\n try:\n primaries = [getattr(i.primary, attr) for i in self]\n except:\n primaries = [list(i.primary.__dict__.values())[0] for i in self]\n return all(map(lambda x: isinstance(x, bool), primaries))\n\n def to_binary_log(self, attr, step):\n \"\"\"\n Adaptation of `to_log` but deals with binary attributes of striplogs.\n\n Args\n attr (str): Which attribute to make into a log.\n \"\"\"\n log, basis, comps = self.to_log(step=step,\n match_only=[attr],\n undefined=-1,\n return_meta=True)\n if -1 in log:\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"We have undefined values, there might be a problem.\"\n warnings.warn(w)\n return log - 1, basis, comps\n\n def binary_morphology(self, attr, operation, step=1.0, p=3):\n \"\"\"\n Perform a discrete binary morphology operation on the striplog.\n\n Args\n attr (str): The attribute to use for the filtering. Must have\n boolean values.\n operation (str): One of `erosion`, `dilation`, `opening` or\n `closing`.\n step (float): The step size to use in discretization. Default is\n 1 but you might want to use something smaller, e.g. 0.1.\n p (int): The length of the structuring element, in samples (not\n natual units). Odd numbers are symmetrical and more intuitive.\n Default is 3.\n\n Returns\n Striplog. A new striplog instance.\n \"\"\"\n ops = {\n 'erosion': utils.binary_erosion,\n 'dilation': utils.binary_dilation,\n 'opening': utils.binary_opening,\n 'closing': utils.binary_closing,\n }\n if not self.is_binary():\n print(\"Cannot interpret striplog as binary.\")\n log, basis, comps = self.to_binary_log(step=step, attr=attr)\n proc = ops[operation](log, p)\n if operation == 'closing':\n proc = proc | log\n\n return Striplog.from_log(proc, components=comps, basis=basis)\n\n @classmethod\n def from_macrostrat(cls, lng, lat, buffer_size=0.2):\n \"\"\"\n Create a striplog from components derived using the MacroStrat API.\n This is simply a helper function to make things easier, but it\n works because we know what our data looks like in advance.\n\n Note: In order to plot this, you will need to add space for text and \n other decoration. 
This simply gives a Striplog back which _can_\n be plotted.\n\n Args:\n components (list):\n\n Returns:\n Tuple of:\n strip (striplog.Striplog)\n legend (striplog.Legend)\n\n Example:\n lng = -64.3573186\n lat = 44.4454632\n buffer_size = 0.3\n striplog.striplog.from_macrostrat(lng, lat, buffer_size)\n {'top': Position({'middle': 358.9, 'units': 'm'}), \n 'base': Position({'middle': 419.2, 'units': 'm'}), \n 'description': '', 'data': {}, 'components': [Component({\n 'map_id': 948660.0, 'scale': 'small', 'source_id': 7.0,\n 'name': 'Devonian plutonic: undivided granitic rocks',\n 'age': 'devonian', 'lith': 'plutonic: undivided granitic rocks',\n 'best_age_top': 358.9, 'best_age_bottom': 419.2, 't_int': 94.0,\n 'b_int': 94.0, 'color': '#cb8c37', 'source': 'MacroStrat.org (CC-BY)})]}\n {'top': Position({'middle': 358.9, 'units': 'm'}),\n 'base': Position({'middle': 541.0, 'units': 'm'}),\n 'description': '', 'data': {}, 'components': [Component({\n 'map_id': 948228.0, 'scale': 'small', 'source_id': 7.0,\n 'name': 'Cambrian-Devonian sedimentary', 'age': 'cambrian-devonian',\n 'lith': 'sedimentary', 'best_age_top': 358.9, 'best_age_bottom': 541.0,\n 't_int': 94.0, 'b_int': 122.0, 'color': '#99c08d',\n 'source': 'MacroStrat.org (CC-BY)})]}\n {'top': Position({'middle': 443.8, 'units': 'm'}),\n 'base': Position({'middle': 541.0, 'units': 'm'}),\n 'description': '', 'data': {}, 'components': [Component({\n 'map_id': 973359.0, 'scale': 'small', 'source_id': 7.0,\n 'name': 'Cambrian-Ordovician sedimentary', 'age': 'cambrian-ordovician',\n 'lith': 'sedimentary', 'best_age_top': 443.8, 'best_age_bottom': 541.0,\n 't_int': 112.0, 'b_int': 122.0, 'color': '#409963',\n 'source': 'MacroStrat.org (CC-BY)})]}\n \"\"\"\n # Get the \n features = utils.geology_from_macrostrat(lng=lng, lat=lat,\n buffer_size=buffer_size)\n\n columns = ('color', 'lith', 'age')\n\n intervals = []\n\n for feature in features:\n if feature['geometry'] is None:\n continue\n\n components = []\n for lith in utils.get_liths_from_macrostrat(feature['properties']['lith']):\n c = Component({'lithology': lith})\n components.append(c)\n\n intervals.append(Interval(\n top=feature['properties']['best_age_top'],\n base=feature['properties']['best_age_bottom'],\n components=components,\n description=feature['properties']['descrip'])\n )\n\n return cls(intervals, source='Macrostrat [CC-BY]', order='age')\n" ]
[ [ "numpy.zeros_like", "numpy.ceil", "numpy.diff", "matplotlib.pyplot.figure", "matplotlib.ticker.FormatStrFormatter", "numpy.copy", "matplotlib.collections.PatchCollection", "matplotlib.pyplot.subplots", "matplotlib.patches.Rectangle", "matplotlib.pyplot.text", "numpy.isnan", "numpy.array", "matplotlib.pyplot.colorbar", "matplotlib.ticker.MultipleLocator", "numpy.linspace", "numpy.digitize" ] ]
NLeSC/spreading_dye_sampler
[ "4282f7609959a31d1b2a4832f3ed643b15c46cb6" ]
[ "spreading_dye_sampler/test/test_dye_blot.py" ]
[ "import os\nimport sys\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))\n\nimport spreading_dye_sampler.dye_blot\n\nimport numpy as np\nfrom numpy.random import random\nimport pytest\n\[email protected]\ndef blot():\n num_cells = 100\n grid_width = 100\n grid_height = 100\n\n blot = None\n while blot is None:\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [10, 10], num_cells)\n return blot\n\ndef test_make_dye_blot():\n num_cells = 10\n grid_width = 100\n grid_height = 100\n\n for i in range(100):\n # make a blot\n blot = None\n while blot is None:\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [10, 20], num_cells)\n\n # check size\n assert blot.num_cells() == num_cells\n\n # check that the blot is in range\n for x, y in blot._cells:\n assert 0 <= x\n assert x < grid_width\n assert 0 <= y\n assert y < grid_height\n\ndef test_for_each_cell(blot):\n def test_forward(x, y):\n assert x, y in blot._cells\n\n blot.for_each_cell(test_forward)\n\ndef test_aspect():\n grid_width = 1000\n grid_height = 10\n num_cells = 500\n\n blot = None\n while blot is None:\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [1, 100], num_cells)\n\n assert blot.num_cells() == num_cells\n x_min = min([x for x, y in blot._cells])\n x_max = max([x for x, y in blot._cells])\n y_min = min([y for x, y in blot._cells])\n y_max = max([y for x, y in blot._cells])\n\n x_size = x_max - x_min\n y_size = y_max - y_min\n # This may fail occasionally. Need to figure out.\n assert x_size / y_size > 5\n\ndef test_squeeze():\n grid_width = 10\n grid_height = 10\n num_cells = grid_width * grid_height\n\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [10, 10], num_cells, squeeze=True)\n\n assert blot is not None\n assert blot.num_cells() == num_cells\n\n for y in range(grid_height):\n for x in range(grid_width):\n assert (x, y) in blot._cells\n\ndef test_masking():\n grid_width = 10\n grid_height = 10\n num_cells = 20\n\n # Make a mask with 45 permitted cells\n mask = np.zeros([grid_width, grid_height], dtype=bool)\n for y in range(grid_height):\n for x in range(grid_width):\n dx = x - grid_width / 2\n dy = y - grid_height / 2\n mask[x, y] = dx**2 + dy**2 < 4*4\n\n for i in range(100):\n num_cells = int(np.floor(random() * 44) + 1.0)\n blot = None\n while blot is None:\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [10, 10], num_cells, mask, squeeze=True)\n\n assert blot.num_cells() == num_cells\n\n def check(x, y):\n assert mask[x, y]\n\n blot.for_each_cell(check)\n" ]
[ [ "numpy.random.random", "numpy.zeros" ] ]
hrnciar/NiaPy
[ "d1e70924577cc90455c52701f2696bcb0a064438" ]
[ "examples/advanced_example_custom_pop.py" ]
[ "# encoding=utf8\n# This is temporary fix to import module from parent folder\n# It will be removed when package is published on PyPI\nimport sys\n\nsys.path.append('../')\n\nfrom niapy.task import StoppingTask, OptimizationType\nfrom niapy.benchmarks import Benchmark\nfrom niapy.algorithms.basic import GreyWolfOptimizer\nfrom numpy import random as rand, apply_along_axis\n\n\n# our custom benchmark class\nclass MyBenchmark(Benchmark):\n def __init__(self):\n\n Benchmark.__init__(self, -10, 10)\n\n def function(self):\n def evaluate(D, sol):\n val = 0.0\n for i in range(D): val += sol[i] ** 2\n return val\n\n return evaluate\n\n\n# custom initialization population function\ndef MyInit(task, NP, rnd=rand, **kwargs):\n pop = 0.2 + rnd.rand(NP, task.dimension) * task.range\n fpop = apply_along_axis(task.eval, 1, pop)\n return pop, fpop\n\n\n# we will run 10 repetitions of Grey Wolf Optimizer against our custom MyBenchmark benchmark function\nfor i in range(10):\n task = StoppingTask(max_iters=100, dimension=20, optimization_type=OptimizationType.MINIMIZATION,\n benchmark=MyBenchmark())\n\n # parameter is population size\n algo = GreyWolfOptimizer(population_size=20, initialization_function=MyInit)\n\n # running algorithm returns best found minimum\n best = algo.run(task)\n\n # printing best minimum\n print(best[-1])\n" ]
[ [ "numpy.apply_along_axis" ] ]
hexylena/tools-iuc
[ "811337eaab815f54f0fd93a3dd23a1153993ea2a" ]
[ "tools/cwpair2/cwpair2_util.py" ]
[ "import bisect\nimport csv\nimport os\nimport sys\nimport traceback\n\nimport matplotlib\nmatplotlib.use('Agg') # noqa\nfrom matplotlib import pyplot\n\n# Data outputs\nDETAILS = 'D'\nMATCHED_PAIRS = 'MP'\nORPHANS = 'O'\n# Data output formats\nGFF_EXT = 'gff'\nTABULAR_EXT = 'tabular'\n# Statistics historgrams output directory.\nHISTOGRAM = 'H'\n# Statistics outputs\nFINAL_PLOTS = 'F'\nPREVIEW_PLOTS = 'P'\nSTATS_GRAPH = 'C'\n\n# Graph settings.\nCOLORS = 'krg'\nY_LABEL = 'Peak-pair counts'\nX_LABEL = 'Peak-pair distance (bp)'\nTICK_WIDTH = 3\nADJUST = [0.140, 0.9, 0.9, 0.1]\nPLOT_FORMAT = 'pdf'\npyplot.rc('xtick.major', size=10.00)\npyplot.rc('ytick.major', size=10.00)\npyplot.rc('lines', linewidth=4.00)\npyplot.rc('axes', linewidth=3.00)\npyplot.rc('font', family='Bitstream Vera Sans', size=32.0)\n\n\nclass FrequencyDistribution(object):\n\n def __init__(self, start, end, binsize=10, d=None):\n self.start = start\n self.end = end\n self.dist = d or {}\n self.binsize = binsize\n\n def get_bin(self, x):\n \"\"\"\n Returns the bin in which a data point falls\n \"\"\"\n return self.start + (x - self.start) // self.binsize * self.binsize + self.binsize / 2.0\n\n def add(self, x):\n x = self.get_bin(x)\n self.dist[x] = self.dist.get(x, 0) + 1\n\n def graph_series(self):\n x = []\n y = []\n for i in range(self.start, self.end, self.binsize):\n center = self.get_bin(i)\n x.append(center)\n y.append(self.dist.get(center, 0))\n return x, y\n\n def mode(self):\n return max(self.dist.items(), key=lambda data: data[1])[0]\n\n def size(self):\n return sum(self.dist.values())\n\n\ndef stop_err(msg):\n sys.stderr.write(msg)\n sys.exit(1)\n\n\ndef distance(peak1, peak2):\n return (peak2[1] + peak2[2]) / 2 - (peak1[1] + peak1[2]) / 2\n\n\ndef gff_row(cname, start, end, score, source, type='.', strand='.', phase='.', attrs={}):\n return (cname, source, type, start, end, score, strand, phase, gff_attrs(attrs))\n\n\ndef gff_attrs(d):\n if not d:\n return '.'\n return ';'.join('%s=%s' % item for item in d.items())\n\n\ndef parse_chromosomes(reader):\n # This version of cwpair2 accepts only gff format as input.\n chromosomes = {}\n reader.next()\n for line in reader:\n cname, junk, junk, start, end, value, strand, junk, junk = line\n start = int(start)\n end = int(end)\n value = float(value)\n if cname not in chromosomes:\n chromosomes[cname] = []\n peaks = chromosomes[cname]\n peaks.append((strand, start, end, value))\n return chromosomes\n\n\ndef perc95(chromosomes):\n \"\"\"\n Returns the 95th percentile value of the given chromosomes.\n \"\"\"\n values = []\n for peaks in chromosomes.values():\n for peak in peaks:\n values.append(peak[3])\n values.sort()\n # Get 95% value\n return values[int(len(values) * 0.95)]\n\n\ndef filter(chromosomes, threshold=0.05):\n \"\"\"\n Filters the peaks to those above a threshold. 
Threshold < 1.0 is interpreted\n as a proportion of the maximum, >=1.0 as an absolute value.\n \"\"\"\n if threshold < 1:\n p95 = perc95(chromosomes)\n threshold = p95 * threshold\n # Make the threshold a proportion of the\n for cname, peaks in chromosomes.items():\n chromosomes[cname] = [peak for peak in peaks if peak[3] > threshold]\n\n\ndef split_strands(chromosome):\n watson = [peak for peak in chromosome if peak[0] == '+']\n crick = [peak for peak in chromosome if peak[0] == '-']\n return watson, crick\n\n\ndef all_pair_distribution(chromosomes, up_distance, down_distance, binsize):\n dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)\n for cname, data in chromosomes.items():\n watson, crick = split_strands(data)\n crick.sort(key=lambda data: float(data[1]))\n keys = make_keys(crick)\n for peak in watson:\n for cpeak in get_window(crick, peak, up_distance, down_distance, keys):\n dist.add(distance(peak, cpeak))\n return dist\n\n\ndef make_keys(crick):\n return [(data[1] + data[2]) // 2 for data in crick]\n\n\ndef get_window(crick, peak, up_distance, down_distance, keys=None):\n \"\"\"\n Returns a window of all crick peaks within a distance of a watson peak.\n crick strand MUST be sorted by distance\n \"\"\"\n strand, start, end, value = peak\n midpoint = (start + end) // 2\n lower = midpoint - up_distance\n upper = midpoint + down_distance\n keys = keys or make_keys(crick)\n start_index = bisect.bisect_left(keys, lower)\n end_index = bisect.bisect_right(keys, upper)\n return [cpeak for cpeak in crick[start_index:end_index]]\n\n\ndef match_largest(window, peak):\n if not window:\n return None\n return max(window, key=lambda cpeak: cpeak[3])\n\n\ndef match_closest(window, peak):\n if not window:\n return None\n\n def key(cpeak):\n d = distance(peak, cpeak)\n # Search negative distances last\n if d < 0:\n # And then prefer less negative distances\n d = 10000 - d\n return d\n return min(window, key=key)\n\n\ndef match_mode(window, peak, mode):\n if not window:\n return None\n return min(window, key=lambda cpeak: abs(distance(peak, cpeak) - mode))\n\nMETHODS = {'mode': match_mode, 'closest': match_closest, 'largest': match_largest}\n\n\ndef frequency_plot(freqs, fname, labels=[], title=''):\n pyplot.clf()\n pyplot.figure(figsize=(10, 10))\n for i, freq in enumerate(freqs):\n x, y = freq.graph_series()\n pyplot.plot(x, y, '%s-' % COLORS[i])\n if len(freqs) > 1:\n pyplot.legend(labels)\n pyplot.xlim(freq.start, freq.end)\n pyplot.ylim(ymin=0)\n pyplot.ylabel(Y_LABEL)\n pyplot.xlabel(X_LABEL)\n pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3])\n # Get the current axes\n ax = pyplot.gca()\n for l in ax.get_xticklines() + ax.get_yticklines():\n l.set_markeredgewidth(TICK_WIDTH)\n pyplot.savefig(fname)\n\n\ndef create_directories():\n # Output histograms in pdf.\n os.mkdir(HISTOGRAM)\n os.mkdir('data_%s' % DETAILS)\n os.mkdir('data_%s' % ORPHANS)\n os.mkdir('data_%s' % MATCHED_PAIRS)\n\n\ndef process_file(dataset_path, galaxy_hid, method, threshold, up_distance,\n down_distance, binsize, output_files):\n if method == 'all':\n match_methods = METHODS.keys()\n else:\n match_methods = [method]\n statistics = []\n for match_method in match_methods:\n stats = perform_process(dataset_path,\n galaxy_hid,\n match_method,\n threshold,\n up_distance,\n down_distance,\n binsize,\n output_files)\n statistics.append(stats)\n if output_files == 'all' and method == 'all':\n frequency_plot([s['dist'] for s in statistics],\n 
statistics[0]['graph_path'],\n labels=METHODS.keys())\n return statistics\n\n\ndef perform_process(dataset_path, galaxy_hid, method, threshold, up_distance,\n down_distance, binsize, output_files):\n output_details = output_files in [\"all\", \"matched_pair_orphan_detail\"]\n output_plots = output_files in [\"all\"]\n output_orphans = output_files in [\"all\", \"matched_pair_orphan\", \"matched_pair_orphan_detail\"]\n # Keep track of statistics for the output file\n statistics = {}\n input = csv.reader(open(dataset_path, 'rt'), delimiter='\\t')\n fpath, fname = os.path.split(dataset_path)\n statistics['fname'] = '%s: data %s' % (method, str(galaxy_hid))\n statistics['dir'] = fpath\n if threshold >= 1:\n filter_string = 'fa%d' % threshold\n else:\n filter_string = 'f%d' % (threshold * 100)\n fname = '%s_%su%dd%d_on_data_%s' % (method, filter_string, up_distance, down_distance, galaxy_hid)\n\n def make_histogram_path(output_type, fname):\n return os.path.join(HISTOGRAM, 'histogram_%s_%s.%s' % (output_type, fname, PLOT_FORMAT))\n\n def make_path(output_type, extension, fname):\n # Returns the full path for an output.\n return os.path.join(output_type, '%s_%s.%s' % (output_type, fname, extension))\n\n def td_writer(output_type, extension, fname):\n # Returns a tab-delimited writer for a specified output.\n output_file_path = make_path(output_type, extension, fname)\n return csv.writer(open(output_file_path, 'wt'), delimiter='\\t')\n\n try:\n chromosomes = parse_chromosomes(input)\n except Exception:\n stop_err('Unable to parse file \"%s\".\\n%s' % (dataset_path, traceback.format_exc()))\n if output_details:\n # Details\n detailed_output = td_writer('data_%s' % DETAILS, TABULAR_EXT, fname)\n detailed_output.writerow(('chrom', 'start', 'end', 'value', 'strand') * 2 + ('midpoint', 'c-w reads sum', 'c-w distance (bp)'))\n if output_plots:\n # Final Plot\n final_plot_path = make_histogram_path(FINAL_PLOTS, fname)\n if output_orphans:\n # Orphans\n orphan_output = td_writer('data_%s' % ORPHANS, TABULAR_EXT, fname)\n orphan_output.writerow(('chrom', 'strand', 'start', 'end', 'value'))\n if output_plots:\n # Preview Plot\n preview_plot_path = make_histogram_path(PREVIEW_PLOTS, fname)\n # Matched Pairs.\n matched_pairs_output = td_writer('data_%s' % MATCHED_PAIRS, GFF_EXT, fname)\n statistics['stats_path'] = 'statistics.%s' % TABULAR_EXT\n if output_plots:\n statistics['graph_path'] = make_histogram_path(STATS_GRAPH, fname)\n statistics['perc95'] = perc95(chromosomes)\n if threshold > 0:\n # Apply filter\n filter(chromosomes, threshold)\n if method == 'mode':\n freq = all_pair_distribution(chromosomes, up_distance, down_distance, binsize)\n mode = freq.mode()\n statistics['preview_mode'] = mode\n if output_plots:\n frequency_plot([freq], preview_plot_path, title='Preview frequency plot')\n else:\n statistics['preview_mode'] = 'NA'\n dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)\n orphans = 0\n # x will be used to archive the summary dataset\n x = []\n for cname, chromosome in chromosomes.items():\n # Each peak is (strand, start, end, value)\n watson, crick = split_strands(chromosome)\n # Sort by value of each peak\n watson.sort(key=lambda data: -float(data[3]))\n # Sort by position to facilitate binary search\n crick.sort(key=lambda data: float(data[1]))\n keys = make_keys(crick)\n for peak in watson:\n window = get_window(crick, peak, up_distance, down_distance, keys)\n if method == 'mode':\n match = match_mode(window, peak, mode)\n else:\n match = METHODS[method](window, 
peak)\n if match:\n midpoint = (match[1] + match[2] + peak[1] + peak[2]) // 4\n d = distance(peak, match)\n dist.add(d)\n # Simple output in gff format.\n x.append(gff_row(cname,\n source='cwpair',\n start=midpoint,\n end=midpoint + 1,\n score=peak[3] + match[3],\n attrs={'cw_distance': d}))\n if output_details:\n detailed_output.writerow((cname,\n peak[1],\n peak[2],\n peak[3],\n '+',\n cname,\n match[1],\n match[2],\n match[3], '-',\n midpoint,\n peak[3] + match[3],\n d))\n i = bisect.bisect_left(keys, (match[1] + match[2]) / 2)\n del crick[i]\n del keys[i]\n else:\n if output_orphans:\n orphan_output.writerow((cname, peak[0], peak[1], peak[2], peak[3]))\n # Keep track of orphans for statistics.\n orphans += 1\n # Remaining crick peaks are orphans\n if output_orphans:\n for cpeak in crick:\n orphan_output.writerow((cname, cpeak[0], cpeak[1], cpeak[2], cpeak[3]))\n # Keep track of orphans for statistics.\n orphans += len(crick)\n # Sort output descending by score.\n x.sort(key=lambda data: float(data[5]), reverse=True)\n # Writing a summary to gff format file\n for row in x:\n row_tmp = list(row)\n # Dataset in tuple cannot be modified in Python, so row will\n # be converted to list format to add 'chr'.\n if row_tmp[0] == \"999\":\n row_tmp[0] = 'chrM'\n elif row_tmp[0] == \"998\":\n row_tmp[0] = 'chrY'\n elif row_tmp[0] == \"997\":\n row_tmp[0] = 'chrX'\n else:\n row_tmp[0] = row_tmp[0]\n # Print row_tmp.\n matched_pairs_output.writerow(row_tmp)\n statistics['paired'] = dist.size() * 2\n statistics['orphans'] = orphans\n statistics['final_mode'] = dist.mode()\n if output_plots:\n frequency_plot([dist], final_plot_path, title='Frequency distribution')\n statistics['dist'] = dist\n return statistics\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.rc", "matplotlib.pyplot.figure", "matplotlib.pyplot.gca", "matplotlib.pyplot.savefig", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.use", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
ExpectationMax/pymc3
[ "7988d0bd023c8ba05a2d97bcbb563a67ed9ed82a" ]
[ "pymc3/step_methods/hmc/quadpotential.py" ]
[ "# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\n\nimport aesara\nimport numpy as np\nimport scipy.linalg\n\nfrom numpy.random import normal\nfrom scipy.sparse import issparse\n\nfrom pymc3.aesaraf import floatX\n\n__all__ = [\n \"quad_potential\",\n \"QuadPotentialDiag\",\n \"QuadPotentialFull\",\n \"QuadPotentialFullInv\",\n \"QuadPotentialDiagAdapt\",\n \"QuadPotentialFullAdapt\",\n \"isquadpotential\",\n]\n\n\ndef quad_potential(C, is_cov):\n \"\"\"\n Compute a QuadPotential object from a scaling matrix.\n\n Parameters\n ----------\n C: arraylike, 0 <= ndim <= 2\n scaling matrix for the potential\n vector treated as diagonal matrix.\n is_cov: Boolean\n whether C is provided as a covariance matrix or hessian\n\n Returns\n -------\n q: Quadpotential\n \"\"\"\n if issparse(C):\n if not chol_available:\n raise ImportError(\"Sparse mass matrices require scikits.sparse\")\n elif is_cov:\n return QuadPotentialSparse(C)\n else:\n raise ValueError(\"Sparse precision matrices are not supported\")\n\n partial_check_positive_definite(C)\n if C.ndim == 1:\n if is_cov:\n return QuadPotentialDiag(C)\n else:\n return QuadPotentialDiag(1.0 / C)\n else:\n if is_cov:\n return QuadPotentialFull(C)\n else:\n return QuadPotentialFullInv(C)\n\n\ndef partial_check_positive_definite(C):\n \"\"\"Make a simple but partial check for Positive Definiteness.\"\"\"\n if C.ndim == 1:\n d = C\n else:\n d = np.diag(C)\n (i,) = np.nonzero(np.logical_or(np.isnan(d), d <= 0))\n\n if len(i):\n raise PositiveDefiniteError(\"Simple check failed. Diagonal contains negatives\", i)\n\n\nclass PositiveDefiniteError(ValueError):\n def __init__(self, msg, idx):\n super().__init__(msg)\n self.idx = idx\n self.msg = msg\n\n def __str__(self):\n return f\"Scaling is not positive definite: {self.msg}. 
Check indexes {self.idx}.\"\n\n\nclass QuadPotential:\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def energy(self, x, velocity=None):\n raise NotImplementedError(\"Abstract method\")\n\n def random(self, x):\n raise NotImplementedError(\"Abstract method\")\n\n def velocity_energy(self, x, v_out):\n raise NotImplementedError(\"Abstract method\")\n\n def update(self, sample, grad, tune):\n \"\"\"Inform the potential about a new sample during tuning.\n\n This can be used by adaptive potentials to change the\n mass matrix.\n \"\"\"\n pass\n\n def raise_ok(self, vmap=None):\n \"\"\"Check if the mass matrix is ok, and raise ValueError if not.\n\n Parameters\n ----------\n vmap: blocking.ArrayOrdering.vmap\n List of `VarMap`s, which are namedtuples with var, slc, shp, dtyp\n\n Raises\n ------\n ValueError if any standard deviations are 0 or infinite\n\n Returns\n -------\n None\n \"\"\"\n return None\n\n def reset(self):\n pass\n\n\ndef isquadpotential(value):\n \"\"\"Check whether an object might be a QuadPotential object.\"\"\"\n return isinstance(value, QuadPotential)\n\n\nclass QuadPotentialDiagAdapt(QuadPotential):\n \"\"\"Adapt a diagonal mass matrix from the sample variances.\"\"\"\n\n def __init__(\n self,\n n,\n initial_mean,\n initial_diag=None,\n initial_weight=0,\n adaptation_window=101,\n adaptation_window_multiplier=1,\n dtype=None,\n ):\n \"\"\"Set up a diagonal mass matrix.\"\"\"\n if initial_diag is not None and initial_diag.ndim != 1:\n raise ValueError(\"Initial diagonal must be one-dimensional.\")\n if initial_mean.ndim != 1:\n raise ValueError(\"Initial mean must be one-dimensional.\")\n if initial_diag is not None and len(initial_diag) != n:\n raise ValueError(\n \"Wrong shape for initial_diag: expected {} got {}\".format(n, len(initial_diag))\n )\n if len(initial_mean) != n:\n raise ValueError(\n \"Wrong shape for initial_mean: expected {} got {}\".format(n, len(initial_mean))\n )\n\n if dtype is None:\n dtype = aesara.config.floatX\n\n if initial_diag is None:\n initial_diag = np.ones(n, dtype=dtype)\n initial_weight = 1\n\n self.dtype = dtype\n self._n = n\n\n self._initial_mean = initial_mean\n self._initial_diag = initial_diag\n self._initial_weight = initial_weight\n self.adaptation_window = adaptation_window\n self.adaptation_window_multiplier = float(adaptation_window_multiplier)\n\n self.reset()\n\n def reset(self):\n self._var = np.array(self._initial_diag, dtype=self.dtype, copy=True)\n self._var_aesara = aesara.shared(self._var)\n self._stds = np.sqrt(self._initial_diag)\n self._inv_stds = floatX(1.0) / self._stds\n self._foreground_var = _WeightedVariance(\n self._n, self._initial_mean, self._initial_diag, self._initial_weight, self.dtype\n )\n self._background_var = _WeightedVariance(self._n, dtype=self.dtype)\n self._n_samples = 0\n\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n return np.multiply(self._var, x, out=out)\n\n def energy(self, x, velocity=None):\n \"\"\"Compute kinetic energy at a position in parameter space.\"\"\"\n if velocity is not None:\n return 0.5 * x.dot(velocity)\n return 0.5 * x.dot(self._var * x)\n\n def velocity_energy(self, x, v_out):\n \"\"\"Compute velocity and return kinetic energy at a position in parameter space.\"\"\"\n self.velocity(x, out=v_out)\n return 0.5 * np.dot(x, v_out)\n\n def random(self):\n \"\"\"Draw random value from 
QuadPotential.\"\"\"\n vals = normal(size=self._n).astype(self.dtype)\n return self._inv_stds * vals\n\n def _update_from_weightvar(self, weightvar):\n weightvar.current_variance(out=self._var)\n np.sqrt(self._var, out=self._stds)\n np.divide(1, self._stds, out=self._inv_stds)\n self._var_aesara.set_value(self._var)\n\n def update(self, sample, grad, tune):\n \"\"\"Inform the potential about a new sample during tuning.\"\"\"\n if not tune:\n return\n\n self._foreground_var.add_sample(sample, weight=1)\n self._background_var.add_sample(sample, weight=1)\n self._update_from_weightvar(self._foreground_var)\n\n if self._n_samples > 0 and self._n_samples % self.adaptation_window == 0:\n self._foreground_var = self._background_var\n self._background_var = _WeightedVariance(self._n, dtype=self.dtype)\n self.adaptation_window = int(self.adaptation_window * self.adaptation_window_multiplier)\n\n self._n_samples += 1\n\n def raise_ok(self, vmap):\n \"\"\"Check if the mass matrix is ok, and raise ValueError if not.\n\n Parameters\n ----------\n vmap: blocking.ArrayOrdering.vmap\n List of `VarMap`s, which are namedtuples with var, slc, shp, dtyp\n\n Raises\n ------\n ValueError if any standard deviations are 0 or infinite\n\n Returns\n -------\n None\n \"\"\"\n if np.any(self._stds == 0):\n name_slc = []\n tmp_hold = list(range(self._stds.size))\n for vmap_ in vmap:\n slclen = len(tmp_hold[vmap_.slc])\n for i in range(slclen):\n name_slc.append((vmap_.var, i))\n index = np.where(self._stds == 0)[0]\n errmsg = [\"Mass matrix contains zeros on the diagonal. \"]\n for ii in index:\n errmsg.append(\n \"The derivative of RV `{}`.ravel()[{}] is zero.\".format(*name_slc[ii])\n )\n raise ValueError(\"\\n\".join(errmsg))\n\n if np.any(~np.isfinite(self._stds)):\n name_slc = []\n tmp_hold = list(range(self._stds.size))\n for vmap_ in vmap:\n slclen = len(tmp_hold[vmap_.slc])\n for i in range(slclen):\n name_slc.append((vmap_.var, i))\n index = np.where(~np.isfinite(self._stds))[0]\n errmsg = [\"Mass matrix contains non-finite values on the diagonal. 
\"]\n for ii in index:\n errmsg.append(\n \"The derivative of RV `{}`.ravel()[{}] is non-finite.\".format(*name_slc[ii])\n )\n raise ValueError(\"\\n\".join(errmsg))\n\n\nclass QuadPotentialDiagAdaptGrad(QuadPotentialDiagAdapt):\n \"\"\"Adapt a diagonal mass matrix from the variances of the gradients.\n\n This is experimental, and may be removed without prior deprication.\n \"\"\"\n\n def reset(self):\n super().reset()\n self._grads1 = np.zeros(self._n, dtype=self.dtype)\n self._ngrads1 = 0\n self._grads2 = np.zeros(self._n, dtype=self.dtype)\n self._ngrads2 = 0\n\n def _update(self, var):\n self._var[:] = var\n np.sqrt(self._var, out=self._stds)\n np.divide(1, self._stds, out=self._inv_stds)\n self._var_aesara.set_value(self._var)\n\n def update(self, sample, grad, tune):\n \"\"\"Inform the potential about a new sample during tuning.\"\"\"\n if not tune:\n return\n\n self._grads1[:] += np.abs(grad)\n self._grads2[:] += np.abs(grad)\n self._ngrads1 += 1\n self._ngrads2 += 1\n\n if self._n_samples <= 150:\n super().update(sample, grad, tune)\n else:\n self._update((self._ngrads1 / self._grads1) ** 2)\n\n if self._n_samples > 100 and self._n_samples % 100 == 50:\n self._ngrads1 = self._ngrads2\n self._ngrads2 = 1\n self._grads1[:] = self._grads2\n self._grads2[:] = 1\n\n\nclass _WeightedVariance:\n \"\"\"Online algorithm for computing mean of variance.\"\"\"\n\n def __init__(\n self, nelem, initial_mean=None, initial_variance=None, initial_weight=0, dtype=\"d\"\n ):\n self._dtype = dtype\n self.n_samples = float(initial_weight)\n if initial_mean is None:\n self.mean = np.zeros(nelem, dtype=\"d\")\n else:\n self.mean = np.array(initial_mean, dtype=\"d\", copy=True)\n if initial_variance is None:\n self.raw_var = np.zeros(nelem, dtype=\"d\")\n else:\n self.raw_var = np.array(initial_variance, dtype=\"d\", copy=True)\n\n self.raw_var[:] *= self.n_samples\n\n if self.raw_var.shape != (nelem,):\n raise ValueError(\"Invalid shape for initial variance.\")\n if self.mean.shape != (nelem,):\n raise ValueError(\"Invalid shape for initial mean.\")\n\n def add_sample(self, x, weight):\n x = np.asarray(x)\n self.n_samples += 1\n old_diff = x - self.mean\n self.mean[:] += old_diff / self.n_samples\n new_diff = x - self.mean\n self.raw_var[:] += weight * old_diff * new_diff\n\n def current_variance(self, out=None):\n if self.n_samples == 0:\n raise ValueError(\"Can not compute variance without samples.\")\n if out is not None:\n return np.divide(self.raw_var, self.n_samples, out=out)\n else:\n return (self.raw_var / self.n_samples).astype(self._dtype)\n\n def current_mean(self):\n return self.mean.copy(dtype=self._dtype)\n\n\nclass QuadPotentialDiag(QuadPotential):\n \"\"\"Quad potential using a diagonal covariance matrix.\"\"\"\n\n def __init__(self, v, dtype=None):\n \"\"\"Use a vector to represent a diagonal matrix for a covariance matrix.\n\n Parameters\n ----------\n v: vector, 0 <= ndim <= 1\n Diagonal of covariance matrix for the potential vector\n \"\"\"\n if dtype is None:\n dtype = aesara.config.floatX\n self.dtype = dtype\n v = v.astype(self.dtype)\n s = v ** 0.5\n\n self.s = s\n self.inv_s = 1.0 / s\n self.v = v\n\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n if out is not None:\n np.multiply(x, self.v, out=out)\n return\n return self.v * x\n\n def random(self):\n \"\"\"Draw random value from QuadPotential.\"\"\"\n return floatX(normal(size=self.s.shape)) * self.inv_s\n\n def energy(self, x, velocity=None):\n \"\"\"Compute 
kinetic energy at a position in parameter space.\"\"\"\n if velocity is not None:\n return 0.5 * np.dot(x, velocity)\n return 0.5 * x.dot(self.v * x)\n\n def velocity_energy(self, x, v_out):\n \"\"\"Compute velocity and return kinetic energy at a position in parameter space.\"\"\"\n np.multiply(x, self.v, out=v_out)\n return 0.5 * np.dot(x, v_out)\n\n\nclass QuadPotentialFullInv(QuadPotential):\n \"\"\"QuadPotential object for Hamiltonian calculations using inverse of covariance matrix.\"\"\"\n\n def __init__(self, A, dtype=None):\n \"\"\"Compute the lower cholesky decomposition of the potential.\n\n Parameters\n ----------\n A: matrix, ndim = 2\n Inverse of covariance matrix for the potential vector\n \"\"\"\n if dtype is None:\n dtype = aesara.config.floatX\n self.dtype = dtype\n self.L = floatX(scipy.linalg.cholesky(A, lower=True))\n\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n vel = scipy.linalg.cho_solve((self.L, True), x)\n if out is None:\n return vel\n out[:] = vel\n\n def random(self):\n \"\"\"Draw random value from QuadPotential.\"\"\"\n n = floatX(normal(size=self.L.shape[0]))\n return np.dot(self.L, n)\n\n def energy(self, x, velocity=None):\n \"\"\"Compute kinetic energy at a position in parameter space.\"\"\"\n if velocity is None:\n velocity = self.velocity(x)\n return 0.5 * x.dot(velocity)\n\n def velocity_energy(self, x, v_out):\n \"\"\"Compute velocity and return kinetic energy at a position in parameter space.\"\"\"\n self.velocity(x, out=v_out)\n return 0.5 * np.dot(x, v_out)\n\n\nclass QuadPotentialFull(QuadPotential):\n \"\"\"Basic QuadPotential object for Hamiltonian calculations.\"\"\"\n\n def __init__(self, cov, dtype=None):\n \"\"\"Compute the lower cholesky decomposition of the potential.\n\n Parameters\n ----------\n A: matrix, ndim = 2\n scaling matrix for the potential vector\n \"\"\"\n if dtype is None:\n dtype = aesara.config.floatX\n self.dtype = dtype\n self._cov = np.array(cov, dtype=self.dtype, copy=True)\n self._chol = scipy.linalg.cholesky(self._cov, lower=True)\n self._n = len(self._cov)\n\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n return np.dot(self._cov, x, out=out)\n\n def random(self):\n \"\"\"Draw random value from QuadPotential.\"\"\"\n vals = np.random.normal(size=self._n).astype(self.dtype)\n return scipy.linalg.solve_triangular(self._chol.T, vals, overwrite_b=True)\n\n def energy(self, x, velocity=None):\n \"\"\"Compute kinetic energy at a position in parameter space.\"\"\"\n if velocity is None:\n velocity = self.velocity(x)\n return 0.5 * np.dot(x, velocity)\n\n def velocity_energy(self, x, v_out):\n \"\"\"Compute velocity and return kinetic energy at a position in parameter space.\"\"\"\n self.velocity(x, out=v_out)\n return self.energy(x, v_out)\n\n __call__ = random\n\n\nclass QuadPotentialFullAdapt(QuadPotentialFull):\n \"\"\"Adapt a dense mass matrix using the sample covariances.\"\"\"\n\n def __init__(\n self,\n n,\n initial_mean,\n initial_cov=None,\n initial_weight=0,\n adaptation_window=101,\n adaptation_window_multiplier=2,\n update_window=1,\n dtype=None,\n ):\n warnings.warn(\"QuadPotentialFullAdapt is an experimental feature\")\n\n if initial_cov is not None and initial_cov.ndim != 2:\n raise ValueError(\"Initial covariance must be two-dimensional.\")\n if initial_mean.ndim != 1:\n raise ValueError(\"Initial mean must be one-dimensional.\")\n if initial_cov is not None and initial_cov.shape 
!= (n, n):\n raise ValueError(f\"Wrong shape for initial_cov: expected {n} got {initial_cov.shape}\")\n if len(initial_mean) != n:\n raise ValueError(\n \"Wrong shape for initial_mean: expected {} got {}\".format(n, len(initial_mean))\n )\n\n if dtype is None:\n dtype = aesara.config.floatX\n\n if initial_cov is None:\n initial_cov = np.eye(n, dtype=dtype)\n initial_weight = 1\n\n self.dtype = dtype\n self._n = n\n self._initial_mean = initial_mean\n self._initial_cov = initial_cov\n self._initial_weight = initial_weight\n\n self.adaptation_window = int(adaptation_window)\n self.adaptation_window_multiplier = float(adaptation_window_multiplier)\n self._update_window = int(update_window)\n\n self.reset()\n\n def reset(self):\n self._previous_update = 0\n self._cov = np.array(self._initial_cov, dtype=self.dtype, copy=True)\n self._chol = scipy.linalg.cholesky(self._cov, lower=True)\n self._chol_error = None\n self._foreground_cov = _WeightedCovariance(\n self._n, self._initial_mean, self._initial_cov, self._initial_weight, self.dtype\n )\n self._background_cov = _WeightedCovariance(self._n, dtype=self.dtype)\n self._n_samples = 0\n\n def _update_from_weightvar(self, weightvar):\n weightvar.current_covariance(out=self._cov)\n try:\n self._chol = scipy.linalg.cholesky(self._cov, lower=True)\n except (scipy.linalg.LinAlgError, ValueError) as error:\n self._chol_error = error\n\n def update(self, sample, grad, tune):\n if not tune:\n return\n\n # Steps since previous update\n delta = self._n_samples - self._previous_update\n\n self._foreground_cov.add_sample(sample, weight=1)\n self._background_cov.add_sample(sample, weight=1)\n\n # Update the covariance matrix and recompute the Cholesky factorization\n # every \"update_window\" steps\n if (delta + 1) % self._update_window == 0:\n self._update_from_weightvar(self._foreground_cov)\n\n # Reset the background covariance if we are at the end of the adaptation\n # window.\n if delta >= self.adaptation_window:\n self._foreground_cov = self._background_cov\n self._background_cov = _WeightedCovariance(self._n, dtype=self.dtype)\n\n self._previous_update = self._n_samples\n self.adaptation_window = int(self.adaptation_window * self.adaptation_window_multiplier)\n\n self._n_samples += 1\n\n def raise_ok(self, vmap):\n if self._chol_error is not None:\n raise ValueError(str(self._chol_error))\n\n\nclass _WeightedCovariance:\n \"\"\"Online algorithm for computing mean and covariance\n\n This implements the `Welford's algorithm\n <https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance>`_ based\n on the implementation in `the Stan math library\n <https://github.com/stan-dev/math>`_.\n\n \"\"\"\n\n def __init__(\n self,\n nelem,\n initial_mean=None,\n initial_covariance=None,\n initial_weight=0,\n dtype=\"d\",\n ):\n self._dtype = dtype\n self.n_samples = float(initial_weight)\n if initial_mean is None:\n self.mean = np.zeros(nelem, dtype=\"d\")\n else:\n self.mean = np.array(initial_mean, dtype=\"d\", copy=True)\n if initial_covariance is None:\n self.raw_cov = np.eye(nelem, dtype=\"d\")\n else:\n self.raw_cov = np.array(initial_covariance, dtype=\"d\", copy=True)\n\n self.raw_cov[:] *= self.n_samples\n\n if self.raw_cov.shape != (nelem, nelem):\n raise ValueError(\"Invalid shape for initial covariance.\")\n if self.mean.shape != (nelem,):\n raise ValueError(\"Invalid shape for initial mean.\")\n\n def add_sample(self, x, weight):\n x = np.asarray(x)\n self.n_samples += 1\n old_diff = x - self.mean\n self.mean[:] += old_diff / self.n_samples\n 
new_diff = x - self.mean\n self.raw_cov[:] += weight * new_diff[:, None] * old_diff[None, :]\n\n def current_covariance(self, out=None):\n if self.n_samples == 0:\n raise ValueError(\"Can not compute covariance without samples.\")\n if out is not None:\n return np.divide(self.raw_cov, self.n_samples - 1, out=out)\n else:\n return (self.raw_cov / (self.n_samples - 1)).astype(self._dtype)\n\n def current_mean(self):\n return np.array(self.mean, dtype=self._dtype)\n\n\ntry:\n import sksparse.cholmod as cholmod\n\n chol_available = True\nexcept ImportError:\n chol_available = False\n\nif chol_available:\n __all__ += [\"QuadPotentialSparse\"]\n\n import aesara.sparse\n\n class QuadPotentialSparse(QuadPotential):\n def __init__(self, A):\n \"\"\"Compute a sparse cholesky decomposition of the potential.\n\n Parameters\n ----------\n A: matrix, ndim = 2\n scaling matrix for the potential vector\n \"\"\"\n self.A = A\n self.size = A.shape[0]\n self.factor = factor = cholmod.cholesky(A)\n self.d_sqrt = np.sqrt(factor.D())\n\n def velocity(self, x):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n A = aesara.sparse.as_sparse(self.A)\n return aesara.sparse.dot(A, x)\n\n def random(self):\n \"\"\"Draw random value from QuadPotential.\"\"\"\n n = floatX(normal(size=self.size))\n n /= self.d_sqrt\n n = self.factor.solve_Lt(n)\n n = self.factor.apply_Pt(n)\n return n\n\n def energy(self, x):\n \"\"\"Compute kinetic energy at a position in parameter space.\"\"\"\n return 0.5 * x.T.dot(self.velocity(x))\n" ]
[ [ "numpy.ones", "numpy.multiply", "numpy.eye", "numpy.divide", "numpy.zeros", "numpy.diag", "scipy.sparse.issparse", "numpy.any", "numpy.abs", "numpy.asarray", "numpy.random.normal", "numpy.where", "numpy.isnan", "numpy.sqrt", "numpy.dot", "numpy.array", "numpy.isfinite" ] ]
Vikicsizmadia/ctp
[ "d88fdfecf4b90ee42e6137a9767226c0d35b19a3" ]
[ "ctp/evaluation/slow.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\n# from tqdm import tqdm\n\nimport torch\nfrom torch import nn\n\nfrom ctp.util import make_batches\nfrom ctp.models import BaseLatentFeatureModel\n\nfrom typing import Tuple, Dict\n\n\ndef evaluate_slow(entity_embeddings: nn.Embedding,\n predicate_embeddings: nn.Embedding,\n test_triples: Tuple[str, str, str],\n all_triples: Tuple[str, str, str],\n entity_to_index: Dict[str, int],\n predicate_to_index: Dict[str, int],\n model: BaseLatentFeatureModel,\n batch_size: int,\n device: torch.device):\n\n xs = np.array([entity_to_index.get(s) for (s, _, _) in test_triples])\n xp = np.array([predicate_to_index.get(p) for (_, p, _) in test_triples])\n xo = np.array([entity_to_index.get(o) for (_, _, o) in test_triples])\n\n sp_to_o, po_to_s = {}, {}\n for s, p, o in all_triples:\n s_idx, p_idx, o_idx = entity_to_index.get(s), predicate_to_index.get(p), entity_to_index.get(o)\n sp_key = (s_idx, p_idx)\n po_key = (p_idx, o_idx)\n\n if sp_key not in sp_to_o:\n sp_to_o[sp_key] = []\n if po_key not in po_to_s:\n po_to_s[po_key] = []\n\n sp_to_o[sp_key] += [o_idx]\n po_to_s[po_key] += [s_idx]\n\n assert xs.shape == xp.shape == xo.shape\n nb_test_triples = xs.shape[0]\n\n batches = make_batches(nb_test_triples, batch_size)\n\n hits = dict()\n hits_at = [1, 3, 5, 10]\n\n for hits_at_value in hits_at:\n hits[hits_at_value] = 0.0\n\n def hits_at_n(n_, rank):\n if rank <= n_:\n hits[n_] = hits.get(n_, 0) + 1\n\n counter = 0\n mrr = 0.0\n\n ranks_l, ranks_r = [], []\n for start, end in batches:\n batch_xs = xs[start:end]\n batch_xp = xp[start:end]\n batch_xo = xo[start:end]\n\n batch_size = batch_xs.shape[0]\n counter += batch_size * 2\n\n with torch.no_grad():\n tensor_xs = torch.tensor(batch_xs, dtype=torch.long, device=device)\n tensor_xp = torch.tensor(batch_xp, dtype=torch.long, device=device)\n tensor_xo = torch.tensor(batch_xo, dtype=torch.long, device=device)\n\n tensor_xs_emb = entity_embeddings(tensor_xs)\n tensor_xp_emb = predicate_embeddings(tensor_xp)\n tensor_xo_emb = entity_embeddings(tensor_xo)\n # print(entity_embeddings.weight.shape)\n\n if model.model.facts[0].shape[0] < 90000:\n res_sp, res_po = model.forward_(tensor_xp_emb, tensor_xs_emb, tensor_xo_emb)\n else:\n res_sp, res_po = model.forward__(tensor_xp_emb, tensor_xs_emb, tensor_xo_emb)\n\n _scores_sp, _ = res_sp\n _scores_po, _ = res_po\n\n scores_sp, scores_po = _scores_sp.cpu().numpy(), _scores_po.cpu().numpy()\n\n del _scores_sp, _scores_po\n del tensor_xs, tensor_xp, tensor_xo\n del tensor_xs_emb, tensor_xp_emb, tensor_xo_emb\n del res_sp, res_po\n # print(scores_sp.shape, scores_po.shape)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n batch_size = batch_xs.shape[0]\n for elem_idx in range(batch_size):\n s_idx, p_idx, o_idx = batch_xs[elem_idx], batch_xp[elem_idx], batch_xo[elem_idx]\n\n # Code for the filtered setting\n sp_key = (s_idx, p_idx)\n po_key = (p_idx, o_idx)\n\n o_to_remove = sp_to_o[sp_key]\n s_to_remove = po_to_s[po_key]\n\n for tmp_o_idx in o_to_remove:\n if tmp_o_idx != o_idx:\n scores_sp[elem_idx, tmp_o_idx] = - np.infty\n\n for tmp_s_idx in s_to_remove:\n if tmp_s_idx != s_idx:\n scores_po[elem_idx, tmp_s_idx] = - np.infty\n # End of code for the filtered setting\n\n rank_l = 1 + np.argsort(np.argsort(- scores_po[elem_idx, :]))[s_idx]\n rank_r = 1 + np.argsort(np.argsort(- scores_sp[elem_idx, :]))[o_idx]\n\n ranks_l += [rank_l]\n ranks_r += [rank_r]\n\n mrr += 1.0 / rank_l\n mrr += 1.0 / rank_r\n\n for n in hits_at:\n hits_at_n(n, rank_l)\n\n for n 
in hits_at:\n hits_at_n(n, rank_r)\n\n counter = float(counter)\n\n mrr /= counter\n\n for n in hits_at:\n hits[n] /= counter\n\n metrics = dict()\n metrics['MRR'] = mrr\n for n in hits_at:\n metrics['hits@{}'.format(n)] = hits[n]\n\n return metrics\n" ]
[ [ "torch.cuda.empty_cache", "torch.no_grad", "torch.tensor", "numpy.argsort", "torch.cuda.is_available" ] ]
dataflowr/evaluating_bdl
[ "b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2" ]
[ "depthCompletion/mcdropout_eval_auce.py" ]
[ "# code-checked\r\n# server-checked\r\n\r\nimport os\r\n\r\nimport torch\r\nimport torch.nn.parallel\r\nimport torch.optim\r\nimport torch.utils.data\r\nfrom torch.autograd import Variable\r\n\r\nfrom model_mcdropout import DepthCompletionNet\r\n\r\nfrom datasets import DatasetKITTIVal\r\nfrom criterion import MaskedL2Gauss, RMSE\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport pickle\r\n\r\nimport scipy.stats\r\n\r\nimport matplotlib\r\nmatplotlib.use(\"Agg\")\r\nimport matplotlib.pyplot as plt\r\n\r\nimport random\r\n\r\nmodel_id = \"mcdropout_virtual\"\r\n\r\nmodel_is = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\r\nprint (len(model_is))\r\n\r\nsnapshot_dir = \"/root/evaluating_bdl/depthCompletion/training_logs/%s_eval_auce\" % model_id\r\n\r\nkitti_depth_path = \"/root/data/kitti_depth\"\r\n\r\nbatch_size = 4\r\n\r\nif not os.path.exists(snapshot_dir):\r\n os.makedirs(snapshot_dir)\r\n\r\ncolors = {}\r\ncolors[1] = \"k\"\r\ncolors[2] = \"b\"\r\ncolors[4] = \"g\"\r\ncolors[8] = \"r\"\r\ncolors[16] = \"c\"\r\ncolors[32] = \"m\"\r\ncolors[64] = \"y\"\r\n\r\nnum_model_is = len(model_is)\r\nprint (num_model_is)\r\n\r\nM_values = [1, 2, 4, 8, 16, 32]\r\nprint (M_values)\r\n\r\n# # # # # # # # # # # # # # # # # # debug START:\r\n# M_values = [1, 2, 4]\r\n# model_is = [0, 1]\r\n# # # # # # # # # # # # # # # # # # debug END:\r\n\r\nnum_runs_per_M = 1\r\n\r\ncoverage_values = {}\r\navg_length_values = {}\r\ncoverage_error_values = {}\r\nabs_coverage_error_values = {}\r\nneg_coverage_error_values = {}\r\nfor model_i in model_is:\r\n coverage_values[model_i] = {}\r\n avg_length_values[model_i] = {}\r\n coverage_error_values[model_i] = {}\r\n abs_coverage_error_values[model_i] = {}\r\n neg_coverage_error_values[model_i] = {}\r\n\r\n for M in M_values:\r\n coverage_values[model_i][M] = {}\r\n avg_length_values[model_i][M] = {}\r\n coverage_error_values[model_i][M] = {}\r\n abs_coverage_error_values[model_i][M] = {}\r\n neg_coverage_error_values[model_i][M] = {}\r\n\r\nauc_abs_error_values = {}\r\nauc_neg_error_values = {}\r\nauc_length_values = {}\r\nloss_values = {}\r\nrmse_values = {}\r\nfor M in M_values:\r\n auc_abs_error_values[M] = []\r\n auc_neg_error_values[M] = []\r\n auc_length_values[M] = []\r\n\r\n loss_values[M] = []\r\n rmse_values[M] = []\r\n\r\neval_dataset = DatasetKITTIVal(kitti_depth_path=kitti_depth_path)\r\neval_loader = torch.utils.data.DataLoader(dataset=eval_dataset, batch_size=batch_size, shuffle=False, num_workers=4)\r\n\r\ncriterion = MaskedL2Gauss().cuda()\r\nrmse_criterion = RMSE().cuda()\r\n\r\nfor model_i in model_is:\r\n print (\"model_i: %d\" % model_i)\r\n\r\n restore_from = \"/root/evaluating_bdl/depthCompletion/trained_models/%s_%d/checkpoint_40000.pth\" % (model_id, model_i)\r\n model = DepthCompletionNet().cuda()\r\n model = torch.nn.DataParallel(model)\r\n model.load_state_dict(torch.load(restore_from))\r\n model.eval()\r\n\r\n for M in M_values:\r\n M_float = float(M)\r\n print (\"M: %d\" % M)\r\n\r\n for run in range(num_runs_per_M):\r\n print (\"run: %d\" % run)\r\n\r\n batch_losses = []\r\n batch_rmses = []\r\n sigma_alea_values = np.array([])\r\n sigma_epi_values = np.array([])\r\n sigma_pred_values = np.array([])\r\n mean_values = np.array([])\r\n target_values = np.array([])\r\n for i_iter, batch in enumerate(eval_loader):\r\n with torch.no_grad(): # (corresponds to setting volatile=True in all variables, this is done during inference to reduce memory consumption)\r\n imgs, sparses, targets, file_ids = batch\r\n imgs = Variable(imgs.cuda()) 
# (shape: (batch_size, h, w))\r\n sparses = Variable(sparses.cuda()) # (shape: (batch_size, h, w))\r\n targets = Variable(targets.cuda()) # (shape: (batch_size, h, w))\r\n\r\n means = []\r\n sigma_2_aleas = []\r\n for i in range(M):\r\n mean, log_var = model(imgs, sparses) # (both of shape: (batch_size, 1, h, w))\r\n\r\n sigma_2_alea = torch.exp(log_var) # (sigma_alea^2) # (shape: (batch_size, 1, h, w))\r\n\r\n means.append(mean)\r\n sigma_2_aleas.append(sigma_2_alea)\r\n\r\n mean = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w))\r\n for value in means:\r\n mean = mean + value/M_float\r\n\r\n sigma_2_alea = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_alea^2)\r\n for value in sigma_2_aleas:\r\n sigma_2_alea = sigma_2_alea + value/M_float\r\n\r\n sigma_2_epi = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_epi^2)\r\n for value in means:\r\n sigma_2_epi = sigma_2_epi + torch.pow(mean - value, 2)/M_float\r\n\r\n sigma_2_pred = sigma_2_alea + sigma_2_epi # (sigma_pred^2)\r\n\r\n loss = criterion(mean, torch.log(sigma_2_pred), targets)\r\n rmse = rmse_criterion(mean, targets)\r\n\r\n print('iter = {}/{} completed, loss = {}, rmse = {}'.format(i_iter, len(eval_dataset)/batch_size, loss.data.cpu().numpy(), rmse.data.cpu().numpy()))\r\n\r\n batch_losses.append(loss.data.cpu().numpy())\r\n batch_rmses.append(rmse.data.cpu().numpy())\r\n\r\n sigma_alea = torch.sqrt(sigma_2_alea) # (shape: (batch_size, 1, h, w))\r\n sigma_epi = torch.sqrt(sigma_2_epi) # (shape: (batch_size, 1, h, w))\r\n sigma_pred = torch.sqrt(sigma_2_pred) # (shape: (batch_size, 1, h, w))\r\n\r\n target = torch.unsqueeze(targets, 1) # (shape: (batch_size, 1, h, w))\r\n\r\n valid_mask = (target > 0).detach() # (shape: (batch_size, 1, h, w))\r\n\r\n mean = mean[valid_mask] # (shape: (num_valids, ))\r\n sigma_alea = sigma_alea[valid_mask] # (shape: (num_valids, ))\r\n sigma_epi = sigma_epi[valid_mask] # (shape: (num_valids, ))\r\n sigma_pred = sigma_pred[valid_mask] # (shape: (num_valids, ))\r\n target = target[valid_mask] # (shape: (num_valids, ))\r\n\r\n sigma_alea_values = np.concatenate((sigma_alea_values, sigma_alea.data.cpu().numpy()))\r\n sigma_epi_values = np.concatenate((sigma_epi_values, sigma_epi.data.cpu().numpy()))\r\n sigma_pred_values = np.concatenate((sigma_pred_values, sigma_pred.data.cpu().numpy()))\r\n mean_values = np.concatenate((mean_values, mean.data.cpu().numpy()))\r\n target_values = np.concatenate((target_values, target.data.cpu().numpy()))\r\n\r\n # # # # # # # # # # # # # # # # # # debug START:\r\n # if i_iter > 0:\r\n # break\r\n # # # # # # # # # # # # # # # # # # debug END:\r\n\r\n val_loss = np.mean(batch_losses)\r\n print (\"val loss: %g\" % val_loss)\r\n val_rmse = np.mean(batch_rmses)\r\n print (\"val rmse: %g\" % val_rmse)\r\n loss_values[M].append(val_loss)\r\n rmse_values[M].append(val_rmse)\r\n\r\n # (sigma_alea/epi/pred_values has shape: (num_predictions_with_GT, ))\r\n # (mean_values has shape: (num_predictions_with_GT, ))\r\n # (target_values has shape: (num_predictions_with_GT, ))\r\n\r\n print (sigma_alea_values.shape)\r\n print (sigma_epi_values.shape)\r\n print (sigma_pred_values.shape)\r\n print (mean_values.shape)\r\n print (target_values.shape)\r\n\r\n num_predictions_with_GT = float(target_values.shape[0])\r\n\r\n coverage_values_alea = []\r\n coverage_values_epi = []\r\n coverage_values_pred = []\r\n avg_length_values_alea = []\r\n avg_length_values_epi = []\r\n avg_length_values_pred = []\r\n alphas = 
list(np.arange(start=0.01, stop=1.0, step=0.01)) # ([0.01, 0.02, ..., 0.99], 99 elements)\r\n for step, alpha in enumerate(alphas):\r\n #print (\"alpha: %d/%d\" % (step+1, len(alphas)))\r\n\r\n lower_values_alea = mean_values - scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_alea_values # (shape: (num_predictions_with_GT, ))\r\n upper_values_alea = mean_values + scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_alea_values # (shape: (num_predictions_with_GT, ))\r\n\r\n coverage_alea = np.count_nonzero(np.logical_and(target_values >= lower_values_alea, target_values <= upper_values_alea))/num_predictions_with_GT\r\n coverage_values_alea.append(coverage_alea)\r\n\r\n avg_length_alea = np.mean(upper_values_alea - lower_values_alea)\r\n avg_length_values_alea.append(avg_length_alea)\r\n #\r\n lower_values_epi = mean_values - scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_epi_values # (shape: (num_predictions_with_GT, ))\r\n upper_values_epi = mean_values + scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_epi_values # (shape: (num_predictions_with_GT, ))\r\n\r\n coverage_epi = np.count_nonzero(np.logical_and(target_values >= lower_values_epi, target_values <= upper_values_epi))/num_predictions_with_GT\r\n coverage_values_epi.append(coverage_epi)\r\n\r\n avg_length_epi = np.mean(upper_values_epi - lower_values_epi)\r\n avg_length_values_epi.append(avg_length_epi)\r\n #\r\n lower_values_pred = mean_values - scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_pred_values # (shape: (num_predictions_with_GT, ))\r\n upper_values_pred = mean_values + scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_pred_values # (shape: (num_predictions_with_GT, ))\r\n\r\n coverage_pred = np.count_nonzero(np.logical_and(target_values >= lower_values_pred, target_values <= upper_values_pred))/num_predictions_with_GT\r\n coverage_values_pred.append(coverage_pred)\r\n\r\n avg_length_pred = np.mean(upper_values_pred - lower_values_pred)\r\n avg_length_values_pred.append(avg_length_pred)\r\n\r\n auc_length_alea = np.trapz(y=avg_length_values_alea, x=alphas)\r\n print (\"AUC - Length - Alea: %g\" % auc_length_alea)\r\n auc_length_epi = np.trapz(y=avg_length_values_epi, x=alphas)\r\n print (\"AUC - Length - Epi: %g\" % auc_length_epi)\r\n auc_length_pred = np.trapz(y=avg_length_values_pred, x=alphas)\r\n print (\"AUC - Length - Pred: %g\" % auc_length_pred)\r\n\r\n coverage_error_values_alea = np.array(coverage_values_alea) - (1.0 - np.array(alphas))\r\n coverage_error_values_epi = np.array(coverage_values_epi) - (1.0 - np.array(alphas))\r\n coverage_error_values_pred = np.array(coverage_values_pred) - (1.0 - np.array(alphas))\r\n\r\n abs_coverage_error_values_alea = np.abs(coverage_error_values_alea)\r\n abs_coverage_error_values_epi = np.abs(coverage_error_values_epi)\r\n abs_coverage_error_values_pred = np.abs(coverage_error_values_pred)\r\n\r\n neg_coverage_error_values_alea = (np.abs(coverage_error_values_alea) - coverage_error_values_alea)/2.0\r\n neg_coverage_error_values_epi = (np.abs(coverage_error_values_epi) - coverage_error_values_epi)/2.0\r\n neg_coverage_error_values_pred = (np.abs(coverage_error_values_pred) - coverage_error_values_pred)/2.0\r\n\r\n auc_error_alea = np.trapz(y=abs_coverage_error_values_alea, x=alphas)\r\n print (\"AUC - Empirical coverage absolute error - Alea: %g\" % auc_error_alea)\r\n auc_error_epi = np.trapz(y=abs_coverage_error_values_epi, x=alphas)\r\n print (\"AUC - Empirical coverage absolute error - Epi: %g\" % auc_error_epi)\r\n auc_error_pred = np.trapz(y=abs_coverage_error_values_pred, x=alphas)\r\n print (\"AUC - Empirical 
coverage absolute error - Pred: %g\" % auc_error_pred)\r\n\r\n auc_neg_error_alea = np.trapz(y=neg_coverage_error_values_alea, x=alphas)\r\n print (\"AUC - Empirical coverage negative error - Alea: %g\" % auc_neg_error_alea)\r\n auc_neg_error_epi = np.trapz(y=neg_coverage_error_values_epi, x=alphas)\r\n print (\"AUC - Empirical coverage negative error - Epi: %g\" % auc_neg_error_epi)\r\n auc_neg_error_pred = np.trapz(y=neg_coverage_error_values_pred, x=alphas)\r\n print (\"AUC - Empirical coverage negative error - Pred: %g\" % auc_neg_error_pred)\r\n\r\n coverage_values[model_i][M][run] = np.array(coverage_values_pred)\r\n avg_length_values[model_i][M][run] = np.array(avg_length_values_pred)\r\n coverage_error_values[model_i][M][run] = np.array(coverage_error_values_pred)\r\n abs_coverage_error_values[model_i][M][run] = abs_coverage_error_values_pred\r\n neg_coverage_error_values[model_i][M][run] = neg_coverage_error_values_pred\r\n\r\n auc_abs_error_values[M].append(auc_error_pred)\r\n auc_length_values[M].append(auc_length_pred)\r\n auc_neg_error_values[M].append(auc_neg_error_pred)\r\n\r\n print (\"#######################\")\r\n\r\n print (\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\")\r\n\r\n print (\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\r\n\r\nauc_abs_error_means = {}\r\nauc_abs_error_stds = {}\r\nauc_neg_error_means = {}\r\nauc_neg_error_stds = {}\r\nauc_length_means = {}\r\nauc_length_stds = {}\r\nloss_means = {}\r\nloss_stds = {}\r\nrmse_means = {}\r\nrmse_stds = {}\r\nfor M in M_values:\r\n auc_abs_error_values_mean = 0.0\r\n for value in auc_abs_error_values[M]:\r\n auc_abs_error_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n auc_abs_error_values_var = 0.0\r\n for value in auc_abs_error_values[M]:\r\n auc_abs_error_values_var += ((value - auc_abs_error_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n auc_abs_error_values_std = np.sqrt(auc_abs_error_values_var)\r\n\r\n auc_abs_error_means[M] = auc_abs_error_values_mean\r\n auc_abs_error_stds[M] = auc_abs_error_values_std\r\n\r\n ###\r\n\r\n auc_neg_error_values_mean = 0.0\r\n for value in auc_neg_error_values[M]:\r\n auc_neg_error_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n auc_neg_error_values_var = 0.0\r\n for value in auc_neg_error_values[M]:\r\n auc_neg_error_values_var += ((value - auc_neg_error_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n auc_neg_error_values_std = np.sqrt(auc_neg_error_values_var)\r\n\r\n auc_neg_error_means[M] = auc_neg_error_values_mean\r\n auc_neg_error_stds[M] = auc_neg_error_values_std\r\n\r\n ###\r\n\r\n auc_length_values_mean = 0.0\r\n for value in auc_length_values[M]:\r\n auc_length_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n auc_length_values_var = 0.0\r\n for value in auc_length_values[M]:\r\n auc_length_values_var += ((value - auc_length_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n auc_length_values_std = np.sqrt(auc_length_values_var)\r\n\r\n auc_length_means[M] = auc_length_values_mean\r\n auc_length_stds[M] = auc_length_values_std\r\n\r\n ###\r\n\r\n loss_values_mean = 0.0\r\n for value in loss_values[M]:\r\n loss_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n loss_values_var = 0.0\r\n for value in loss_values[M]:\r\n loss_values_var += ((value - loss_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n loss_values_std = np.sqrt(loss_values_var)\r\n\r\n loss_means[M] = loss_values_mean\r\n loss_stds[M] = loss_values_std\r\n\r\n 
###\r\n\r\n rmse_values_mean = 0.0\r\n for value in rmse_values[M]:\r\n rmse_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n rmse_values_var = 0.0\r\n for value in rmse_values[M]:\r\n rmse_values_var += ((value - rmse_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n rmse_values_std = np.sqrt(rmse_values_var)\r\n\r\n rmse_means[M] = rmse_values_mean\r\n rmse_stds[M] = rmse_values_std\r\n\r\n\r\nfor M in M_values:\r\n print (\"M = %d, Empirical coverage absolute error (AUC) - mean: %g, std: %g\" % (M, auc_abs_error_means[M], auc_abs_error_stds[M]))\r\n print (\"M = %d, Empirical coverage negative error (AUC) - mean: %g, std: %g\" % (M, auc_neg_error_means[M], auc_neg_error_stds[M]))\r\n print (\"M = %d, Average length (AUC) - mean: %g, std: %g\" % (M, auc_length_means[M], auc_length_stds[M]))\r\n print (\"M = %d, Loss - mean: %g, std: %g\" % (M, loss_means[M], loss_stds[M]))\r\n print (\"M = %d, RMSE - mean: %g, std: %g\" % (M, rmse_means[M], rmse_stds[M]))\r\n print (\"#####\")\r\n\r\nplt.figure(1)\r\nplt.plot([0.0, 1.0], [0.0, 1.0], \"k:\", label=\"Perfect\")\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(coverage_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(coverage_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Empirical coverage\")\r\nplt.xlabel(\"p\")\r\nplt.title(\"Prediction intervals - Empirical coverage\")\r\nplt.savefig(\"%s/empirical_coverage.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nplt.figure(1)\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(avg_length_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(avg_length_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Average interval length [m]\")\r\nplt.xlabel(\"p\")\r\navg_length_ylim = plt.ylim()\r\nplt.title(\"Prediction intervals - Average interval length\")\r\nplt.savefig(\"%s/length.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nplt.figure(1)\r\nplt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Empirical coverage error\")\r\nplt.xlabel(\"p\")\r\ncoverage_error_ylim = plt.ylim()\r\nplt.title(\"Prediction intervals - Empirical coverage error\")\r\nplt.savefig(\"%s/empirical_coverage_error.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nplt.figure(1)\r\nplt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(abs_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(abs_coverage_error_values[model_i][M][run], 0), 
color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Empirical coverage absolute error\")\r\nplt.xlabel(\"p\")\r\nabs_coverage_error_ylim = plt.ylim()\r\nplt.title(\"Prediction intervals - Empirical coverage absolute error\")\r\nplt.savefig(\"%s/empirical_coverage_absolute_error.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nplt.figure(1)\r\nplt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(neg_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(neg_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Empirical coverage negative error\")\r\nplt.xlabel(\"p\")\r\nneg_coverage_error_ylim = plt.ylim()\r\nplt.title(\"Prediction intervals - Empirical coverage negative error\")\r\nplt.savefig(\"%s/empirical_coverage_negative_error.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nfor M in M_values:\r\n plt.figure(1)\r\n plt.plot([0.0, 1.0], [0.0, 1.0], \"k:\", label=\"Perfect\")\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(coverage_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(coverage_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Empirical coverage\")\r\n plt.xlabel(\"p\")\r\n plt.title(\"Prediction intervals - Empirical coverage\")\r\n plt.savefig(\"%s/empirical_coverage_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\n plt.figure(1)\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(avg_length_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(avg_length_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Average interval length [m]\")\r\n plt.xlabel(\"p\")\r\n plt.ylim(avg_length_ylim)\r\n plt.title(\"Prediction intervals - Average interval length\")\r\n plt.savefig(\"%s/length_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\n plt.figure(1)\r\n plt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Empirical coverage error\")\r\n plt.xlabel(\"p\")\r\n plt.ylim(coverage_error_ylim)\r\n plt.title(\"Prediction intervals - Empirical coverage error\")\r\n plt.savefig(\"%s/empirical_coverage_error_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\n plt.figure(1)\r\n plt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(abs_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, 
np.flip(abs_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Empirical coverage absolute error\")\r\n plt.xlabel(\"p\")\r\n plt.ylim(abs_coverage_error_ylim)\r\n plt.title(\"Prediction intervals - Empirical coverage absolute error\")\r\n plt.savefig(\"%s/empirical_coverage_absolute_error_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\n plt.figure(1)\r\n plt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(neg_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(neg_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Empirical coverage negative error\")\r\n plt.xlabel(\"p\")\r\n plt.ylim(neg_coverage_error_ylim)\r\n plt.title(\"Prediction intervals - Empirical coverage negative error\")\r\n plt.savefig(\"%s/empirical_coverage_negative_error_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\nwith open(\"%s/auc_abs_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(auc_abs_error_values, file)\r\n\r\nwith open(\"%s/auc_neg_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(auc_neg_error_values, file)\r\n\r\nwith open(\"%s/auc_length_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(auc_length_values, file)\r\n\r\nwith open(\"%s/loss_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(loss_values, file)\r\n\r\nwith open(\"%s/rmse_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(rmse_values, file)\r\n\r\nwith open(\"%s/coverage_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(coverage_values, file)\r\n\r\nwith open(\"%s/avg_length_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(avg_length_values, file)\r\n\r\nwith open(\"%s/coverage_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(coverage_error_values, file)\r\n\r\nwith open(\"%s/abs_coverage_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(abs_coverage_error_values, file)\r\n\r\nwith open(\"%s/neg_coverage_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(neg_coverage_error_values, file)\r\n" ]
[ [ "torch.utils.data.DataLoader", "torch.no_grad", "torch.sqrt", "torch.log", "matplotlib.pyplot.ylabel", "numpy.trapz", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.logical_and", "numpy.abs", "matplotlib.pyplot.title", "matplotlib.use", "torch.nn.DataParallel", "numpy.mean", "torch.unsqueeze", "torch.load", "numpy.arange", "matplotlib.pyplot.close", "matplotlib.pyplot.ylim", "numpy.array", "torch.pow", "matplotlib.pyplot.legend", "torch.exp", "numpy.flip", "numpy.sqrt", "matplotlib.pyplot.xlabel" ] ]
Classic-Daniel/gradient-video-delay
[ "a03e11a6b14e9e89198cf46e2b435cc1e9035d63" ]
[ "main.py" ]
[ "import numpy as np\nimport cv2\n\nBUFFER_SIZE = 30\n\nclass imageGenerator:\n def __init__(self, img):\n self.imgBuffer = [img] * BUFFER_SIZE\n self.currentIndex = 0\n print(f\"Image type: {type(img)}\")\n print(f\"buffer shape: {self.imgBuffer[0].shape}\")\n\n def addNewImage(self, img):\n self.imgBuffer[self.currentIndex] = img\n self.currentIndex = (self.currentIndex + 1) % BUFFER_SIZE\n\n def getProcessedImage(self):\n generatedImg = np.copy(self.imgBuffer[self.currentIndex])\n height = self.imgBuffer[self.currentIndex].shape[1]\n heightStep = round(height / BUFFER_SIZE)\n\n for i in range(1, BUFFER_SIZE):\n generatedImg[:, heightStep * i : heightStep * (i + 1)] = self.imgBuffer[(self.currentIndex + i) % BUFFER_SIZE][:, heightStep * i : heightStep * (i + 1)]\n\n return generatedImg\n\ndef initCameraStream(): \n cap = cv2.VideoCapture(cv2.CAP_V4L2)\n generator = None\n # The device number might be 0 or 1 depending on the device and the webcam\n # cap.open(0, cv2.CAP_DSHOW)\n while(True):\n ret, frame = cap.read()\n if(ret and frame.shape[0] > 0):\n if generator == None:\n generator = imageGenerator(frame)\n\n generator.addNewImage(frame)\n cv2.imshow('frame', generator.getProcessedImage())\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n\ndef main():\n initCameraStream()\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.copy" ] ]
mhilmiasyrofi/Once-for-All-Adversarial-Training
[ "c92bc88bdcf8bd531ca02017a4d2d1410899519c" ]
[ "models/svhn/wide_resnet.py" ]
[ "\"\"\"PyTorch implementation of Wide-ResNet taken from \nhttps://github.com/jeromerony/fast_adversarial/blob/master/fast_adv/models/cifar10/wide_resnet.py\"\"\"\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, out_planes, stride, dropRate=0.0):\n super(BasicBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.droprate = dropRate\n self.equalInOut = (in_planes == out_planes)\n self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False) or None\n\n def forward(self, x):\n if not self.equalInOut:\n x = self.relu1(self.bn1(x))\n else:\n out = self.relu1(self.bn1(x))\n out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n if self.droprate > 0:\n out = F.dropout(out, p=self.droprate, training=self.training)\n out = self.conv2(out)\n return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\n\nclass NetworkBlock(nn.Module):\n def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):\n super(NetworkBlock, self).__init__()\n self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)\n\n def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):\n layers = []\n for i in range(nb_layers):\n layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layer(x)\n\n\nclass WideResNet(nn.Module):\n def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):\n super(WideResNet, self).__init__()\n nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]\n assert (depth - 4) % 6 == 0, 'depth should be 6n+4'\n n = (depth - 4) // 6\n block = BasicBlock\n # 1st conv before any network block\n self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,\n padding=1, bias=False)\n # 1st block\n self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)\n # 2nd block\n self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)\n # 3rd block\n self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)\n # global average pooling and classifier\n self.bn1 = nn.BatchNorm2d(nChannels[3])\n self.relu = nn.ReLU(inplace=True)\n self.fc = nn.Linear(nChannels[3], num_classes)\n self.nChannels = nChannels[3]\n \n\n def forward(self, x):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n out = out.view(-1, self.nChannels)\n return self.fc(out)\n\ndef WRN16_8():\n return WideResNet(depth=16, num_classes=10, widen_factor=8, dropRate=0.3)" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.functional.avg_pool2d", "torch.nn.functional.dropout", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
ducnx/TPC-LoS-prediction
[ "49713f5bd7e77c2adb1ff950c885d087a398a1ad" ]
[ "models/hyperparameter_scripts/eICU/tpc_stage2.py" ]
[ "from eICU_preprocessing.split_train_test import create_folder\nfrom models.run_tpc import TPC\nimport numpy as np\nimport random\nfrom models.final_experiment_scripts.best_hyperparameters import best_global\nfrom models.initialise_arguments import initialise_tpc_arguments\n\n\ndef get_hyperparam_config(dataset):\n\n c = initialise_tpc_arguments()\n c['mode'] = 'train'\n c['exp_name'] = 'TPC'\n if dataset == 'MIMIC':\n c['no_diag'] = True\n c['dataset'] = dataset\n c['model_type'] = 'tpc'\n c = best_global(c)\n\n # hyper-parameter grid\n param_grid = {\n 'n_layers': [5, 6, 7, 8, 9, 10, 11, 12], # most promising range is 5-12 layers\n 'temp_kernels': list(int(x) for x in np.logspace(np.log2(4), np.log2(16), base=2, num=16)),\n 'point_sizes': list(int(x) for x in np.logspace(np.log2(4), np.log2(16), base=2, num=16)),\n 'learning_rate': list(np.logspace(np.log10(0.001), np.log10(0.01), base=10, num=100)),\n 'batch_size': list(int(x) for x in np.logspace(np.log2(4), np.log2(512), base=2, num=8)), # might have to search between 4 and 32 to avoid memory issues\n 'temp_dropout_rate': [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5],\n 'kernel_size': {#1: list(range(4, 25)),\n #2: [5, 6, 7, 8, 9, 10],\n #3: [3, 4, 5, 6, 7],\n #4: [2, 3, 4, 5, 6],\n 5: [2, 3, 4, 5],\n 6: [2, 3, 4, 5],\n 7: [2, 3, 4, 5],\n 8: [2, 3, 4, 5],\n 9: [3, 4],\n 10: [3, 4],\n 11: [3, 4],\n 12: [3, 4]}\n }\n\n c['n_layers'] = random.choice(param_grid['n_layers'])\n c['kernel_size'] = random.choice(param_grid['kernel_size'][c['n_layers']])\n c['temp_kernels'] = [random.choice(param_grid['temp_kernels'])] * c['n_layers']\n c['point_sizes'] = [random.choice(param_grid['point_sizes'])] * c['n_layers']\n c['learning_rate'] = round(random.choice(param_grid['learning_rate']), 5)\n c['batch_size'] = random.choice(param_grid['batch_size'])\n c['temp_dropout_rate'] = random.choice(param_grid['temp_dropout_rate'])\n\n return c\n\n\nif __name__=='__main__':\n\n\n for i in range(50):\n try:\n c = get_hyperparam_config('eICU')\n log_folder_path = create_folder('models/experiments/hyperparameters/eICU', c.exp_name)\n tpc = TPC(config=c,\n n_epochs=c.n_epochs,\n name=c.exp_name,\n base_dir=log_folder_path,\n explogger_kwargs={'folder_format': '%Y-%m-%d_%H%M%S{run_number}'})\n tpc.run()\n\n except RuntimeError:\n continue" ]
[ [ "numpy.log2", "numpy.log10" ] ]
mchatton/pyroomacoustics
[ "913b45a311634283fe28dc5d133b27b8b610627b" ]
[ "pyroomacoustics/tests/test_build_rir.py" ]
[ "from __future__ import division, print_function\nimport pyroomacoustics as pra\nimport numpy as np\n\ntry:\n from pyroomacoustics import build_rir\n build_rir_available = True\nexcept:\n print('build_rir not available')\n build_rir_available = False\n\n# tolerance for test success (1%)\ntol = 0.01\n\nfdl = 81\nfs = 16000\n\nt0 = (2 * fdl + 0.1) / fs\nt1 = (3 * fdl - 0.1) / fs\nt2 = (4 * fdl + 0.45) / fs\nt3 = (5 * fdl + 0.001) / fs\nt4 = (6 * fdl + 0.999) / fs\n\ntimes = np.array(\n [\n [ t0 , t1 + (1 / 40 / 16000), t2, ],\n [ t0, t1 + (10 / fs), 3 * t3, ],\n [ t0, t3, t4, ],\n ],\n )\nalphas = np.array(\n [\n [ 1., 0.5, -0.1 ],\n [ 0.5, 0.3, 0.1 ],\n [ 0.3, 2., 0.1 ],\n ],\n )\nvisibilities = np.array(\n [\n [ 1, 1, 1,],\n [ 1, 1, 1,],\n [ 0, 1, 1,],\n ],\n dtype=np.int32,\n )\n\ntime_short = np.array([0.])\ntime_long = np.array([0.1])\nalpha_dummy = np.array([1.])\nvisibility_dummy = np.array([1], dtype=np.int32)\n\n\ndef build_rir_wrap(time, alpha, visibility, fs, fdl):\n\n # fractional delay length\n fdl = pra.constants.get('frac_delay_length')\n fdl2 = (fdl-1) // 2\n\n # the number of samples needed\n N = int(np.ceil(time.max() * fs) + fdl)\n\n ir_ref = np.zeros(N)\n ir_cython = np.zeros(N)\n\n # Try to use the Cython extension\n build_rir.fast_rir_builder(ir_cython, time, alpha, visibility, fs, fdl)\n\n # fallback to pure Python implemenation\n for i in range(time.shape[0]):\n if visibility[i] == 1:\n time_ip = int(np.round(fs * time[i]))\n time_fp = (fs * time[i]) - time_ip\n ir_ref[time_ip-fdl2:time_ip+fdl2+1] += alpha[i] * pra.fractional_delay(time_fp)\n\n return ir_ref, ir_cython\n\ndef test_build_rir():\n\n if not build_rir_available:\n return\n\n for t, a, v in zip(times, alphas, visibilities):\n ir_ref, ir_cython = build_rir_wrap(times[0], alphas[0], visibilities[0], fs, fdl)\n assert np.max(np.abs(ir_ref - ir_cython)) < tol\n\ndef test_short():\n ''' Tests that an error is raised if a provided time goes below the zero index '''\n\n if not build_rir_available:\n return\n\n N = 100\n fs = 16000\n fdl = 81\n rir = np.zeros(N)\n\n time = np.array([0.])\n alpha = np.array([1.])\n visibility = np.array([1], dtype=np.int32)\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha, visibility, fs, fdl)\n assert False\n except AssertionError:\n print('Ok, short times are caught')\n\n\n\ndef test_long():\n ''' Tests that an error is raised if a time falls outside the rir array '''\n\n if not build_rir_available:\n return\n\n N = 100\n fs = 16000\n fdl = 81\n rir = np.zeros(N)\n\n time = np.array([(N-1) / fs])\n alpha = np.array([1.])\n visibility = np.array([1], dtype=np.int32)\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha, visibility, fs, fdl)\n assert False\n except AssertionError:\n print('Ok, long times are caught')\n\ndef test_errors():\n ''' Tests that errors are raised when array lengths differ '''\n\n if not build_rir_available:\n return\n\n N = 300\n fs = 16000\n fdl = 81\n rir = np.zeros(N)\n\n time = np.array([100 / fs, 200 / fs])\n alpha = np.array([1., 1.])\n visibility = np.array([1, 1], dtype=np.int32)\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha[:1], visibility, fs, fdl)\n assert False\n except:\n print('Ok, alpha error occured')\n pass\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha, visibility[:1], fs, fdl)\n assert False\n except:\n print('Ok, visibility error occured')\n pass\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha, visibility, fs, 80)\n assert False\n except:\n print('Ok, fdl error occured')\n pass\n\n\nif __name__ == 
'__main__':\n\n import matplotlib.pyplot as plt\n\n for t, a, v in zip(times, alphas, visibilities):\n ir_ref, ir_cython = build_rir_wrap(times[0], alphas[0], visibilities[0], fs, fdl)\n\n print('Error:', np.max(np.abs(ir_ref - ir_cython)))\n\n plt.figure()\n plt.plot(ir_ref, label='ref')\n plt.plot(ir_cython, label='cython')\n plt.legend()\n\n test_short()\n test_long()\n test_errors()\n\n plt.show()\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.abs", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.plot", "numpy.round" ] ]
sarikayamehmet/Framework-for-Actor-Critic-deep-reinforcement-learning-algorithms
[ "a2902f903956427074769b71b41ddc81e10276c3" ]
[ "A3C/environment/car_controller_environment.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport matplotlib\nmatplotlib.use('Agg',force=True) # no display\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom matplotlib.patches import Circle\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.lines import Line2D\n\nimport numpy as np\nfrom scipy import optimize\nfrom collections import deque\nfrom environment.environment import Environment\n\nimport options\nflags = options.get() # get command line args\n\t\nclass CarControllerEnvironment(Environment):\n\n\tdef get_state_shape(self):\n\t\t# There are 2 types of objects (obstacles and lanes), each object has 3 numbers (x, y and size)\n\t\tif self.max_obstacle_count > 0:\n\t\t\treturn (2,max(self.control_points_per_step,self.max_obstacle_count),3)\n\t\treturn (1,self.control_points_per_step,2) # no need for size because there are only lanes\n\n\tdef get_action_shape(self):\n\t\treturn (2,) # steering angle, continuous control without softmax\n\t\n\tdef __init__(self, thread_index):\n\t\tEnvironment.__init__(self)\n\t\tself.thread_index = thread_index\n\t\tself.max_step = 100\n\t\tself.control_points_per_step = 5\n\t\tself.mean_seconds_per_step = 0.1 # in average, a step every n seconds\n\t\tself.horizon_distance = 1 # meters\n\t\tself.max_distance_to_path = 0.1 # meters\n\t\t# obstacles related stuff\n\t\tself.max_obstacle_count = 3\n\t\tself.min_obstacle_radius = 0.15 # meters\n\t\tself.max_obstacle_radius = 0.45 # meters\n\t\t# information about speed parameters: http://www.ijtte.com/uploads/2012-10-01/5ebd8343-9b9c-b1d4IJTTE%20vol2%20no3%20%287%29.pdf\n\t\tself.min_speed = 0.1 # m/s\n\t\tself.max_speed = 1.4 # m/s\n\t\tself.speed_lower_limit = 0.7 # m/s # used together with max_speed to get the random speed upper limit\n\t\tself.max_speed_noise = 0.25 # m/s\n\t\t# the fastest car has max_acceleration 9.25 m/s (https://en.wikipedia.org/wiki/List_of_fastest_production_cars_by_acceleration)\n\t\t# the slowest car has max_acceleration 0.7 m/s (http://automdb.com/max_acceleration)\n\t\tself.max_acceleration = 0.7 # m/s\n\t\tself.max_steering_degree = 30\n\t\tself.max_steering_noise_degree = 2\n\t\tself.max_steering_angle = convert_degree_to_radiant(self.max_steering_degree)\n\t\tself.max_steering_noise_angle = convert_degree_to_radiant(self.max_steering_noise_degree)\n\t\t# splines related stuff\n\t\tself.spline_number = 2\n\t\tself.control_points_per_spline = 50\n\t\t# evaluator stuff\n\t\tself.episodes = deque()\n\t\t# shapes\n\t\tself.state_shape = self.get_state_shape()\n\t\tself.action_shape = self.get_action_shape()\n\t\n\tdef reset(self):\n\t\tself.step = 0\n\t\tself.seconds_per_step = self.get_step_seconds()\n\t\tself.path = self.build_random_path()\n\t\t# car position\n\t\tself.car_point = (0,0) # car point and orientation are always expressed with respect to the initial point and orientation of the road fragment\n\t\tself.car_progress, self.car_goal = self.get_position_and_goal(point=self.car_point)\n\t\tself.car_angle = self.get_angle_from_position(self.car_progress)\n\t\t# speed limit\n\t\tself.speed_upper_limit = self.speed_lower_limit + (self.max_speed-self.speed_lower_limit)*np.random.random() # in [speed_lower_limit,max_speed]\n\t\t# steering angle & speed\n\t\tself.speed = self.min_speed + (self.max_speed-self.min_speed)*np.random.random() # in [min_speed,max_speed]\n\t\tself.steering_angle = 
0\n\t\t# get obstacles\n\t\tself.obstacles = self.get_new_obstacles()\n\t\t# init concat variables\n\t\tself.last_reward = 0\n\t\tself.last_state = self.get_state(car_point=self.car_point, car_angle=self.car_angle, car_progress=self.car_progress, car_goal=self.car_goal, obstacles=self.obstacles)\n\t\t# init log variables\n\t\tself.cumulative_reward = 0\n\t\tself.avg_speed_per_steps = 0\n\t\t\t\n\tdef get_new_obstacles(self):\n\t\tif self.max_obstacle_count <= 0:\n\t\t\treturn []\n\t\tobstacles = []\n\t\tpresence_mask = np.random.randint(2, size=self.max_obstacle_count)\n\t\tfor i in range(self.max_obstacle_count):\n\t\t\tif presence_mask[i] == 1: # obstacle is present\n\t\t\t\tpoint = self.get_point_from_position(self.spline_number*np.random.random())\n\t\t\t\tradius = self.min_obstacle_radius + (self.max_obstacle_radius-self.min_obstacle_radius)*np.random.random() # in [min_obstacle_radius,max_obstacle_radius]\n\t\t\t\tobstacles.append((point,radius))\n\t\treturn obstacles\n\t\t\n\tdef get_closest_obstacle(self, point, obstacles):\n\t\tif len(obstacles) == 0:\n\t\t\treturn None\n\t\tobstacle_distances_from_point = map(lambda obstacle: (obstacle, euclidean_distance(obstacle[0], point)-obstacle[1]), obstacles)\n\t\treturn min(obstacle_distances_from_point, key=lambda tup: tup[1])[0]\n\t\t\n\tdef get_point_from_position(self, position):\n\t\tspline = int(np.ceil(position)-1)\n\t\tif spline <= 0: # first spline \n\t\t\treturn (poly(position,self.U[0]), poly(position,self.V[0]))\n\t\t# second spline\n\t\treturn rotate_and_shift(poly(position-spline,self.U[spline]), poly(position-spline,self.V[spline]), self.middle_point[spline-1][0], self.middle_point[spline-1][1], self.theta[spline-1])\n\t\t\n\tdef get_angle_from_position(self, position):\n\t\tspline = int(np.ceil(position)-1)\n\t\tif spline <= 0: # first spline \n\t\t\treturn angle(position, self.U[0], self.V[0])\n\t\t# second spline\n\t\treturn angle(position-spline, self.U[spline], self.V[spline])+self.theta[spline-1]\n\t\t\n\tdef build_random_path(self):\n\t\t# setup environment\n\t\tself.U = []\n\t\tself.V = []\n\t\tself.theta = []\n\t\tself.middle_point = []\n\t\tfor i in range(self.spline_number):\n\t\t\tU, V = generate_random_polynomial()\n\t\t\tself.U.append(U)\n\t\t\tself.V.append(V)\n\t\t\tself.theta.append(angle(1, U, V))\n\t\t\tself.middle_point.append(self.get_point_from_position(i+1))\n\t\t# we generate all points for both polynomials, then we shall draw only a portion of them\n\t\tself.positions = np.linspace(start=0, stop=self.spline_number, num=self.spline_number*self.control_points_per_spline) # first spline is in [0,1] while the second one is in [1,2]\n\t\txy = [self.get_point_from_position(pos) for pos in self.positions]\n\t\treturn list(zip(*xy))\n\n\tdef is_terminal_position(self, position):\n\t\treturn position >= self.spline_number*0.9\n\n\tdef get_position_and_goal(self, point):\n\t\t# Find the closest spline point\n\t\tcar_closest_position = optimize.minimize_scalar(lambda pos: euclidean_distance(point, self.get_point_from_position(pos)), method='bounded', bounds=(0,self.spline_number))\n\t\tcar_position = car_closest_position.x\n\t\t# Find closest control point on horizon\n\t\tclosest_goal = optimize.minimize_scalar(lambda pos: np.absolute(euclidean_distance(point, self.get_point_from_position(pos))-self.horizon_distance), method='bounded', bounds=(car_position,self.spline_number))\n\t\tgoal = closest_goal.x\n\t\treturn car_position, goal\n\n\tdef move(self, point, angle, steering_angle, speed, 
add_noise=False):\n\t\t# add noise\n\t\tif add_noise:\n\t\t\tsteering_angle += (2*np.random.random()-1)*self.max_steering_noise_angle\n\t\t\tsteering_angle = np.clip(steering_angle, -self.max_steering_angle, self.max_steering_angle) # |steering_angle| <= max_steering_angle, ALWAYS\n\t\t\tspeed += (2*np.random.random()-1)*self.max_speed_noise\n\t\t# get new angle\n\t\tnew_angle = angle+steering_angle\n\t\t# move point\n\t\tx, y = point\n\t\tdir_x, dir_y = get_heading_vector(angle=new_angle, space=speed*self.seconds_per_step)\n\t\treturn (x+dir_x, y+dir_y), new_angle\n\n\tdef get_steering_angle_from_action(self, action): # action is in [-1,1]\n\t\treturn action*self.max_steering_angle # in [-max_steering_angle, max_steering_angle]\n\t\t\n\tdef get_acceleration_from_action(self, action): # action is in [-1,1]\n\t\treturn action*self.max_acceleration # in [-max_acceleration, max_acceleration]\n\t\t\n\tdef accelerate(self, speed, acceleration):\n\t\treturn np.clip(speed + acceleration*self.seconds_per_step, self.min_speed, self.max_speed)\n\t\t\n\tdef get_step_seconds(self):\n\t\treturn np.random.exponential(scale=self.mean_seconds_per_step)\n\n\tdef process(self, action_vector):\n\t\t# first of all, get the seconds passed from last step\n\t\tself.seconds_per_step = self.get_step_seconds()\n\t\t# compute new steering angle\n\t\tself.steering_angle = self.get_steering_angle_from_action(action=action_vector[0])\n\t\t# compute new acceleration\n\t\tself.acceleration = self.get_acceleration_from_action(action=action_vector[1])\n\t\t# compute new speed\n\t\tself.speed = self.accelerate(speed=self.speed, acceleration=self.acceleration)\n\t\t# move car\n\t\tself.car_point, self.car_angle = self.move(point=self.car_point, angle=self.car_angle, steering_angle=self.steering_angle, speed=self.speed, add_noise=True)\n\t\t# update position and direction\n\t\tcar_position, car_goal = self.get_position_and_goal(point=self.car_point)\n\t\t# compute perceived reward\n\t\treward, dead = self.get_reward(car_speed=self.speed, car_point=self.car_point, car_progress=self.car_progress, car_position=car_position, obstacles=self.obstacles)\n\t\tif car_position > self.car_progress: # is moving toward next position\n\t\t\tself.car_progress = car_position # progress update\n\t\t\tself.car_goal = car_goal\n\t\t# compute new state (after updating progress)\n\t\tstate = self.get_state(car_point=self.car_point, car_angle=self.car_angle, car_progress=self.car_progress, car_goal=self.car_goal, obstacles=self.obstacles)\n\t\t# update last action/state/reward\n\t\tself.last_state = state\n\t\tself.last_reward = reward\n\t\t# update cumulative reward\n\t\tself.cumulative_reward += reward\n\t\tself.avg_speed_per_steps += self.speed\n\t\t# update step\n\t\tself.step += 1\n\t\tterminal = dead or self.is_terminal_position(self.car_goal) or self.step >= self.max_step\n\t\tif terminal: # populate statistics\n\t\t\tstats = {\n\t\t\t\t\"avg_speed\": self.avg_speed_per_steps/self.step,\n\t\t\t\t\"reward\": self.cumulative_reward,\n\t\t\t\t\"step\": self.step,\n\t\t\t\t\"completed\": 1 if self.is_terminal_position(self.car_goal) else 0\n\t\t\t}\n\t\t\tif self.max_obstacle_count > 0:\n\t\t\t\tstats[\"hit\"] = 1 if dead else 0\n\t\t\tself.episodes.append(stats)\n\t\t\tif len(self.episodes) > flags.match_count_for_evaluation:\n\t\t\t\tself.episodes.popleft()\n\t\treturn state, reward, terminal\n\t\n\tdef get_concatenation_size(self):\n\t\treturn 4\n\t\t\n\tdef get_concatenation(self):\n\t\treturn [self.steering_angle, self.speed, 
self.seconds_per_step, self.speed_upper_limit]\n\t\t\n\tdef get_reward(self, car_speed, car_point, car_progress, car_position, obstacles):\n\t\tmax_distance_to_path = self.max_distance_to_path\n\t\tcar_projection_point = self.get_point_from_position(car_position)\n\t\tclosest_obstacle = self.get_closest_obstacle(point=car_projection_point, obstacles=obstacles)\n\t\tif closest_obstacle is not None:\n\t\t\tobstacle_point, obstacle_radius = closest_obstacle\n\t\t\tif euclidean_distance(obstacle_point, car_point) <= obstacle_radius: # collision\n\t\t\t\treturn (-1, True) # terminate episode\n\t\t\tif euclidean_distance(obstacle_point, car_projection_point) <= obstacle_radius: # could collide obstacle\n\t\t\t\tmax_distance_to_path += obstacle_radius\n\t\tif car_position > car_progress: # is moving toward next position\n\t\t\tdistance = euclidean_distance(car_point, car_projection_point)\n\t\t\tdistance_ratio = np.clip(distance/max_distance_to_path, 0,1) # always in [0,1]\n\t\t\tinverse_distance_ratio = 1 - distance_ratio\n\t\t\t# the more car_speed > self.speed_upper_limit, the bigger the malus\n\t\t\tmalus = self.speed_upper_limit*max(0,car_speed/self.speed_upper_limit-1)*self.seconds_per_step\n\t\t\t# smaller distances to path give higher rewards\n\t\t\tbonus = min(car_speed,self.speed_upper_limit)*self.seconds_per_step*inverse_distance_ratio\n\t\t\treturn (bonus-malus, False) # do not terminate episode\n\t\t# else is NOT moving toward next position\n\t\treturn (-0.1, False) # do not terminate episode\n\t\t\n\tdef get_state(self, car_point, car_angle, car_progress, car_goal, obstacles):\n\t\tstate = np.zeros(self.state_shape)\n\t\tcar_x, car_y = car_point\n\t\tcontrol_distance = (car_goal - car_progress)/self.control_points_per_step\n\t\t# add control points\n\t\tfor i in range(self.control_points_per_step):\n\t\t\tcp_x, cp_y = self.get_point_from_position(car_progress + (i+1)*control_distance)\n\t\t\trcp_x, rcp_y = shift_and_rotate(cp_x, cp_y, -car_x, -car_y, -car_angle) # get control point with coordinates relative to car point\n\t\t\tif self.max_obstacle_count > 0:\n\t\t\t\tstate[0][i] = (rcp_x, rcp_y, 0) # no collision with lanes\n\t\t\telse:\n\t\t\t\tstate[0][i] = (rcp_x, rcp_y)\n\t\t# add obstacles\n\t\tfor (j, obstacle) in enumerate(obstacles):\n\t\t\tobstacle_point, obstacle_radius = obstacle\n\t\t\tif euclidean_distance(obstacle_point,car_point) <= self.horizon_distance+obstacle_radius:\n\t\t\t\tro_x, ro_y = shift_and_rotate(obstacle_point[0], obstacle_point[1], -car_x, -car_y, -car_angle) # get control point with coordinates relative to car point\n\t\t\t\tstate[1][j] = (ro_x, ro_y, obstacle_radius)\n\t\treturn state\n\t\t\n\tdef get_screen(self): # RGB array\n\t\t# First set up the figure and the axis\n\t\t# fig, ax = matplotlib.pyplot.subplots(nrows=1, ncols=1, sharey=False, sharex=False, figsize=(10,10)) # this method causes memory leaks\n\t\tfigure = Figure(figsize=(5,5))\n\t\tcanvas = FigureCanvas(figure)\n\t\tax = figure.add_subplot(111) # nrows=1, ncols=1, index=1\n\t\t# [Obstacles]\n\t\tif len(self.obstacles) > 0:\n\t\t\tcircles = [Circle(point,radius,color='b') for (point,radius) in self.obstacles]\n\t\t\tpatch_collection = PatchCollection(circles, match_original=True)\n\t\t\tax.add_collection(patch_collection)\n\t\t# [Car]\n\t\tcar_x, car_y = self.car_point\n\t\tcar_handle = ax.scatter(car_x, car_y, marker='o', color='g', label='Car')\n\t\t# [Heading Vector]\n\t\tdir_x, dir_y = get_heading_vector(angle=self.car_angle)\n\t\theading_vector_handle, = ax.plot([car_x, 
car_x+dir_x],[car_y, car_y+dir_y], color='g', alpha=0.5, label='Heading Vector')\n\t\t# [Goal]\n\t\twaypoint_x, waypoint_y = self.get_point_from_position(self.car_goal)\n\t\tgoal_handle = ax.scatter(waypoint_x, waypoint_y, marker='o', color='r', label='Horizon')\n\t\t# [Path]\n\t\tpath_handle, = ax.plot(self.path[0], self.path[1], lw=2, alpha=0.5, label='Path')\n\t\t# Adjust ax limits in order to get the same scale factor on both x and y\n\t\ta,b = ax.get_xlim()\n\t\tc,d = ax.get_ylim()\n\t\tmax_length = max(d-c, b-a)\n\t\tax.set_xlim([a,a+max_length])\n\t\tax.set_ylim([c,c+max_length])\n\t\t# Build legend\n\t\thandles = [car_handle,heading_vector_handle,goal_handle,path_handle]\n\t\tif len(self.obstacles) > 0:\n\t\t\t# https://stackoverflow.com/questions/11423369/matplotlib-legend-circle-markers\n\t\t\thandles.append(Line2D(range(1), range(1), color=\"white\", marker='o', markerfacecolor=\"blue\", label='Obstacle'))\n\t\tax.legend(handles=handles)\n\t\t# Draw plot\n\t\tfigure.suptitle('[Speed]{0:.2f} m/s [Angle]{1:.2f} deg \\n [Limit]{3:.2f} m/s [Step]{2}'.format(self.speed,convert_radiant_to_degree(self.steering_angle), self.step, self.speed_upper_limit))\n\t\tcanvas.draw()\n\t\t# Save plot into RGB array\n\t\tdata = np.fromstring(figure.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n\t\tdata = data.reshape(figure.canvas.get_width_height()[::-1] + (3,))\n\t\treturn data # RGB array\n\t\t\n\tdef get_frame_info(self, network, value, action, reward, policy):\n\t\tstate_info = \"reward={}, speed={}, steering_angle={}, agent={}, value={}, policy={}\\n\".format(reward, self.speed, self.steering_angle, network.agent_id, value, policy)\n\t\tstate_info += \"state={}\\n\".format(self.last_state)\n\t\taction_info = \"action={}\\n\".format(action)\n\t\tframe_info = { \"log\": state_info + action_info }\n\t\tif flags.save_episode_screen:\n\t\t\tframe_info[\"screen\"] = { \"value\": self.get_screen(), \"type\": 'RGB' }\n\t\treturn frame_info\n\t\t\n\tdef get_statistics(self):\n\t\tresult = {}\n\t\tresult[\"avg_reward\"] = 0\n\t\tresult[\"avg_step\"] = 0\n\t\tresult[\"avg_speed\"] = 0\n\t\tresult[\"avg_completed\"] = 0\n\t\tif self.max_obstacle_count > 0:\n\t\t\tresult[\"avg_hit\"] = 0\n\t\tcount = len(self.episodes)\n\t\tif count>0:\n\t\t\tresult[\"avg_reward\"] = sum(e[\"reward\"] for e in self.episodes)/count\n\t\t\tresult[\"avg_step\"] = sum(e[\"step\"] for e in self.episodes)/count\n\t\t\tresult[\"avg_speed\"] = sum(e[\"avg_speed\"] for e in self.episodes)/count\n\t\t\tresult[\"avg_completed\"] = sum(e[\"completed\"] for e in self.episodes)/count\n\t\t\tif self.max_obstacle_count > 0:\n\t\t\t\tresult[\"avg_hit\"] = sum(e[\"hit\"] for e in self.episodes)/count\n\t\treturn result\n\t\t\ndef rotate(x,y,theta):\n\treturn (x*np.cos(theta)-y*np.sin(theta), x*np.sin(theta)+y*np.cos(theta))\n\ndef shift_and_rotate(xv,yv,dx,dy,theta):\n\treturn rotate(xv+dx,yv+dy,theta)\n\ndef rotate_and_shift(xv,yv,dx,dy,theta):\n\t(x,y) = rotate(xv,yv,theta)\n\treturn (x+dx,y+dy)\n\ndef generate_random_polynomial():\n\t#both x and y are defined by two polynomials in a third variable p, plus\n\t#an initial angle (that, when connecting splines, will be the same as\n\t#the final angle of the previous polynomial)\n\t#Both polynomials are third order.\n\t#The polynomial for x is aU, bU, cU, dU\n\t#The polynomial for y is aV, bV, cV, dV\n\t#aU and bU are always 0 (start at origin) and bV is always 0 (derivative at\n\t#origin is 0). 
bU must be positive\n\t# constraint: initial coordinates must be the same as the\n\t# ending coordinates of the previous polynomial\n\taU = 0\n\taV = 0\n\t# initial derivative must be the same as the ending\n\t# derivative of the previous polynomial\n\tbU = (10-6)*np.random.random()+6 #around 8\n\tbV = 0\n\t#we randomly generate values for cU and dU in the range ]-1,1[\n\tcU = 2*np.random.random()-1\n\tdU = 2*np.random.random()-1\n\tfinalV = 10*np.random.random()-5\n\t#final angle between -pi/6 and pi/6; finald is its tangent (the final slope)\n\tfinald = np.tan((np.pi/3)*np.random.random() - np.pi/6)\n\t#now we fix parameters to meet the constraints:\n\t#bV + cV + dV = finalV\n\t#angle(1) = finald; see the definition of angle below\n\tUd = bU + 2*cU + 3*dU\n\t#Vd = bV + 2*cV + 3*dV = finald*Ud\n\tdV = finald*Ud - 2*finalV + bV\n\tcV = finalV - dV - bV\n\treturn ((aU,bU,cU,dU), (aV,bV,cV,dV))\n\ndef poly(p, points):\n\treturn points[0] + points[1]*p + points[2]*p**2 + points[3]*p**3\n\ndef derivative(p, points):\n\treturn points[1] + 2*points[2]*p + 3*points[3]*p**2\n\ndef angle(p, U, V):\n\tUd = derivative(p,U)\n\tVd = derivative(p,V)\n\treturn (np.arctan(Vd/Ud)) if abs(Ud) > abs(Vd/1000) else (np.pi/2)\n\t\ndef norm(angle):\n\tif angle >= np.pi:\n\t\tangle -= 2*np.pi\n\telif angle < -np.pi:\n\t\tangle += 2*np.pi\n\treturn angle\n\ndef convert_degree_to_radiant(degree):\n\treturn (degree/180)*np.pi\n\t\ndef convert_radiant_to_degree(radiant):\n\treturn radiant*(180/np.pi)\n\t\ndef get_heading_vector(angle, space=1):\n\treturn (space*np.cos(angle), space*np.sin(angle))\n\t\ndef euclidean_distance(a,b):\n\treturn np.sqrt(sum((j-k)**2 for (j,k) in zip(a,b)))" ]
[ [ "numpy.ceil", "numpy.zeros", "matplotlib.figure.Figure", "numpy.arctan", "matplotlib.collections.PatchCollection", "numpy.cos", "numpy.random.random", "matplotlib.patches.Circle", "numpy.clip", "numpy.random.exponential", "matplotlib.use", "numpy.sin", "matplotlib.backends.backend_agg.FigureCanvasAgg", "numpy.random.randint", "numpy.linspace" ] ]
gimlidc/igre
[ "bf3425e838cca3d1fa8254a2550ecb44774ee0ef" ]
[ "src/tftools/idx2pixel_layer.py" ]
[ "import tensorflow as tf\n\nglobal_visible = None\n\n\nclass Idx2PixelLayer(tf.keras.layers.Layer):\n\n def __init__(self, visible, trainable=False, shift_multi=1, **kwargs):\n \"\"\"\n :param visible: one dimension of visible image (for this dimension [x,y] will be computed)\n \"\"\"\n super(Idx2PixelLayer, self).__init__(**kwargs)\n self.visible = tf.constant(visible, dtype=tf.float32)\n global global_visible\n global_visible = self.visible\n\n def call(self, coords, **kwargs):\n return linear_interpolation(coords)\n\n\ndef reset_visible(stage_data):\n global global_visible\n global_visible = tf.constant(stage_data.copy(), dtype=tf.float32)\n\n\[email protected]_gradient\ndef linear_interpolation(coords):\n \"\"\"\n Calculate image pixel intensities from input coordinates by means of bilinear\n interpolation. Also calculate corresponding gradients for ANN training.\n\n 'Bottom-left', 'bottom-right', 'top-left', 'top-right' mean the four\n neighboring pixels closest to input coordinates. top/bottom corresponds to the\n first axis coordinate, right/left to the second. Coordinate values increase\n from left to right, top to bottom.\n\n\n top_left top_right\n mid_top\n X-----------------------------------X\n | . |\n | . |\n | . |\n | . x |\n mid_left X.......*...........................| mid_right\n | . |\n | . |\n | . |\n | . |\n | . |\n | . |\n | . |\n | . |\n X-------X---------------------------X\n mid_bottom\n bottom_left bottom_right\n \"\"\"\n # multiply gradient by factor to slow down learning of 'bias'\n grad_multiplier = tf.constant(1, dtype=tf.float32)\n visible = global_visible\n\n # ensure that the coordinates are in range [1, max-2] so we can take 2x2 neighbourhood of the coord in the Jacobian\n # TODO: We might do precheck outside this function\n\n # 0 - 400\n coords = tf.subtract(coords, 1)\n # -1 - 399\n coords = tf.math.mod(coords, tf.subtract(tf.cast(visible.shape.as_list()[:-1], dtype=tf.float32), 4))\n # 0 - (401-4) 397\n coords = tf.add(coords, 1)\n # 1 - 398\n # we can do coords -1 and +2 now\n\n # calculate index of top-left point\n idx_low = tf.floor(coords)\n\n # offset of input coordinates from top-left point\n delta = tf.cast(tf.subtract(coords, idx_low), dtype=tf.float32)\n # coords are the size of (batch, 2), delta as well\n\n top_left = tf.gather_nd(visible, tf.cast(idx_low, tf.int32))\n top_right = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [1, 0]), tf.int32))\n bottom_left = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [0, 1]), tf.int32))\n bottom_right = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [1, 1]), tf.int32))\n # these values are of size of [batch_size, input_dimensions]\n\n mid_bottom = tf.add(bottom_right, tf.einsum(\"i,ij->ij\", delta[:, 0], tf.subtract(bottom_left, bottom_right)))\n mid_top = tf.add(top_right, tf.einsum(\"i,ij->ij\", delta[:, 0], tf.subtract(top_left, top_right)))\n\n mid_left = tf.add(bottom_left, tf.einsum(\"i,ij->ij\", delta[:, 1], tf.subtract(top_left, bottom_left)))\n mid_right = tf.add(bottom_right, tf.einsum(\"i,ij->ij\", delta[:, 1], tf.subtract(top_right, bottom_right)))\n\n interpolation = tf.add(mid_bottom, tf.einsum(\"i,ij->ij\", delta[:, 1],\n tf.subtract(mid_top, mid_bottom)))\n\n def compute_2x2_jacobian():\n # This will produce Jacobian of size [batch_size, 2, input_dims]\n # Take bigger neighbourhood around the coord\n ttl = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [0, -1]), tf.int32))\n ttr = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [1, -1]), tf.int32))\n bbl = tf.gather_nd(visible, 
tf.cast(tf.add(idx_low, [0, 2]), tf.int32))\n        bbr = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [1, 2]), tf.int32))\n        tll = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [-1, 0]), tf.int32))\n        trr = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [2, 0]), tf.int32))\n        bll = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [-1, 1]), tf.int32))\n        brr = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [2, 1]), tf.int32))\n\n        mid_bb = tf.add(bbr, tf.einsum(\"i,ij->ij\", delta[:, 0], tf.subtract(bbl, bbr)))\n        mid_tt = tf.add(ttr, tf.einsum(\"i,ij->ij\", delta[:, 0], tf.subtract(ttl, ttr)))\n        mid_ll = tf.add(bll, tf.einsum(\"i,ij->ij\", delta[:, 1], tf.subtract(tll, bll)))\n        mid_rr = tf.add(brr, tf.einsum(\"i,ij->ij\", delta[:, 1], tf.subtract(trr, brr)))\n\n        d_x_r = tf.subtract(mid_rr, mid_right)\n        d_x_c = tf.subtract(mid_right, mid_left)\n        d_x_l = tf.subtract(mid_left, mid_ll)\n        d_y_t = tf.subtract(mid_top, mid_tt)\n        d_y_c = tf.subtract(mid_bottom, mid_top)\n        d_y_b = tf.subtract(mid_bb, mid_bottom)\n\n        # Weighted average of the derivatives\n        d_x = tf.multiply(tf.add(d_x_r, d_x_l), 0.5)\n        d_x = tf.multiply(tf.add(d_x, d_x_c), 0.5)\n        d_y = tf.multiply(tf.add(d_y_t, d_y_b), 0.5)\n        d_y = tf.multiply(tf.add(d_y, d_y_c), 0.5)\n        return d_x, d_y\n\n    d_c_x, d_c_y = compute_2x2_jacobian()\n    jacob = tf.stack([d_c_x, d_c_y], axis=1)\n\n    def grad(dy):\n        \"\"\" This method should return tensor of gradients [batch_size, 2]\"\"\"\n        return tf.multiply(tf.einsum(\"ijk,ik->ij\", jacob, dy), grad_multiplier)\n\n    coords_off_boundary = tf.greater(tf.cast(coords, dtype=tf.float32), tf.cast(visible.shape[:-1], dtype=tf.float32))\n    boundary_condition = tf.logical_or(coords_off_boundary[:, 0], coords_off_boundary[:, 1])\n    masked = tf.where(boundary_condition, tf.zeros(tf.shape(interpolation)), interpolation)\n\n    return masked, grad\n" ]
[ [ "tensorflow.stack", "tensorflow.logical_or", "tensorflow.shape", "tensorflow.subtract", "tensorflow.add", "tensorflow.cast", "tensorflow.einsum", "tensorflow.floor", "tensorflow.constant" ] ]
alexlioralexli/diagnosing_qlearning
[ "20a4338a324c1bab79e6ca65937830529d941302" ]
[ "debugq/envs/env_suite.py" ]
[ "import numpy as np\nimport itertools\nimport random\nfrom debugq.envs import random_obs_wrapper, time_limit_wrapper, env_wrapper\nfrom rlutil.envs.tabular_cy import tabular_env\nfrom rlutil.envs.gridcraft import grid_env_cy\nfrom rlutil.envs.gridcraft import grid_spec_cy\nfrom rlutil.logging import log_utils\nfrom rlutil import math_utils\nfrom rlutil.envs.gridcraft.grid_spec_cy import TileType\n\n\ndef random_grid_env(size_x, size_y, dim_obs=32, time_limit=50, wall_ratio=0.1, smooth_obs=False, distance_reward=True,\n one_hot_obs=False,\n seed=None, absorb=False, tabular=False):\n total_size = size_x * size_y\n locations = list(itertools.product(range(size_x), range(size_y)))\n start_loc = (int(size_x/2), int(size_y/2))\n locations.remove(start_loc)\n\n with math_utils.np_seed(seed):\n # randomly place walls\n wall_locs = random.sample(locations, int(total_size*wall_ratio))\n [locations.remove(loc) for loc in wall_locs]\n\n cand_reward_locs = random.sample(locations, int(0.25 * total_size))\n # pick furthest one from center\n cand_reward_dists = [np.linalg.norm(np.array(reward_loc) - start_loc) for reward_loc in cand_reward_locs]\n furthest_reward = np.argmax(cand_reward_dists)\n reward_loc = cand_reward_locs[furthest_reward]\n locations.remove(cand_reward_locs[furthest_reward])\n\n gs = grid_spec_cy.spec_from_sparse_locations(size_x, size_y, {TileType.START: [start_loc],\n TileType.WALL: wall_locs,\n TileType.REWARD: [reward_loc]})\n\n if distance_reward:\n env = grid_env_cy.DistanceRewardGridEnv(gs, reward_loc[0], reward_loc[1], start_loc[0], start_loc[1])\n else:\n env = grid_env_cy.GridEnv(gs)\n env = env_wrapper.StochasticActionWrapper(env, eps=0.05)\n\n if absorb:\n env = env_wrapper.AbsorbingStateWrapper(env)\n if tabular:\n env = wrap_time(env, time_limit=time_limit)\n else:\n env = wrap_obs_time(env, time_limit=time_limit, one_hot_obs=one_hot_obs, dim_obs=dim_obs, smooth_obs=smooth_obs)\n return env\n\ndef wrap_obs_time(env, dim_obs=32, time_limit=50, smooth_obs=False, one_hot_obs=False):\n if smooth_obs:\n env = random_obs_wrapper.LocalObsWrapper(env, dim_obs=dim_obs)\n elif one_hot_obs:\n env = random_obs_wrapper.OneHotObsWrapper(env)\n else:\n env = random_obs_wrapper.RandomObsWrapper(env, dim_obs=dim_obs)\n env = time_limit_wrapper.TimeLimitWrapper(env, time_limit=time_limit)\n return env\n\ndef wrap_time(env, time_limit=50):\n return time_limit_wrapper.TimeLimitWrapper(env, time_limit=time_limit)\n\n# suite\nENV_KEYS = ['grid16randomobs', 'grid16onehot', 'grid64randomobs', 'grid64onehot', 'cliffwalk', 'pendulum', 'mountaincar', 'sparsegraph']\ndef get_env(name):\n if name == 'grid16randomobs':\n env = random_grid_env(16, 16, dim_obs=16, time_limit=50, wall_ratio=0.2, smooth_obs=False, seed=0)\n elif name == 'grid16onehot':\n env = random_grid_env(16, 16, time_limit=50, wall_ratio=0.2, one_hot_obs=True, seed=0)\n elif name == 'grid16sparse':\n env = random_grid_env(16, 16, time_limit=50, wall_ratio=0.2, one_hot_obs=True, seed=0, distance_reward=False)\n elif name == 'grid64randomobs':\n env = random_grid_env(64, 64, dim_obs=64, time_limit=100, wall_ratio=0.2, smooth_obs=False, seed=0)\n elif name == 'grid64onehot':\n env = random_grid_env(64, 64, time_limit=100, wall_ratio=0.2, one_hot_obs=True, seed=0)\n elif name == 'cliffwalk':\n with math_utils.np_seed(0):\n env = tabular_env.CliffwalkEnv(25)\n # Cliffwalk is unsolvable by QI with moderate entropy - up the reward to reduce the effects.\n env = env_wrapper.AbsorbingStateWrapper(env, absorb_reward=10.0)\n env = 
wrap_obs_time(env, dim_obs=16, time_limit=50)\n elif name == 'pendulum':\n env = tabular_env.InvertedPendulum(state_discretization=32, action_discretization=5)\n env = wrap_time(env, time_limit=50)\n elif name == 'mountaincar':\n env = tabular_env.MountainCar(posdisc=56, veldisc=32)\n # MountainCar is unsolvable by QI with moderate entropy - up the reward to reduce the effects.\n env = env_wrapper.AbsorbingStateWrapper(env, absorb_reward=10.0) \n env = wrap_time(env, time_limit=100)\n elif name == 'sparsegraph':\n with math_utils.np_seed(0):\n env = tabular_env.RandomTabularEnv(num_states=500, num_actions=3, transitions_per_action=1, self_loop=True)\n env = env_wrapper.AbsorbingStateWrapper(env, absorb_reward=10.0)\n env = wrap_obs_time(env, dim_obs=4, time_limit=10)\n else:\n raise NotImplementedError('Unknown env id: %s' % name)\n return env\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]