repo_name (string, 6-130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---|
kuolunwang/DoorGym | [
"d9fbb67382756e659025b640857ede3a3735fb1d"
] | [
"a2c_ppo_acktr/envs.py"
] | [
"import os\nimport sys\n\nimport gym\nimport numpy as np\nimport torch\nfrom gym.spaces.box import Box\n\nfrom baselines import bench\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\nfrom baselines.common.vec_env.vec_env import \\\n VecEnvWrapper, VecEnv, CloudpickleWrapper, clear_mpi_env_vars\nfrom baselines.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom baselines.common.vec_env.shmem_vec_env import ShmemVecEnv, _subproc_worker\nfrom baselines.common.vec_env.vec_normalize import \\\n VecNormalize as VecNormalize_\nfrom baselines.common.vec_env.util import dict_to_obs, obs_space_info, obs_to_dict\n\nimport multiprocessing as mp\nimport ctypes\nfrom baselines import logger\n\n_NP_TO_CT = {np.float64: ctypes.c_double,\n np.float32: ctypes.c_float,\n np.int32: ctypes.c_int32,\n np.int8: ctypes.c_int8,\n np.uint8: ctypes.c_char,\n np.bool: ctypes.c_bool}\n\ntry:\n import dm_control2gym\nexcept ImportError:\n pass\n\ntry:\n import roboschool\nexcept ImportError:\n pass\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pass\n\n# Derived from\n# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/shmem_vec_env.py\nclass ShmemVecEnv_DR(ShmemVecEnv):\n def __init__(self, env_fns, spaces=None, context='spawn'):\n \"\"\"\n If you don't specify observation_space, we'll have to create a dummy\n environment to get it.\n \"\"\"\n\n ctx = mp.get_context(context)\n if spaces:\n observation_space, action_space = spaces\n else:\n logger.log('Creating dummy env object to get spaces')\n with logger.scoped_configure(format_strs=[]):\n dummy = env_fns[0]()\n observation_space, action_space = dummy.observation_space, dummy.action_space\n #dummy.close()\n try:\n self.visionnet_input = dummy.env.env.env.visionnet_input\n self.nn = dummy.env.env.env.nn\n self.xml_path = dummy.env.env.env.xml_path\n if dummy.env.env.env.unity:\n dummy.env.env.env.close() ## HACK>>>\n except Exception as e:\n print(e)\n pass\n del dummy\n\n VecEnv.__init__(self, len(env_fns), observation_space, action_space)\n self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)\n self.obs_bufs = [\n {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}\n for _ in env_fns]\n self.parent_pipes = []\n self.procs = []\n with clear_mpi_env_vars():\n for env_fn, obs_buf in zip(env_fns, self.obs_bufs):\n wrapped_fn = CloudpickleWrapper(env_fn)\n parent_pipe, child_pipe = ctx.Pipe()\n proc = ctx.Process(target=_subproc_worker,\n args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))\n proc.daemon = True\n self.procs.append(proc)\n self.parent_pipes.append(parent_pipe)\n proc.start()\n child_pipe.close()\n self.waiting_step = False\n self.viewer = None\n\n def step_async(self, actions):\n assert len(actions) == len(self.parent_pipes)\n for pipe, act in zip(self.parent_pipes, actions):\n pipe.send(('step', act))\n\n def step_wait(self):\n outs = [pipe.recv() for pipe in self.parent_pipes]\n obs, rews, dones, infos = zip(*outs)\n return self._decode_obses(obs), np.array(rews), np.array(dones), infos\n\ndef make_env(env_id, seed, rank, log_dir, allow_early_resets, env_kwargs=None):\n def _thunk():\n if env_id.find(\"doorenv\")>-1:\n env = gym.make(env_id, **env_kwargs) #\n env._max_episode_steps = 512\n if env_kwargs['unity']:\n env.env.init(rank)\n elif env_id.find('Fetch')>-1:\n env = gym.make(env_id, reward_type=\"sparse\")\n env = gym.wrappers.FlattenDictWrapper(env, 
dict_keys=['observation', 'desired_goal'])\n else:\n if env_id.startswith(\"dm\"):\n _, domain, task = env_id.split('.')\n env = dm_control2gym.make(domain_name=domain, task_name=task)\n else:\n env = gym.make(env_id) \n env._max_episode_steps = 512\n is_atari = hasattr(gym.envs, 'atari') and isinstance(\n env.unwrapped, gym.envs.atari.atari_env.AtariEnv)\n if is_atari:\n env = make_atari(env_id)\n\n env.seed(seed + rank)\n\n obs_shape = env.observation_space.shape\n\n if str(env.__class__.__name__).find('TimeLimit') >= 0:\n env = TimeLimitMask(env)\n\n if log_dir is not None:\n env = bench.Monitor(\n env,\n os.path.join(log_dir, str(rank)),\n allow_early_resets=allow_early_resets)\n\n if is_atari:\n if len(env.observation_space.shape) == 3:\n env = wrap_deepmind(env)\n elif len(env.observation_space.shape) == 3:\n raise NotImplementedError(\n \"CNN models work only for atari,\\n\"\n \"please use a custom wrapper for a custom pixel input env.\\n\"\n \"See wrap_deepmind for an example.\")\n\n # If the input has shape (W,H,3), wrap for PyTorch convolutions\n obs_shape = env.observation_space.shape\n if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:\n env = TransposeImage(env, op=[2, 0, 1])\n\n return env\n\n return _thunk\n\ndef make_vec_envs(env_name,\n seed,\n num_processes,\n gamma,\n log_dir,\n device,\n allow_early_resets,\n num_frame_stack=None,\n env_kwargs=None,):\n envs = [\n make_env(env_name, seed, i, log_dir, allow_early_resets, env_kwargs)\n for i in range(num_processes)\n ]\n\n if len(envs) > 1:\n envs = ShmemVecEnv_DR(envs, context='fork')\n else:\n envs = DummyVecEnv(envs)\n\n if len(envs.observation_space.shape) == 1:\n if gamma is None:\n envs = VecNormalize(envs, ret=False)\n else:\n envs = VecNormalize(envs, gamma=gamma)\n\n envs = VecPyTorch(envs, device)\n\n if num_frame_stack is not None:\n envs = VecPyTorchFrameStack(envs, num_frame_stack, device)\n elif len(envs.observation_space.shape) == 3:\n envs = VecPyTorchFrameStack(envs, 4, device)\n\n return envs\n\n\n# Checks whether done was caused my timit limits or not\nclass TimeLimitMask(gym.Wrapper):\n def step(self, action):\n obs, rew, done, info = self.env.step(action)\n if done and self.env._max_episode_steps == self.env._elapsed_steps:\n info['bad_transition'] = True\n\n return obs, rew, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\n# Can be used to test recurrent policies for Reacher-v2\nclass MaskGoal(gym.ObservationWrapper):\n def observation(self, observation):\n if self.env._elapsed_steps > 0:\n observation[-2:] = 0\n return observation\n\n\nclass TransposeObs(gym.ObservationWrapper):\n def __init__(self, env=None):\n \"\"\"\n Transpose observation space (base class)\n \"\"\"\n super(TransposeObs, self).__init__(env)\n\n\nclass TransposeImage(TransposeObs):\n def __init__(self, env=None, op=[2, 0, 1]):\n \"\"\"\n Transpose observation space for images\n \"\"\"\n super(TransposeImage, self).__init__(env)\n assert len(op) == 3, f\"Error: Operation, {str(op)}, must be dim3\"\n self.op = op\n obs_shape = self.observation_space.shape\n self.observation_space = Box(\n self.observation_space.low[0, 0, 0],\n self.observation_space.high[0, 0, 0], [\n obs_shape[self.op[0]], obs_shape[self.op[1]],\n obs_shape[self.op[2]]\n ],\n dtype=self.observation_space.dtype)\n\n def observation(self, ob):\n return ob.transpose(self.op[0], self.op[1], self.op[2])\n\n\nclass VecPyTorch(VecEnvWrapper):\n def __init__(self, venv, device):\n \"\"\"Return only every `skip`-th frame\"\"\"\n 
super(VecPyTorch, self).__init__(venv)\n self.device = device\n # TODO: Fix data types\n\n def reset(self):\n obs = self.venv.reset()\n obs = torch.from_numpy(obs).float().to(self.device)\n return obs\n\n def step_async(self, actions):\n if isinstance(actions, torch.LongTensor):\n # Squeeze the dimension for discrete actions\n actions = actions.squeeze(1)\n actions = actions.cpu().numpy()\n self.venv.step_async(actions)\n\n def step_wait(self):\n obs, reward, done, info = self.venv.step_wait()\n obs = torch.from_numpy(obs).float().to(self.device)\n reward = torch.from_numpy(reward).unsqueeze(dim=1).float()\n return obs, reward, done, info\n\n\nclass VecNormalize(VecNormalize_):\n def __init__(self, *args, **kwargs):\n super(VecNormalize, self).__init__(*args, **kwargs)\n self.training = True\n\n def _obfilt(self, obs, update=True):\n obfilt = False\n if obfilt:\n if self.ob_rms:\n if self.training and update:\n self.ob_rms.update(obs)\n obs = np.clip((obs - self.ob_rms.mean) /\n np.sqrt(self.ob_rms.var + self.epsilon),\n -self.clipob, self.clipob)\n return obs\n return obs\n\n\n def train(self):\n self.training = True\n\n def eval(self):\n self.training = False\n\n\n# Derived from\n# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py\nclass VecPyTorchFrameStack(VecEnvWrapper):\n def __init__(self, venv, nstack, device=None):\n self.venv = venv\n self.nstack = nstack\n\n wos = venv.observation_space # wrapped ob space\n self.shape_dim0 = wos.shape[0]\n\n low = np.repeat(wos.low, self.nstack, axis=0)\n high = np.repeat(wos.high, self.nstack, axis=0)\n\n if device is None:\n device = torch.device('cpu')\n self.stacked_obs = torch.zeros((venv.num_envs, ) +\n low.shape).to(device)\n\n observation_space = gym.spaces.Box(\n low=low, high=high, dtype=venv.observation_space.dtype)\n VecEnvWrapper.__init__(self, venv, observation_space=observation_space)\n\n def step_wait(self):\n obs, rews, news, infos = self.venv.step_wait()\n self.stacked_obs[:, :-self.shape_dim0] = \\\n self.stacked_obs[:, self.shape_dim0:]\n for (i, new) in enumerate(news):\n if new:\n self.stacked_obs[i] = 0\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs, rews, news, infos\n\n def reset(self):\n obs = self.venv.reset()\n if torch.backends.cudnn.deterministic:\n self.stacked_obs = torch.zeros(self.stacked_obs.shape)\n else:\n self.stacked_obs.zero_()\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs\n\n def close(self):\n self.venv.close()\n"
] | [
[
"numpy.sqrt",
"torch.zeros",
"torch.from_numpy",
"numpy.prod",
"torch.device",
"numpy.repeat",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jakubzadrozny/pixelnerf | [
"989894044a7943c34ac0b29f431fc211d5837fd8"
] | [
"src/model/custom_encoder.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .. import util\n\n\nclass ConvEncoder(nn.Module):\n \"\"\"\n Basic, extremely simple convolutional encoder\n \"\"\"\n\n def __init__(\n self,\n dim_in=3,\n norm_layer=util.get_norm_layer(\"group\"),\n padding_type=\"reflect\",\n use_leaky_relu=True,\n use_skip_conn=True,\n ):\n super().__init__()\n self.dim_in = dim_in\n self.norm_layer = norm_layer\n self.activation = nn.LeakyReLU() if use_leaky_relu else nn.ReLU()\n self.padding_type = padding_type\n self.use_skip_conn = use_skip_conn\n\n # TODO: make these configurable\n first_layer_chnls = 64\n mid_layer_chnls = 128\n last_layer_chnls = 128\n n_down_layers = 3\n self.n_down_layers = n_down_layers\n\n self.conv_in = nn.Sequential(\n nn.Conv2d(dim_in, first_layer_chnls, kernel_size=7, stride=2, bias=False),\n norm_layer(first_layer_chnls),\n self.activation,\n )\n\n chnls = first_layer_chnls\n for i in range(0, n_down_layers):\n conv = nn.Sequential(\n nn.Conv2d(chnls, 2 * chnls, kernel_size=3, stride=2, bias=False),\n norm_layer(2 * chnls),\n self.activation,\n )\n setattr(self, \"conv\" + str(i), conv)\n\n deconv = nn.Sequential(\n nn.ConvTranspose2d(\n 4 * chnls, chnls, kernel_size=3, stride=2, bias=False\n ),\n norm_layer(chnls),\n self.activation,\n )\n setattr(self, \"deconv\" + str(i), deconv)\n chnls *= 2\n\n self.conv_mid = nn.Sequential(\n nn.Conv2d(chnls, mid_layer_chnls, kernel_size=4, stride=4, bias=False),\n norm_layer(mid_layer_chnls),\n self.activation,\n )\n\n self.deconv_last = nn.ConvTranspose2d(\n first_layer_chnls, last_layer_chnls, kernel_size=3, stride=2, bias=True\n )\n\n self.dims = [last_layer_chnls]\n\n def forward(self, x):\n x = util.same_pad_conv2d(x, padding_type=self.padding_type, layer=self.conv_in)\n x = self.conv_in(x)\n\n inters = []\n for i in range(0, self.n_down_layers):\n conv_i = getattr(self, \"conv\" + str(i))\n x = util.same_pad_conv2d(x, padding_type=self.padding_type, layer=conv_i)\n x = conv_i(x)\n inters.append(x)\n\n x = util.same_pad_conv2d(x, padding_type=self.padding_type, layer=self.conv_mid)\n x = self.conv_mid(x)\n x = x.reshape(x.shape[0], -1, 1, 1).expand(-1, -1, *inters[-1].shape[-2:])\n\n for i in reversed(range(0, self.n_down_layers)):\n if self.use_skip_conn:\n x = torch.cat((x, inters[i]), dim=1)\n deconv_i = getattr(self, \"deconv\" + str(i))\n x = deconv_i(x)\n x = util.same_unpad_deconv2d(x, layer=deconv_i)\n x = self.deconv_last(x)\n x = util.same_unpad_deconv2d(x, layer=self.deconv_last)\n return x\n"
] | [
[
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.LeakyReLU",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pjuangph/sagan-pytorch | [
"b766f0c53184cfc02b4220329585a4d59bbfb2c7"
] | [
"model.py"
] | [
"import torch\r\n\r\nfrom torch import nn\r\nfrom torch.nn import init\r\nfrom torch.nn import functional as F\r\n\r\nimport functools\r\nfrom torch.autograd import Variable\r\n\r\n\r\ndef init_linear(linear):\r\n init.xavier_uniform_(linear.weight)\r\n linear.bias.data.zero_()\r\n\r\n\r\ndef init_conv(conv, glu=True):\r\n init.xavier_uniform_(conv.weight)\r\n if conv.bias is not None:\r\n conv.bias.data.zero_()\r\n\r\n\r\nclass SpectralNorm:\r\n def __init__(self, name):\r\n self.name = name\r\n\r\n def compute_weight(self, module):\r\n weight = getattr(module, self.name + '_orig')\r\n u = getattr(module, self.name + '_u')\r\n size = weight.size()\r\n weight_mat = weight.contiguous().view(size[0], -1)\r\n with torch.no_grad():\r\n v = weight_mat.t() @ u\r\n v = v / v.norm()\r\n u = weight_mat @ v\r\n u = u / u.norm()\r\n sigma = u @ weight_mat @ v\r\n weight_sn = weight / sigma\r\n # weight_sn = weight_sn.view(*size)\r\n\r\n return weight_sn, u\r\n\r\n @staticmethod\r\n def apply(module, name):\r\n fn = SpectralNorm(name)\r\n\r\n weight = getattr(module, name)\r\n del module._parameters[name]\r\n module.register_parameter(name + '_orig', weight)\r\n input_size = weight.size(0)\r\n u = weight.new_empty(input_size).normal_()\r\n module.register_buffer(name, weight)\r\n module.register_buffer(name + '_u', u)\r\n\r\n module.register_forward_pre_hook(fn)\r\n\r\n return fn\r\n\r\n def __call__(self, module, input):\r\n weight_sn, u = self.compute_weight(module)\r\n setattr(module, self.name, weight_sn)\r\n setattr(module, self.name + '_u', u)\r\n\r\n\r\ndef spectral_norm(module, name='weight'):\r\n SpectralNorm.apply(module, name)\r\n\r\n return module\r\n\r\n\r\ndef spectral_init(module, gain=1):\r\n init.kaiming_uniform_(module.weight, gain)\r\n if module.bias is not None:\r\n module.bias.data.zero_()\r\n\r\n return spectral_norm(module)\r\n\r\n\r\ndef leaky_relu(input):\r\n return F.leaky_relu(input, negative_slope=0.2)\r\n\r\n\r\nclass SelfAttention(nn.Module):\r\n def __init__(self, in_channel, gain=1):\r\n super().__init__()\r\n\r\n self.query = spectral_init(nn.Conv1d(in_channel, in_channel // 8, 1),\r\n gain=gain)\r\n self.key = spectral_init(nn.Conv1d(in_channel, in_channel // 8, 1),\r\n gain=gain)\r\n self.value = spectral_init(nn.Conv1d(in_channel, in_channel, 1),\r\n gain=gain)\r\n\r\n self.gamma = nn.Parameter(torch.tensor(0.0))\r\n\r\n def forward(self, input):\r\n shape = input.shape\r\n flatten = input.view(shape[0], shape[1], -1)\r\n query = self.query(flatten).permute(0, 2, 1)\r\n key = self.key(flatten)\r\n value = self.value(flatten)\r\n query_key = torch.bmm(query, key)\r\n attn = F.softmax(query_key, 1)\r\n attn = torch.bmm(value, attn)\r\n attn = attn.view(*shape)\r\n out = self.gamma * attn + input\r\n\r\n return out\r\n\r\n\r\nclass ConditionalNorm(nn.Module):\r\n def __init__(self, in_channel, n_class):\r\n super().__init__()\r\n\r\n self.bn = nn.BatchNorm2d(in_channel, affine=False)\r\n self.embed = nn.Embedding(n_class, in_channel * 2)\r\n self.embed.weight.data[:, :in_channel] = 1\r\n self.embed.weight.data[:, in_channel:] = 0\r\n\r\n def forward(self, input, class_id):\r\n out = self.bn(input)\r\n embed = self.embed(class_id)\r\n gamma, beta = embed.chunk(2, 1)\r\n gamma = gamma.unsqueeze(2).unsqueeze(3)\r\n beta = beta.unsqueeze(2).unsqueeze(3)\r\n out = gamma * out + beta\r\n\r\n return out\r\n\r\n\r\nclass ConvBlock(nn.Module):\r\n def __init__(self, in_channel, out_channel, kernel_size=[3, 3],\r\n padding=1, stride=1, n_class=None, bn=True,\r\n 
activation=F.relu, upsample=True, self_attention=False):\r\n super().__init__()\r\n\r\n self.conv = spectral_init(nn.Conv2d(in_channel, out_channel,\r\n kernel_size, stride, padding,\r\n bias=False if bn else True))\r\n\r\n self.upsample = upsample\r\n self.activation = activation\r\n self.bn = bn\r\n if bn:\r\n self.norm = ConditionalNorm(out_channel, n_class)\r\n\r\n self.self_attention = self_attention\r\n if self_attention:\r\n self.attention = SelfAttention(out_channel, 1)\r\n\r\n def forward(self, input, class_id=None):\r\n out = input\r\n if self.upsample:\r\n out = F.interpolate(out, scale_factor=2) # upsample\r\n\r\n out = self.conv(out)\r\n\r\n if self.bn:\r\n out = self.norm(out, class_id)\r\n\r\n if self.activation is not None:\r\n out = self.activation(out)\r\n\r\n if self.self_attention:\r\n out = self.attention(out)\r\n\r\n return out\r\n\r\n\r\nclass Generator(nn.Module):\r\n def __init__(self, att=True, image_size=28, n_class=10, image_channels=3):\r\n \"\"\"Generates an image\r\n\r\n Args:\r\n att (bool, optional): Include attention. Defaults to True.\r\n image_size (int, optional): Pixels (HxW) of the square image. Defaults to 28.\r\n n_class (int, optional): Number of classes (dog, cat, bird). Defaults to 10.\r\n image_channels (int, optional): 1 for Grayscale, 3 for RGB. Defaults to 3.\r\n \"\"\"\r\n super().__init__()\r\n\r\n self.lin_code = spectral_init(nn.Linear(image_size, 4 * 4 * 512))\r\n self.conv = nn.ModuleList([ConvBlock(512, 512, n_class=n_class),\r\n ConvBlock(512, 512, n_class=n_class),\r\n ConvBlock(512, 512, n_class=n_class,\r\n self_attention=att),\r\n ConvBlock(512, 256, n_class=n_class),\r\n ConvBlock(256, 128, n_class=n_class)])\r\n\r\n self.colorize = spectral_init(nn.Conv2d(128, image_channels, [3, 3], padding=1))\r\n\r\n def forward(self, input:torch.Tensor, class_id):\r\n \"\"\"Generates an image from a random input and class_id\r\n\r\n Args:\r\n input (torch.Tensor): random image as input\r\n class_id (torch.Tensor): tensor of integers representing a class \r\n\r\n Returns:\r\n [type]: [description]\r\n \"\"\"\r\n out = self.lin_code(input)\r\n out = F.relu(out)\r\n out = out.view(-1, 512, 4, 4)\r\n\r\n for conv in self.conv: # Use module list because we need to pass a class_id into each one of them\r\n out = conv(out, class_id)\r\n\r\n out = self.colorize(out)\r\n\r\n return torch.tanh(out)\r\n\r\n\r\nclass Discriminator(nn.Module):\r\n def __init__(self, n_class=10):\r\n super().__init__()\r\n\r\n def conv(in_channel, out_channel, stride=2,\r\n self_attention=False):\r\n return ConvBlock(in_channel, out_channel, stride=stride,\r\n bn=False, activation=leaky_relu,\r\n upsample=False, self_attention=self_attention)\r\n\r\n self.conv = nn.Sequential(conv(3, 128),\r\n conv(128, 256),\r\n conv(256, 512, stride=1,\r\n self_attention=True),\r\n conv(512, 512),\r\n conv(512, 512),\r\n conv(512, 512))\r\n\r\n self.linear = spectral_init(nn.Linear(512, 1))\r\n\r\n self.embed = nn.Embedding(n_class, 512)\r\n self.embed.weight.data.uniform_(-0.1, 0.1)\r\n self.embed = spectral_norm(self.embed)\r\n\r\n def forward(self, input, class_id):\r\n out = self.conv(input)\r\n out = out.view(out.size(0), out.size(1), -1)\r\n out = out.sum(2)\r\n out_linear = self.linear(out).squeeze(1)\r\n embed = self.embed(class_id)\r\n prod = (out * embed).sum(1)\r\n\r\n return out_linear + prod\r\n"
] | [
[
"torch.nn.functional.softmax",
"torch.nn.Conv2d",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.init.kaiming_uniform_",
"torch.tanh",
"torch.nn.Conv1d",
"torch.nn.functional.relu",
"torch.no_grad",
"torch.bmm",
"torch.nn.functional.leaky_relu",
"torch.nn.init.xavier_uniform_",
"torch.nn.BatchNorm2d",
"torch.nn.functional.interpolate",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
messwith/phazes | [
"34b67292e6feaa95428fe68a5ceb29c9862e21d4"
] | [
"prototype_test.py"
] | [
"import numpy\nfrom prototype import Changer\n\n\ndef test_changer():\n changer = Changer(0.5, 1, 1)\n matrix = numpy.array([[0, 0, 0]])\n changer.change(matrix)\n assert matrix[0, 2] == 0\n changer.change(matrix)\n assert matrix[0, 2] == -1\n changer.change(matrix)\n assert matrix[0, 2] == 0\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
d04943016/ColorScience | [
"b874d70c217249ec47a6017b47c5e3ca2008a6a8"
] | [
"Help/myNumericalIntegration.py"
] | [
"#!/usr/bin/env python3\r\n# Copyright (c) 2018 Wei-Kai Lee. All rights reserved\r\n\r\n# coding=utf-8\r\n# -*- coding: utf8 -*-\r\n\r\n\r\nimport numpy as np\r\n\r\ndef dx(x):\r\n return x[1:]-x[0:x.size-1]\r\ndef yave(y):\r\n xszie = y.shape[-1]\r\n return ( y[...,1::]+y[...,0:(xszie-1):] )/2\r\ndef myNumericalIntegration(x,y):\r\n \"\"\"\r\n myNumericalIntegration is a function to calculate the area of \r\n a y = function(x) by trapezoid method.\r\n x must be ascending.\r\n\r\n >>> x = np.linspace(0,1,100, dtype=np.float64) \r\n >>> y = x**2\r\n >>> value = myNumericalIntegration(x,y)\r\n >>> print(value)\r\n 0.33335033840084355\r\n >>> x = np.linspace(0,1,1000, dtype=np.float64) \r\n >>> y = x**2\r\n >>> value = myNumericalIntegration(x,y)\r\n >>> print(value)\r\n 0.3333335003338339\r\n >>> x = np.linspace(0,1,10000, dtype=np.float64) \r\n >>> y = x**2\r\n >>> value = myNumericalIntegration(x,y)\r\n >>> print(value)\r\n 0.3333333350003337\r\n \"\"\"\r\n x = np.array(x)\r\n y = np.array(y)\r\n return np.einsum('...i,i->...',yave(y),dx(x))\r\ndef yave2(y):\r\n xszie = y.shape[0]\r\n return ( y[1::,...]+y[0:(xszie-1):,...] )/2\r\ndef myNumericalIntegration2(x,y):\r\n x = np.array(x)\r\n y = np.array(y)\r\n return np.einsum('i...,i->...',yave2(y),dx(x))\r\ndef myTRAPEZOIDAL(fun,x0,x1,xPts=200):\r\n \"\"\"\r\n myTRAPEZOIDAL is a function to calclate the integration of function f from \r\n x0 to x1 with equal spacing. (xPts: points of x)\r\n\r\n >>> import numpy as np\r\n >>> f = lambda x: [np.sin(x), np.cos(x)]\r\n >>> Int, xList, yListList = myTRAPEZOIDAL(f,0,np.pi)\r\n >>> print(Int)\r\n [1.99995846e+00 9.02056208e-17]\r\n >>> Int, xList, yListList = myTRAPEZOIDAL(f,0,np.pi,xPts=300)\r\n >>> print(Int)\r\n [1.99998160e+00 2.68882139e-16]\r\n >>> Int, xList, yListList = myTRAPEZOIDAL(f,0,np.pi,xPts=400)\r\n >>> print(Int)\r\n [1.99998967e+00 1.02348685e-16]\r\n >>> Int, xList, yListList = myTRAPEZOIDAL(f,0,np.pi,xPts=1000)\r\n >>> print(Int)\r\n [ 1.99999835e+00 -8.96418356e-16]\r\n \"\"\"\r\n xList = np.linspace(x0, x1, num=int(xPts) )\r\n data = np.array(fun(xList))\r\n Int = myNumericalIntegration(xList,data)\r\n return Int, xList, data\r\ndef myFunIntegration(f, x0, x1, tol=1e-5, recursiveLim=1e4, xCountStart=100, intfun=myTRAPEZOIDAL):\r\n \"\"\"\r\n >>> import numpy as np\r\n >>> f = lambda x: [np.sin(x), np.cos(x)]\r\n >>> Sn, err, nodes, count = myFunIntegration(f, 0, np.pi, tol=1e-5)\r\n >>> print(Sn)\r\n [1.99999934e+00 2.22044605e-16]\r\n >>> print(err)\r\n 2.781470994472901e-06\r\n >>> print(nodes.size)\r\n 17\r\n >>> print(count)\r\n 8\r\n >>> f = lambda x: [ x**2, x**3]\r\n >>> Sn, err, nodes, count = myFunIntegration(f, -2, 2, tol=1e-5)\r\n >>> print(Sn)\r\n [5.333334 0. 
]\r\n >>> print(err)\r\n 4.433938502017287e-06\r\n >>> print(count)\r\n 24\r\n >>> print(nodes.size)\r\n 49\r\n >>> f = lambda x: [ np.exp(x), np.exp(x**2)]\r\n >>> Sn, err, nodes, count = myFunIntegration(f, -2, 2, tol=1e-5)\r\n >>> print(Sn)\r\n [ 7.25372109 32.90525734]\r\n >>> print(err)\r\n 5.414152568605779e-06\r\n >>> print(count)\r\n 68\r\n >>> print(nodes.size)\r\n 137\r\n \"\"\"\r\n S, xList, yListList = intfun(f, x0, x1, xPts=xCountStart)\r\n Sn, err, nodes, count = recursive_integration1(f, x0, x1, S, tol=tol, recursiveLim=recursiveLim, xCountStart=xCountStart, intfun=intfun)\r\n return Sn, err, nodes, count\r\n \r\n # S, xList, yListList = intfun(f, x0, x1, xPts=xCountStart)\r\n # Sn, err, xList, count = recursive_integration2(f, x0, x1, S, xList, tol=tol, recursiveLim=recursiveLim)\r\n # return Sn, err, count\r\n\r\n # S, xList, yListList = intfun(f, x0, x1, xPts=xCountStart)\r\n # Sn, err, xListNew, yListListNew, count, dxmin = recursive_integration3(f, S, xList, yListList, tol=tol, recursiveLim=recursiveLim, xCountStart=xCountStart, dxMin=dxMin)\r\n # return Sn, err, xListNew, count\r\ndef recursive_integration1(f, x0, x1, S, tol=1e-3, recursiveLim=1e4, xCountStart=100, intfun=myTRAPEZOIDAL):\r\n \"\"\" \r\n f: function of f(x)\r\n [a,b] : the interval of integration\r\n S : the previous integration result\r\n tol : the tolerance\r\n \r\n This is a subfunction of adapt_simpson.\r\n \"\"\"\r\n xc = float(x0+x1)/2\r\n SL, xListL, dataL = intfun(f,x0,xc,xPts=xCountStart)\r\n SR, xListR, dataR = intfun(f,xc,x1,xPts=xCountStart)\r\n Sn = SL+SR\r\n err = max( np.abs(Sn-S) )\r\n if err <= tol or recursiveLim==1:\r\n nodes = np.array([x0,xc,x1])\r\n count = 1\r\n return Sn, err, nodes, count\r\n fac = 0.5\r\n SL, err1, nodes1, countL = recursive_integration1(f, x0, xc, SL, tol=tol*fac, recursiveLim=recursiveLim-1, xCountStart=xCountStart, intfun=intfun)\r\n SR, err2, nodes2, countR = recursive_integration1(f, xc, x1, SR, tol=tol*(1-fac), recursiveLim=recursiveLim-1, xCountStart=xCountStart, intfun=intfun)\r\n err = err1 + err2\r\n nodes = np.append(nodes1, nodes2[1::])\r\n count = countL+1 if countL>=countR else countR+1 # countL+countR # countL+1 if countL>=countR else countR+1\r\n Sn = SL+SR\r\n return Sn, err, nodes, count\r\ndef myMidpointList_Integration2(f, xList, S):\r\n # Mid point\r\n xList = np.array(xList)\r\n xList2 = (xList[0:xList.size-1]+xList[1:xList.size])/2\r\n data = f(xList2)\r\n # Sum\r\n temptsum = np.einsum('...i,i->...', data, dx(xList) ) \r\n Sn = (S+temptsum)/2\r\n # Merge List\r\n xListNew = np.zeros( xList.size+xList2.size, dtype=xList.dtype )\r\n xListNew[0::2] = xList\r\n xListNew[1::2] = xList2\r\n return Sn, xListNew\r\ndef recursive_integration2(f, x0, x1, S, xList, tol=1e-5, recursiveLim=1e4):\r\n \"\"\" \r\n f: function of f(x)\r\n [a,b] : the interval of integration\r\n S : the previous integration result\r\n tol : the tolerance\r\n \r\n This is a subfunction of adapt_simpson.\r\n \"\"\"\r\n Sn, xListNew = myMidpointList_Integration2(f, xList, S)\r\n err = max( np.abs(Sn-S) )\r\n if err <= tol or recursiveLim==1:\r\n count = 1\r\n return Sn, err, xListNew, count\r\n Sn, err, xListNew, count = recursive_integration2(f, x0, x1, Sn, xList=xListNew, tol=tol, recursiveLim=recursiveLim-1)\r\n count = count + 1\r\n return Sn, err, xListNew, count\r\ndef myMidpointList_Integration3(f, xList, yListList):\r\n # Mid point\r\n xList = np.array(xList)\r\n xList2 = (xList[0:xList.size-1]+xList[1:xList.size])/2\r\n data = f(xList2)\r\n # Sum\r\n temptsum = 
np.einsum('...i,i->...', data, dx(xList) ) \r\n # Merge x List\r\n xListNew = np.zeros( xList.size+xList2.size, dtype=xList.dtype )\r\n xListNew[0::2] = xList\r\n xListNew[1::2] = xList2\r\n # Merge y List\r\n yListListNew = np.zeros( (yListList.shape[0], xListNew.size) , dtype=yListList.dtype)\r\n yListListNew[:,0::2] = yListList\r\n yListListNew[:,1::2] = data\r\n # Sum\r\n Sn = myNumericalIntegration(xListNew,yListListNew)\r\n return Sn, xListNew, yListListNew\r\ndef recursive_integration3(f, S, xList, yListList, tol=1e-5, recursiveLim=1e4, xCountStart=100, dxMin=None):\r\n \"\"\" \r\n f: function of f(x)\r\n [a,b] : the interval of integration\r\n S : the previous integration result\r\n tol : the tolerance\r\n \r\n This is a subfunction of adapt_simpson.\r\n \"\"\"\r\n indMid = int(len(xList)/2)\r\n SL, xListL, yListListL = myMidpointList_Integration3(f, xList[:indMid+1:], yListList[:,:indMid+1:] )\r\n SR, xListR, yListListR = myMidpointList_Integration3(f, xList[indMid::], yListList[:,indMid::] )\r\n Sn = SL+SR\r\n err = max( np.abs(Sn-S) )\r\n # End Case\r\n xListNew = np.append( xListL, xListR[1::] )\r\n dxmin = min(dx(xListNew))\r\n if err <= tol or recursiveLim==1 or dxMin==None or dxmin<dxMin:\r\n count = 1\r\n yListListNew = np.append( yListListL, yListListR[:,1::] ) \r\n return Sn, err, xListNew, yListListNew, count, dxmin\r\n # Iterative Case\r\n sL, sR = np.sum(SL), np.sum(SR)\r\n fac = sL/(sL+sR) if (sL+sR)!=0 else 0.5\r\n SL, errL, xListL, yListListL, countL, dxL = recursive_integration3(f, SL, xList=xListL, yListList=yListListL, tol=tol*fac, recursiveLim=recursiveLim-1, dxMin=dxMin)\r\n SR, errR, xListR, yListListR, countR, dxR = recursive_integration3(f, SR, xList=xListR, yListList=yListListR, tol=tol*(1-fac), recursiveLim=recursiveLim-1, dxMin=dxMin)\r\n Sn = SL+SR\r\n err = errL + errR\r\n count = countL+1 if countL>countR else countR+1 #countL+countR # countL+1 if countL>countR else countR+1\r\n xListNew = np.append( xListL, xListR[1::] )\r\n dxmin = dxL if dxL<dxR else dxR\r\n yListListNew = np.append( yListListL, yListListR[:,1::] ) \r\n return Sn, err, xListNew, yListListNew, count, dxmin\r\n\r\nif __name__ == '__main__':\r\n import doctest\r\n doctest.testmod()\r\n \"\"\"\r\n x = []\r\n y = []\r\n value = myNumericalIntegration(x,y)\r\n \"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"numpy.abs",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shettyprithvi/scattertext | [
"a15613b6feef3ddc56c03aadb8e1e629d28a427d",
"a15613b6feef3ddc56c03aadb8e1e629d28a427d",
"a15613b6feef3ddc56c03aadb8e1e629d28a427d"
] | [
"scattertext/termscoring/CohensDCalculator.py",
"scattertext/test/test_scaledFScore.py",
"scattertext/termcompaction/CompactTerms.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\n\n\nclass CohensDCalculator(object):\n def get_cohens_d_df(self, cat_X, ncat_X, correction_method=None):\n empty_cat_X_smoothing_doc = np.zeros((1, cat_X.shape[1]))\n empty_ncat_X_smoothing_doc = np.zeros((1, ncat_X.shape[1]))\n smoothed_cat_X = np.vstack([empty_cat_X_smoothing_doc, cat_X])\n smoothed_ncat_X = np.vstack([empty_ncat_X_smoothing_doc, ncat_X])\n n1, n2 = float(smoothed_cat_X.shape[0]), float(smoothed_ncat_X.shape[0])\n n = n1 + n2\n #print(cat_X.shape, type(cat_X))\n m1 = cat_X.mean(axis=0).A1 if type(cat_X) == np.matrix else cat_X.mean(axis=0)\n m2 = ncat_X.mean(axis=0).A1 if type(ncat_X) == np.matrix else ncat_X.mean(axis=0)\n v1 = smoothed_cat_X.var(axis=0).A1 if type(smoothed_cat_X) == np.matrix else smoothed_cat_X.mean(axis=0)\n v2 = smoothed_ncat_X.var(axis=0).A1 if type(smoothed_ncat_X) == np.matrix else smoothed_ncat_X.mean(axis=0)\n s_pooled = np.sqrt(((n2 - 1) * v2 + (n1 - 1) * v1) / (n - 2.))\n cohens_d = (m1 - m2) / s_pooled\n cohens_d_se = np.sqrt(((n - 1.) / (n - 3)) * (4. / n) * (1 + np.square(cohens_d) / 8.))\n cohens_d_z = cohens_d / cohens_d_se\n cohens_d_p = norm.sf(cohens_d_z)\n hedges_r = cohens_d * (1 - 3. / ((4. * (n - 2)) - 1))\n hedges_r_se = np.sqrt(n / (n1 * n2) + np.square(hedges_r) / (n - 2.))\n hedges_r_z = hedges_r / hedges_r_se\n hedges_r_p = norm.sf(hedges_r_z)\n score_df = pd.DataFrame({\n 'cohens_d': cohens_d,\n 'cohens_d_se': cohens_d_se,\n 'cohens_d_z': cohens_d_z,\n 'cohens_d_p': cohens_d_p,\n 'hedges_r': hedges_r,\n 'hedges_r_se': hedges_r_se,\n 'hedges_r_z': hedges_r_z,\n 'hedges_r_p': hedges_r_p,\n 'm1': m1,\n 'm2': m2,\n }).fillna(0)\n if correction_method is not None:\n from statsmodels.stats.multitest import multipletests\n score_df['hedges_r_p_corr'] = 0.5\n for method in ['cohens_d', 'hedges_r']:\n score_df[method + '_p_corr'] = 0.5\n pvals = score_df.loc[(score_df['m1'] != 0) | (score_df['m2'] != 0), method + '_p']\n pvals = np.min(np.array([pvals, 1. - pvals])) * 2.\n score_df.loc[(score_df['m1'] != 0) | (score_df['m2'] != 0), method + '_p_corr'] = (\n multipletests(pvals, method=correction_method)[1]\n )\n return score_df",
"from unittest import TestCase\n\nimport numpy as np\n\nfrom scattertext.termscoring.ScaledFScore import ScaledFScore, ScaledFScorePresets\n\n\nclass TestScaledFScore(TestCase):\n def test_get_scores(self):\n cat_counts, not_cat_counts = self._get_counts()\n scores = ScaledFScore.get_scores(cat_counts, not_cat_counts, beta=1.)\n np.testing.assert_almost_equal(scores,\n np.array([0.2689108, 0., 0.2689108, 0.1266617, 1.,\n 0.5, 0.5590517, 0.5, 0.5, 0.5720015]))\n\n def test_get_scores_zero_all_same(self):\n cat_counts = np.array([0, 0, 0, 0, 0, 0, 1, 2])\n not_cat_counts = np.array([1, 1, 2, 1, 1, 1, 1, 2])\n scores = ScaledFScore.get_scores(cat_counts, not_cat_counts)\n np.testing.assert_almost_equal(scores, [0.5, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 1.])\n\n def test_score_difference(self):\n cat_counts = np.array([0, 0, 0, 0, 0, 0, 1, 2])\n not_cat_counts = np.array([1, 1, 2, 1, 1, 1, 1, 2])\n scores = ScaledFScorePresets(use_score_difference=True).get_scores(cat_counts, not_cat_counts)\n np.testing.assert_almost_equal(scores, [0.4857218, 0.4857218, 0.1970024, 0.4857218, 0.4857218, 0.4857218,\n 0.8548192, 0.90317])\n\n def test_get_scores_zero_median(self):\n cat_counts = np.array([0, 0, 0, 0, 0, 0, 1, 2])\n not_cat_counts = np.array([1, 1, 2, 1, 1, 1, 1, 3])\n ScaledFScore.get_scores(cat_counts, not_cat_counts)\n\n def get_scores_for_category(self):\n cat_counts, not_cat_counts = self._get_counts()\n scores = ScaledFScore.get_scores_for_category(cat_counts, not_cat_counts)\n np.testing.assert_almost_equal(scores,\n [0.23991183969723384, 0.24969810634506373, 0.23991183969723384,\n 0.27646711056272855, 0.92885244834997516, 0.42010144843632563,\n 0.49166017105966719, 0.0, 0.0, 0.50262304057984664])\n\n def _get_counts(self):\n cat_counts = np.array([1, 5, 1, 9, 100, 1, 1, 0, 0, 2])\n not_cat_counts = np.array([100, 510, 100, 199, 0, 1, 0, 1, 1, 0])\n\n return cat_counts, not_cat_counts\n",
"import numpy as np\n\nfrom scattertext.CSRMatrixTools import CSRMatrixFactory\nfrom scattertext.indexstore import IndexStore\nfrom scattertext.termranking import AbsoluteFrequencyRanker\n\n\nclass CompactTerms(object):\n\tdef __init__(self,\n\t term_ranker=AbsoluteFrequencyRanker,\n\t minimum_term_count=0,\n\t slack=1):\n\t\t'''\n\n\t\tParameters\n\t\t----------\n\t\tterm_ranker : TermRanker\n\t\t\tDefault AbsoluteFrequencyRanker\n\t\tminimum_term_count : int\n\t\t\tDefault 0\n\t\tslack : int\n\t\t\tDefault 1\n\n\t\t'''\n\t\tself.term_ranker = term_ranker\n\t\tself.minimum_term_count = minimum_term_count\n\t\tself.redundancy_slack = slack\n\n\tdef compact(self, term_doc_matrix, non_text=False):\n\t\t'''\n\t\tParameters\n\t\t----------\n\t\tterm_doc_matrix : TermDocMatrix\n\t\t\tTerm document matrix object to compact\n\t\tnon_text : bool\n\t\t\tUse non-text features instead of terms\n\n\t\tReturns\n\t\t-------\n\t\tNew term doc matrix\n\t\t'''\n\t\treturn term_doc_matrix.remove_terms_by_indices(self._indices_to_compact(term_doc_matrix, non_text), non_text)\n\n\tdef _indices_to_compact(self, term_doc_matrix, non_text=False):\n\t\tranker = self.term_ranker(term_doc_matrix)\n\t\tif non_text:\n\t\t\tranker = ranker.use_non_text_features()\n\t\tindicies = self._get_term_indices_to_compact_from_term_freqs(\n\t\t\tranker.get_ranks(),\n\t\t\tterm_doc_matrix,\n\t\t\tnon_text\n\t\t)\n\t\treturn list(indicies)\n\n\tdef _get_term_indices_to_compact_from_term_freqs(self, term_freqs, term_doc_matrix, non_text):\n\t\tidx = IndexStore()\n\t\ttdf_vals = term_freqs.values\n\t\tvalid_terms_mask = tdf_vals.sum(axis=1) >= self.minimum_term_count\n\t\ttdf_vals = term_freqs[valid_terms_mask].values\n\t\tterms = np.array(term_freqs.index)[valid_terms_mask]\n\n\t\tlengths = []\n\t\tfact = CSRMatrixFactory()\n\t\tfor i, t in enumerate(terms):\n\t\t\tfor tok in t.split():\n\t\t\t\tfact[i, idx.getidx(tok)] = 1\n\t\t\tlengths.append(len(t.split()))\n\t\tlengths = np.array(lengths)\n\t\tmat = fact.get_csr_matrix()\n\n\t\tcoocs = lengths - (mat * mat.T)\n\t\tpairs = np.argwhere(coocs == 0).T\n\t\tpairs = self._limit_to_non_identical_terms(pairs)\n\t\tpairs = self._limit_to_pairs_of_bigrams_and_a_constituent_unigram(pairs, terms)\n\t\tpairs = self._limit_to_redundant_unigrams(pairs, tdf_vals)\n\t\tidx_store = term_doc_matrix._get_relevant_idx_store(non_text)\n\t\tredundant_terms = idx_store.getidxstrictbatch(terms[np.unique(pairs[:, 1])])\n\t\tinfrequent_terms = np.argwhere(~valid_terms_mask).T[0]\n\t\tterms_to_remove = np.concatenate([redundant_terms, infrequent_terms])\n\t\treturn terms_to_remove\n\n\tdef _limit_to_redundant_unigrams(self, pairs, tdf_vals):\n\t\treturn pairs[np.all(tdf_vals[pairs[:, 1]] <= tdf_vals[pairs[:, 0]] + self.redundancy_slack, axis=1)]\n\n\tdef _limit_to_pairs_of_bigrams_and_a_constituent_unigram(self, pairs, terms):\n\t\treturn pairs[np.array([terms[i[1]] in terms[i[0]] for i in pairs])]\n\n\tdef _limit_to_non_identical_terms(self, pairs):\n\t\treturn pairs.T[(pairs[0] != pairs[1])]\n"
] | [
[
"numpy.square",
"numpy.sqrt",
"pandas.DataFrame",
"scipy.stats.norm.sf",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
],
[
"numpy.testing.assert_almost_equal",
"numpy.array"
],
[
"numpy.unique",
"numpy.argwhere",
"numpy.concatenate",
"numpy.all",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
holman57/Lafite | [
"9e5981a666cd2dcd3ff2a7f38229d6b8678ce6bb"
] | [
"train.py"
] | [
"\nimport os\nimport click\nimport re\nimport json\nimport tempfile\nimport torch\nimport dnnlib\n\nfrom training import training_loop\nfrom metrics import metric_main\nfrom torch_utils import training_stats\nfrom torch_utils import custom_ops\n\n#----------------------------------------------------------------------------\n\nclass UserError(Exception):\n pass\n\n#----------------------------------------------------------------------------\n\ndef setup_training_loop_kwargs(\n f_dim = None,\n d_use_norm = None, # normalize the feature extracted by discriminator or not\n d_use_fts = None, # discriminator extract semantic feature or not\n mixing_prob= None, # mixing probability of ground-truth and language-free generated pairs, mixing_prob=0 means only use ground-truth, mixing_prob=1. means using only pseudo pairs(language-free)\n lam = None, # hyper-parameter for contrastive loss\n temp = None, # hyper-parameter for contrastive loss\n change = None, # hyper-parameter for architecture\n map_num = None, # hyper-parameter for architecture\n gather = None, # hyper-parameter for contrastive loss\n itd = None, # hyper-parameter for contrastive loss\n itc = None, # hyper-parameter for contrastive loss\n iid = None, # hyper-parameter for contrastive loss\n iic = None, # hyper-parameter for contrastive loss\n metric_only_test = None, # hyper-parameter for computing metrics\n fmap = None, # hyper-parameter for architecture, related to channel number\n ratio = None,\n # General options (not included in desc).\n gpus = None, # Number of GPUs: <int>, default = 1 gpu\n snap = None, # Snapshot interval: <int>, default = 50 ticks\n metrics = None, # List of metric names: [], ['fid50k_full'] (default), ...\n seed = None, # Random seed: <int>, default = 0\n # Dataset.\n data = None, # Training dataset (required): <path>\n test_data = None, # Testing dataset for metrics, if not use training dataset\n cond = None, # Train conditional model based on dataset labels: <bool>, default = False\n subset = None, # Train with only N images: <int>, default = all\n mirror = None, # Augment dataset with x-flips: <bool>, default = False\n\n # Base config.\n cfg = None, # Base config: 'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar'\n gamma = None, # Override R1 gamma: <float>\n kimg = None, # Override training duration: <int>\n batch = None, # Override batch size: <int>\n\n # Discriminator augmentation.\n aug = None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed'\n p = None, # Specify p for 'fixed' (required): <float>\n target = None, # Override ADA target for 'ada': <float>, default = depends on aug\n augpipe = None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'\n\n # Transfer learning.\n resume = None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', <file>, <url>\n freezed = None, # Freeze-D: <int>, default = 0 discriminator layers\n\n # Performance options (not included in desc).\n fp32 = None, # Disable mixed-precision training: <bool>, default = False\n nhwc = None, # Use NHWC memory format with FP16: <bool>, default = False\n allow_tf32 = None, # Allow PyTorch to use TF32 for matmul and convolutions: <bool>, default = False\n nobench = None, # Disable cuDNN benchmarking: <bool>, default = False\n workers = None, # Override number of DataLoader workers: <int>, default = 3\n):\n args = dnnlib.EasyDict()\n\n # ------------------------------------------\n # 
General options: gpus, snap, metrics, seed\n # ------------------------------------------\n if f_dim is None:\n f_dim = 512\n assert isinstance(f_dim, int)\n args.f_dim = f_dim\n\n if ratio is None:\n ratio = 1.0\n args.ratio = ratio\n \n if mixing_prob is None:\n mixing_prob = 0.\n args.mixing_prob = mixing_prob\n \n if fmap is None:\n fmap = 1.\n \n if metric_only_test is None:\n metric_only_test = False\n args.metric_only_test = metric_only_test\n \n if map_num is None:\n map_num = 8\n \n if lam is None:\n lam = 0.\n args.lam = lam\n \n if temp is None:\n temp = 0.5\n args.temp = temp\n \n if itd is None:\n itd = 10.\n args.itd = itd\n if itc is None:\n itc = 10.\n args.itc = itc\n \n if iid is None:\n iid = 0.\n args.iid = iid\n if iic is None:\n iic = 0.\n args.iic = iic\n \n \n \n if change is None:\n change = 256\n \n if d_use_norm is None:\n d_use_norm = False\n assert isinstance(d_use_norm, bool)\n args.d_use_norm = d_use_norm\n \n if d_use_fts is None:\n d_use_fts = True\n args.d_use_fts = d_use_fts\n \n if gather is None:\n gather = False\n args.gather = gather\n \n if gpus is None:\n gpus = 1\n assert isinstance(gpus, int)\n if not (gpus >= 1 and gpus & (gpus - 1) == 0):\n raise UserError('--gpus must be a power of two')\n args.num_gpus = gpus\n\n if snap is None:\n snap = 50\n assert isinstance(snap, int)\n if snap < 1:\n raise UserError('--snap must be at least 1')\n args.image_snapshot_ticks = snap\n args.network_snapshot_ticks = snap\n\n if metrics is None:\n metrics = ['fid50k_full']\n assert isinstance(metrics, list)\n if not all(metric_main.is_valid_metric(metric) for metric in metrics):\n raise UserError('\\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))\n args.metrics = metrics\n\n if seed is None:\n seed = 0\n assert isinstance(seed, int)\n args.random_seed = seed\n\n # -----------------------------------\n # Dataset: data, cond, subset, mirror\n # -----------------------------------\n\n assert data is not None\n assert isinstance(data, str)\n print('using data: ', data, 'testing data: ', test_data)\n if test_data is None:\n test_data = data\n args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False, use_clip=True, ratio=args.ratio)\n args.testing_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=test_data, use_labels=True, max_size=None, xflip=False, use_clip=True, ratio=1.0)\n args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=False, num_workers=1, prefetch_factor=2)\n try:\n training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset\n args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution\n args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels\n args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size\n desc = training_set.name\n args.testing_set_kwargs.resolution = training_set.resolution # be explicit about resolution\n args.testing_set_kwargs.use_labels = training_set.has_labels # be explicit about labels\n del training_set # conserve memory\n\n except IOError as err:\n raise UserError(f'--data: {err}')\n\n if cond is None:\n cond = False\n assert isinstance(cond, bool)\n if cond:\n if not args.training_set_kwargs.use_labels:\n raise UserError('--cond=True requires labels specified in dataset.json')\n desc += '-cond'\n else:\n 
args.training_set_kwargs.use_labels = False\n args.testing_set_kwargs.use_labels = False\n\n if subset is not None:\n assert isinstance(subset, int)\n if not 1 <= subset <= args.training_set_kwargs.max_size:\n raise UserError(f'--subset must be between 1 and {args.training_set_kwargs.max_size}')\n desc += f'-subset{subset}'\n if subset < args.training_set_kwargs.max_size:\n args.training_set_kwargs.max_size = subset\n args.training_set_kwargs.random_seed = args.random_seed\n\n if mirror is None:\n mirror = False\n assert isinstance(mirror, bool)\n if mirror:\n desc += '-mirror'\n args.training_set_kwargs.xflip = True\n args.testing_set_kwargs.xflip = True\n\n # ------------------------------------\n # Base config: cfg, gamma, kimg, batch\n # ------------------------------------\n\n if cfg is None:\n cfg = 'auto'\n assert isinstance(cfg, str)\n desc += f'-{cfg}-lam{lam:g}-temp{temp:g}-map_num{map_num:g}'\n\n cfg_specs = {\n 'auto': dict(ref_gpus=-1, kimg=25000, mb=-1, mbstd=-1, fmaps=-1, lrate=-1, gamma=1., ema=-1, ramp=0.05, map=map_num), # Populated dynamically based on resolution and GPU count.\n }\n\n assert cfg in cfg_specs\n spec = dnnlib.EasyDict(cfg_specs[cfg])\n if cfg == 'auto':\n desc += f'-gpus{gpus:d}'\n spec.ref_gpus = gpus\n res = args.training_set_kwargs.resolution\n spec.mb = 16*gpus#max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay\n spec.mbstd = min(spec.mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed\n spec.fmaps = 1 if res >= 512 else fmap\n spec.lrate = 0.002 if res >= 1024 else 0.0025\n spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula\n spec.ema = spec.mb * 10 / 32\n \n# args.M_kwargs = dnnlib.EasyDict(class_name='training.networks.ManiNetwork', z_dim=args.f_dim, layer_features=args.f_dim, w_dim=512, num_layers=8)\n args.G_kwargs = dnnlib.EasyDict(class_name='training.networks.Generator', z_dim=512, w_dim=512, m_layer_features=args.f_dim, m_num_layers=8, mapping_kwargs=dnnlib.EasyDict(), synthesis_kwargs=dnnlib.EasyDict())\n args.D_kwargs = dnnlib.EasyDict(class_name='training.networks.Discriminator', use_norm=args.d_use_norm, use_fts=args.d_use_fts, block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())\n args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(spec.fmaps * 32768)\n args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512\n args.G_kwargs.mapping_kwargs.num_layers = spec.map\n args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 4 # enable mixed-precision training\n args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow\n args.G_kwargs.synthesis_kwargs.change = change\n args.G_kwargs.synthesis_kwargs.f_dim = args.f_dim\n args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd\n args.D_kwargs.epilogue_kwargs.f_dim = args.f_dim\n \n args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)\n args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)\n args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma)\n\n args.total_kimg = spec.kimg\n args.batch_size = spec.mb\n args.batch_gpu = spec.mb // spec.ref_gpus\n args.ema_kimg = spec.ema\n args.ema_rampup = spec.ramp\n\n if cfg == 'cifar':\n args.loss_kwargs.pl_weight = 0 # disable path length 
regularization\n args.loss_kwargs.style_mixing_prob = 0 # disable style mixing\n args.D_kwargs.architecture = 'orig' # disable residual skip connections\n\n if gamma is not None:\n assert isinstance(gamma, float)\n if not gamma >= 0:\n raise UserError('--gamma must be non-negative')\n desc += f'-gamma{gamma:g}'\n args.loss_kwargs.r1_gamma = gamma\n\n if kimg is not None:\n assert isinstance(kimg, int)\n if not kimg >= 1:\n raise UserError('--kimg must be at least 1')\n desc += f'-kimg{kimg:d}'\n args.total_kimg = kimg\n\n if batch is not None:\n assert isinstance(batch, int)\n if not (batch >= 1 and batch % gpus == 0):\n raise UserError('--batch must be at least 1 and divisible by --gpus')\n desc += f'-batch{batch}'\n args.batch_size = batch\n args.batch_gpu = batch // gpus\n\n # ---------------------------------------------------\n # Discriminator augmentation: aug, p, target, augpipe\n # ---------------------------------------------------\n\n if aug is None:\n aug = 'noaug' # no augmentation is used in our experiments\n else:\n assert isinstance(aug, str)\n desc += f'-{aug}'\n\n if aug == 'ada':\n args.ada_target = 0.6\n\n elif aug == 'noaug':\n pass\n\n elif aug == 'fixed':\n if p is None:\n raise UserError(f'--aug={aug} requires specifying --p')\n\n else:\n raise UserError(f'--aug={aug} not supported')\n\n if p is not None:\n assert isinstance(p, float)\n if aug != 'fixed':\n raise UserError('--p can only be specified with --aug=fixed')\n if not 0 <= p <= 1:\n raise UserError('--p must be between 0 and 1')\n desc += f'-p{p:g}'\n args.augment_p = p\n\n if target is not None:\n assert isinstance(target, float)\n if aug != 'ada':\n raise UserError('--target can only be specified with --aug=ada')\n if not 0 <= target <= 1:\n raise UserError('--target must be between 0 and 1')\n desc += f'-target{target:g}'\n args.ada_target = target\n\n assert augpipe is None or isinstance(augpipe, str)\n if augpipe is None:\n augpipe = 'bgc'\n else:\n if aug == 'noaug':\n raise UserError('--augpipe cannot be specified with --aug=noaug')\n desc += f'-{augpipe}'\n\n augpipe_specs = {\n 'blit': dict(xflip=1, rotate90=1, xint=1),\n 'geom': dict(scale=1, rotate=1, aniso=1, xfrac=1),\n 'color': dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),\n 'filter': dict(imgfilter=1),\n 'noise': dict(noise=1),\n 'cutout': dict(cutout=1),\n 'bg': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),\n 'bgc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),\n 'bgcf': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1),\n 'bgfn': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, imgfilter=1, noise=1),\n 'bgcfn': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1),\n 'bgcfnc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1, cutout=1),\n }\n\n assert augpipe in augpipe_specs\n if aug != 'noaug':\n args.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', **augpipe_specs[augpipe])\n\n # ----------------------------------\n # Transfer learning: resume, freezed\n # ----------------------------------\n\n resume_specs = {\n 'ffhq256': 
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl',\n 'ffhq512': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl',\n 'ffhq1024': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl',\n 'celebahq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl',\n 'lsundog256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl',\n }\n\n assert resume is None or isinstance(resume, str)\n if resume is None:\n resume = 'noresume'\n elif resume == 'noresume':\n desc += '-noresume'\n elif resume in resume_specs:\n desc += f'-resume{resume}'\n args.resume_pkl = resume_specs[resume] # predefined url\n else:\n desc += '-resumecustom'\n args.resume_pkl = resume # custom path or url\n\n if resume != 'noresume':\n args.ada_kimg = 100 # make ADA react faster at the beginning\n args.ema_rampup = None # disable EMA rampup\n\n if freezed is not None:\n assert isinstance(freezed, int)\n if not freezed >= 0:\n raise UserError('--freezed must be non-negative')\n desc += f'-freezed{freezed:d}'\n args.D_kwargs.block_kwargs.freeze_layers = freezed\n\n # -------------------------------------------------\n # Performance options: fp32, nhwc, nobench, workers\n # -------------------------------------------------\n\n if fp32 is None:\n fp32 = False\n assert isinstance(fp32, bool)\n if fp32:\n args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 0\n args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = None\n\n if nhwc is None:\n nhwc = False\n assert isinstance(nhwc, bool)\n if nhwc:\n args.G_kwargs.synthesis_kwargs.fp16_channels_last = args.D_kwargs.block_kwargs.fp16_channels_last = True\n\n if nobench is None:\n nobench = False\n assert isinstance(nobench, bool)\n if nobench:\n args.cudnn_benchmark = False\n\n if allow_tf32 is None:\n allow_tf32 = False\n assert isinstance(allow_tf32, bool)\n if allow_tf32:\n args.allow_tf32 = True\n\n if workers is not None:\n assert isinstance(workers, int)\n if not workers >= 1:\n raise UserError('--workers must be at least 1')\n args.data_loader_kwargs.num_workers = workers\n\n return desc, args\n\n#----------------------------------------------------------------------------\n\ndef subprocess_fn(rank, args, temp_dir):\n dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)\n\n # Init torch.distributed.\n if args.num_gpus > 1:\n init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))\n if os.name == 'nt':\n init_method = 'file:///' + init_file.replace('\\\\', '/')\n torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)\n else:\n init_method = f'file://{init_file}'\n torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)\n\n # Init torch_utils.\n sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None\n training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)\n if rank != 0:\n custom_ops.verbosity = 'none'\n\n # Execute training loop.\n training_loop.training_loop(rank=rank, 
**args)\n\n#----------------------------------------------------------------------------\n\nclass CommaSeparatedList(click.ParamType):\n name = 'list'\n\n def convert(self, value, param, ctx):\n _ = param, ctx\n if value is None or value.lower() == 'none' or value == '':\n return []\n return value.split(',')\n\n#----------------------------------------------------------------------------\n\[email protected]()\[email protected]_context\n\[email protected]('--f_dim', help='dimension of features', type=int, metavar='INT')\[email protected]('--change', help='change structure', type=int, metavar='INT')\[email protected]('--map_num', help='layer number of mapping network', type=int, metavar='INT')\[email protected]('--d_use_norm', help='Input features into every layer of discriminator', type=bool, metavar='BOOL')\[email protected]('--d_use_fts', help='Use text feature in discriminator or not', type=bool, metavar='BOOL')\[email protected]('--gather', help='gather all negative samples across gpus or not', type=bool, metavar='BOOL')\[email protected]('--mixing_prob', help='if mixing_prob==1 -> no text data used', type=float)\[email protected]('--lam', help='hyper-parameter for contrastive loss (softmax along different dimensions)', type=float)\[email protected]('--temp', help='temperature for contrastive loss', type=float)\[email protected]('--itd', help='', type=float)\[email protected]('--itc', help='', type=float)\[email protected]('--iid', help='', type=float)\[email protected]('--iic', help='', type=float)\[email protected]('--metric_only_test', help='compute metrics using test dataset vs test dataset?', type=bool, metavar='BOOL')\[email protected]('--fmap', help='', type=float)\[email protected]('--ratio', help='ratio of data with ground-truth text used', type=float)\n\n\n# General options.\[email protected]('--outdir', help='Where to save the results', required=True, metavar='DIR')\[email protected]('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')\[email protected]('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')\[email protected]('--metrics', help='Comma-separated list or \"none\" [default: fid50k_full]', type=CommaSeparatedList())\[email protected]('--seed', help='Random seed [default: 0]', type=int, metavar='INT')\[email protected]('-n', '--dry-run', help='Print training options and exit', is_flag=True)\n\n# Dataset.\[email protected]('--data', help='Training data (directory or zip)', metavar='PATH', required=True)\[email protected]('--test_data', help='Testing data (directory or zip)', metavar='PATH', required=True)\n\[email protected]('--cond', help='Train conditional model based on dataset labels [default: false]', type=bool, metavar='BOOL')\[email protected]('--subset', help='Train with only N images [default: all]', type=int, metavar='INT')\[email protected]('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')\n\n# Base config.\[email protected]('--cfg', help='Base config [default: auto]', type=click.Choice(['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar']))\[email protected]('--gamma', help='Override R1 gamma', type=float)\[email protected]('--kimg', help='Override training duration', type=int, metavar='INT')\[email protected]('--batch', help='Override batch size', type=int, metavar='INT')\n\n# Discriminator augmentation.\[email protected]('--aug', help='Augmentation mode [default: ada]', type=click.Choice(['noaug', 'ada', 'fixed']))\[email protected]('--p', 
help='Augmentation probability for --aug=fixed', type=float)\[email protected]('--target', help='ADA target value for --aug=ada', type=float)\[email protected]('--augpipe', help='Augmentation pipeline [default: bgc]', type=click.Choice(['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc']))\n\n# Transfer learning.\[email protected]('--resume', help='Resume training [default: noresume]', metavar='PKL')\[email protected]('--freezed', help='Freeze-D [default: 0 layers]', type=int, metavar='INT')\n\n# Performance options.\[email protected]('--fp32', help='Disable mixed-precision training', type=bool, metavar='BOOL')\[email protected]('--nhwc', help='Use NHWC memory format with FP16', type=bool, metavar='BOOL')\[email protected]('--nobench', help='Disable cuDNN benchmarking', type=bool, metavar='BOOL')\[email protected]('--allow-tf32', help='Allow PyTorch to use TF32 internally', type=bool, metavar='BOOL')\[email protected]('--workers', help='Override number of DataLoader workers', type=int, metavar='INT')\n\ndef main(ctx, outdir, dry_run, **config_kwargs):\n \"\"\"Train a GAN using the techniques described in the paper\n \"Training Generative Adversarial Networks with Limited Data\".\n\n Examples:\n\n \\b\n # Train with custom dataset using 1 GPU.\n python train.py --outdir=~/training-runs --data=~/mydataset.zip --gpus=1\n\n \\b\n # Train class-conditional CIFAR-10 using 2 GPUs.\n python train.py --outdir=~/training-runs --data=~/datasets/cifar10.zip \\\\\n --gpus=2 --cfg=cifar --cond=1\n\n \\b\n # Transfer learn MetFaces from FFHQ using 4 GPUs.\n python train.py --outdir=~/training-runs --data=~/datasets/metfaces.zip \\\\\n --gpus=4 --cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10\n\n \\b\n # Reproduce original StyleGAN2 config F.\n python train.py --outdir=~/training-runs --data=~/datasets/ffhq.zip \\\\\n --gpus=8 --cfg=stylegan2 --mirror=1 --aug=noaug\n\n \\b\n Base configs (--cfg):\n auto Automatically select reasonable defaults based on resolution\n and GPU count. 
Good starting point for new datasets.\n stylegan2 Reproduce results for StyleGAN2 config F at 1024x1024.\n paper256 Reproduce results for FFHQ and LSUN Cat at 256x256.\n paper512 Reproduce results for BreCaHAD and AFHQ at 512x512.\n paper1024 Reproduce results for MetFaces at 1024x1024.\n cifar Reproduce results for CIFAR-10 at 32x32.\n\n \\b\n Transfer learning source networks (--resume):\n ffhq256 FFHQ trained at 256x256 resolution.\n ffhq512 FFHQ trained at 512x512 resolution.\n ffhq1024 FFHQ trained at 1024x1024 resolution.\n celebahq256 CelebA-HQ trained at 256x256 resolution.\n lsundog256 LSUN Dog trained at 256x256 resolution.\n <PATH or URL> Custom network pickle.\n \"\"\"\n dnnlib.util.Logger(should_flush=True)\n\n # Setup training options.\n try:\n run_desc, args = setup_training_loop_kwargs(**config_kwargs)\n except UserError as err:\n ctx.fail(err)\n\n # Pick output directory.\n prev_run_dirs = []\n if os.path.isdir(outdir):\n prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]\n prev_run_ids = [re.match(r'^\\d+', x) for x in prev_run_dirs]\n prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]\n cur_run_id = max(prev_run_ids, default=-1) + 1\n args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')\n assert not os.path.exists(args.run_dir)\n\n # Print options.\n print()\n print('Training options:')\n print(json.dumps(args, indent=2))\n print()\n print(f'Output directory: {args.run_dir}')\n print(f'Training data: {args.training_set_kwargs.path}')\n print(f'Training duration: {args.total_kimg} kimg')\n print(f'Number of GPUs: {args.num_gpus}')\n print(f'Number of images: {args.training_set_kwargs.max_size}')\n print(f'Image resolution: {args.training_set_kwargs.resolution}')\n print(f'Conditional model: {args.training_set_kwargs.use_labels}')\n print(f'Dataset x-flips: {args.training_set_kwargs.xflip}')\n print(f'Discriminator use normalization: {args.d_use_norm}')\n print(f'Discriminator use fts: {args.d_use_fts}')\n\n # Dry run?\n if dry_run:\n print('Dry run; exiting.')\n return\n\n # Create output directory.\n print('Creating output directory...')\n os.makedirs(args.run_dir)\n with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:\n json.dump(args, f, indent=2)\n\n # Launch processes.\n print('Launching processes...')\n torch.multiprocessing.set_start_method('spawn')\n with tempfile.TemporaryDirectory() as temp_dir:\n if args.num_gpus == 1:\n subprocess_fn(rank=0, args=args, temp_dir=temp_dir)\n else:\n torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main() # pylint: disable=no-value-for-parameter\n\n#----------------------------------------------------------------------------\n"
] | [
[
"torch.device",
"torch.multiprocessing.set_start_method",
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
organic-chemistry/repli1D | [
"1cef3aa3ffd760f9b88d0831bf1dce92c819c949",
"1cef3aa3ffd760f9b88d0831bf1dce92c819c949"
] | [
"src/repli1d/nn.py",
"src/repli1d/whole_pipeline_from_data_file.py"
] | [
"import os\n\nimport numpy as np\nimport pandas as pd\n\nfrom repli1d.analyse_RFD import nan_polate, smooth\n\n\ndef normal_seq(signal, q=99, output_path='../data/'):\n \"\"\"\n normalization function that transforms each fature in range (0,1)\n and outputs the minimum and maximum of features in a csv file in\n data folder inside the repository, suitable for future transformation\n on new dataset in a trained\n neural network.\n\n Parameters\n ----------\n signal : numpy array or pandas dataframe\n in the shape of (n_samples, n_features)\n output_path : str, default='../data/'\n q : float, default=99\n the quantile threshold, to act like a lowerpass filter\n to remove the outliers. The q is in percentage, this function substitutes\n (100-q) quantile from reversed sorted data by the quantile of data that\n specified by user. if user set q=None there would be no denoising and it\n would scale the input by its minimum, and its maximum.\n Returns\n -------\n transformed : numpy array\n a normalised sequence or features in the range (0,1)\n \"\"\"\n max_element = []\n min_element = []\n transformed = []\n if isinstance(signal, pd.DataFrame):\n signal = signal.to_numpy(copy=True)\n elif isinstance(signal, list):\n signal = np.array(signal)\n if signal.ndim == 1:\n if q is not None:\n max_element = np.percentile(signal, q)\n else:\n max_element = max(signal)\n min_element = min(signal)\n signal[signal > max_element] = max_element\n transformed.append((signal-min_element)/(\n max_element-min_element))\n else:\n if q is not None:\n max_element = np.percentile(signal, q, axis=0)\n else:\n max_element = signal.max(axis=0)\n for i in range(signal.shape[1]):\n min_element.append(min(signal[:, i]))\n signal[signal[:, i] > max_element[i]] = max_element[i]\n transformed.append((signal[:, i]-min_element[i])/(\n max_element[i]-min_element[i]))\n transformed = np.array(transformed).T # transpose for correspondence\n if output_path is not None:\n result = pd.DataFrame((min_element, max_element), index=['minimum',\n 'maximum'])\n result.to_csv(output_path + 'min_max_inputs.csv')\n return transformed\n\n\ndef inv_transform(signal, input_path='../data/'):\n \"\"\"\n Inversre transform is a function for transforming the output of NN to the\n scale of real dataset.\n\n Parameters\n ----------\n signal : numpy array or pandas dataframe\n in the shape of (n_samples, n_features)\n input_path : str, default='../data/'\n the address of a folder that contains min_max_outputs.csv.\n Returns\n -------\n inv_transformed : numpy array\n \"\"\"\n if isinstance(signal, pd.DataFrame):\n signal = signal.to_numpy(copy=True)\n scales = pd.read_csv(input_path + 'min_max_outputs.csv')\n min_s = scales.to_numpy(copy=True)[0, 1:]\n max_s = scales.to_numpy(copy=True)[1, 1:]\n scales = max_s - min_s\n scales = scales.reshape(1, -1)\n inv_transformed = np.multiply(signal, scales) + min_s\n return inv_transformed\n\n\ndef dev_transform(signal, input_path='../data/', is_denoised=True):\n \"\"\"\n normalization function that transforms each fature based on the\n scaling of the trainning set. This transformation should be done on\n test set(developmental set), or any new input for a trained neural\n network. 
Due to existence of a denoising step in the normal_seq funciton,\n this transformation can not reproduce the exact same of initial sequences,\n instead it transforms to the scale of denoised version of training set.\n\n Parameters\n ----------\n signal : numpy array or pandas dataframe\n in the shape of (n_samples, n_features)\n input_path : str, default='../data/'\n is_denoised : boolean\n it specifies the state if original sequence is denoised by a threshold,\n if it's set to False it means that user used q=None in normal_seq function.\n Returns\n -------\n transformed : numpy array\n a normalised sequence or features\n \"\"\"\n transformed = []\n if isinstance(signal, pd.DataFrame):\n signal = signal.to_numpy(copy=True)\n elif isinstance(signal, list):\n signal = np.array(signal)\n scales = pd.read_csv(input_path + 'min_max_inputs.csv')\n max_element = scales.to_numpy(copy=True)[1, 1:]\n min_element = scales.to_numpy(copy=True)[0, 1:]\n if signal.ndim == 1:\n if is_denoised is True:\n signal[signal > max_element] = max_element\n transformed.append((signal-min_element)/(\n max_element-min_element))\n else:\n for i in range(signal.shape[1]):\n if is_denoised is True:\n signal[signal[:, i] > max_element[i]] = max_element[i]\n transformed.append((signal[:, i]-min_element[i])/(\n max_element[i]-min_element[i]))\n transformed = np.array(transformed).T # transpose for correspondence\n return transformed\n\n\ndef transform_norm(signal):\n s = np.array(signal).copy()\n s -= np.percentile(s, 10)\n p = np.percentile(s, 50)\n if p == 0:\n p = np.mean(s)\n s /= p\n s /= 5\n s[s > 50] = 50\n return np.array(s, dtype=np.float32) # mod\n\n\ndef transform_DNase(signal):\n s = np.array(signal).copy()\n s /= 500\n s[s > 1] = 1\n return s\n\n\ndef transform_norm_meth(signal):\n s = np.array(signal).copy()\n print(np.percentile(s, [10, 95]))\n # s = np.percentile(s,10)\n s /= np.percentile(s, 95)\n s /= 20\n return s\n\n# print(transform_norm)\n\n\ndef filter_anomalyf(signal, smv, percentile, nf):\n for n in range(nf):\n delta = np.abs(signal-smooth(signal, smv))\n p = np.percentile(np.abs(delta), percentile)\n signal[np.abs(delta) > p] = np.nan\n signal = nan_polate(signal)\n return signal\n\n\ndef load_signal(name,\n marks=[\"H3K4me1\", \"H3K4me3\", \"H3K27me3\", \"H3K36me3\",\n \"H3K9me3\", \"H2A.Z\", \"H3K79me2\", \"H3K9ac\", \"H3K4me2\",\n \"H3K27ac\", \"H4K20me1\"],\n targets=[\"initiation\"], t_norm=None, smm=None, wig=True,\n augment=None, show=True, add_noise=False,\n filter_anomaly=False,\n repertory_scaling_param=\"../data/\"):\n \"\"\"\n This function does some modification on datset based on its column names\n and also revoke the scaling methods for different features and outputs,\n it also makes a mask for different chromosomes. 
to be able to\n adapt the method for different chromosomes it is necessary to call\n load_signal, and transform_seq for training set and then revoke them for\n test set or any other set (revoking two consequent load_signal on two\n different dataset then tranform_seq them may return wrong stacked\n sequences), it is necessary due to variable that defines in load_signal.\n\n Parameters\n ----------\n name : str or pd.Dataframe\n the address of a csv file or pandas dataframe\n marks : list\n a list that contains the names of markers as features for NN.\n targets : list\n a list that contains columns names of desired outputs of NN.\n repertory_scaling_param : str\n the address to save the scaling parameters in it.\n Returns\n -------\n df : numpy array\n a scaled dataset of features\n y_init : numpy array\n a scaled dataset of outputs\n notnan : numpy array\n \"\"\"\n if type(name) == str:\n df = pd.read_csv(name)\n\n # wig = True\n mask_borders = np.cumsum(df.chrom.value_counts().to_numpy(copy=True))\n if \"signal\" in df.columns:\n df[\"initiation\"] = df[\"signal\"]\n\n if wig:\n lm = [\"DNaseI\", \"initiation\", \"Meth\", \"Meth450\", \"RFDs\", \"MRTs\",\n \"RFDe\", \"MRTe\", \"AT_20\", \"RNA_seq\", \"AT_5\", \"AT_30\"]\n marks0 = [m+\"wig\" for m in marks if m not in lm]\n for sm in lm:\n if sm in marks:\n marks0 += [sm]\n\n assert(len(marks) == len(marks0))\n marks = marks0\n\n if \"notnan\" in df.columns:\n if show:\n print(\"Found notnan\")\n notnan = df[\"notnan\"]\n else:\n notnan = []\n\n df = df[targets+marks]\n if show:\n print(df.describe())\n\n yinit = [df.pop(target) for target in targets]\n # print(yinit.shape,\"Yinit shape\")\n\n if t_norm is not None:\n transform_norm = t_norm\n\n if transform_norm == normal_seq:\n df = pd.DataFrame(transform_norm(df,\n output_path=repertory_scaling_param))\n else:\n for col in df.columns:\n if show:\n print(col)\n if col not in [\"DNaseI\", \"initiation\", \"Meth\", \"Meth450\", \"RFDe\",\n \"MRTe\", \"RFDs\", \"MRTs\"]:\n df[col] = transform_norm(df[col])\n elif col == \"DNaseI\":\n df[col] = transform_DNase(df[col])\n elif col in [\"initiation\", \"Stall\"]:\n df[col] = df[col] / np.max(df[col])\n elif \"Meth\" in col:\n df[col] = transform_norm_meth(df[col])\n elif \"RFD\" in col:\n if \"RFD\" in col:\n if col == \"RFDe\" and filter_anomaly:\n df[col] = filter_anomalyf(df[col].copy(), smv=5,\n percentile=98.5, nf=4)\n\n # print(\"Nanpo\")\n df[col] = nan_polate(df[col])\n if add_noise and col == \"RFDs\":\n print(\"Noise: \", int(len(df)*0.01))\n for p in np.random.randint(0, len(df),\n size=int(len(df)*0.01)): # article 1%\n df[col][p] = 2*np.random.rand()-1\n\n if smm is not None:\n df[col] = smooth(df[col], smm)\n df[col] = (df[col]+1)/2\n elif \"MRT\" in col:\n if \"MRT\" in col:\n df[col] = nan_polate(df[col])\n if augment == \"test\":\n for asm in [10, 50, 200]:\n df[col+f\"_sm_{asm}\"] = smooth(nan_polate(df[col]), asm)\n df[col + f\"_sm_{asm}\"] -= np.mean(df[col+f\"_sm_{asm}\"])\n df[col + f\"_sm_{asm}\"] /= np.std(df[col + f\"_sm_{asm}\"])\n\n pass\n\n if np.sum(np.isnan(df[col])) != 0:\n raise \"NanVal\"\n\n if show:\n print(np.max(yinit[0]), \"max\")\n print(df.describe())\n\n yinit0 = []\n min_outputs = []\n max_outputs = []\n for y, t in zip(yinit, targets):\n if t in [\"initiation\", \"Stall\"]:\n max_outputs.append(np.max(y))\n min_outputs.append(np.min(y))\n trunc = (y - np.min(y)) / (np.max(y)-np.min(y)) # np.percentile(y,99)\n # trunc[trunc>1] = 1\n result = pd.DataFrame((min_outputs, max_outputs), index=['minimum',\n 
'maximum'])\n result.to_csv(os.path.join(repertory_scaling_param,\n 'min_max_outputs.csv'))\n yinit0.append(trunc)\n\n elif t == \"DNaseI\":\n yinit0.append(transform_DNase(y))\n elif t == \"OKSeq\":\n yinit0.append((y+1)/2)\n elif t == \"ORC2\":\n yinit0.append(y)\n else:\n raise \"Undefined target\"\n\n yinit = np.array(yinit0).T\n yinit[np.isnan(yinit)] = 0\n # print(yinit.shape)\n \"\"\"\n import pylab\n f=pylab.figure()\n pylab.plot(yinit)\n pylab.plot(df[\"RFDs\"])\n pylab.show()\n \"\"\"\n dict = {\"df\": df,\n \"yinit\": yinit,\n \"notnan\": notnan,\n \"mask_borders\": mask_borders}\n return dict\n\n\ndef window_stack(a, mask_borders, stepsize=1, width=3):\n \"\"\"\n This function makes windows of the size specified as 'width'\n and sweeping over dataset with the specified step size.\n\n Parameters\n ----------\n a : numpy array\n in the shape of (n_samples, n_features)\n step_size : int\n width : int\n mask_borders : list\n list of end positions of each chromosome as elements along\n the first axis of dataset.\n Returns\n -------\n window_stacked : numpy array or pandas dataframe\n in the shape of (n_windows, n_features*width)\n an array of stacked windows, column wise.\n \"\"\"\n window_stacked = []\n # print([[i,1+i-width or None,stepsize] for i in range(0,width)])\n for index, elem in enumerate(mask_borders):\n if index != 0:\n boundary = mask_borders[index-1] + 1\n else:\n boundary = 0\n b = a[boundary: elem+1]\n window_stacked.append([b[i:1+i-width or None:stepsize] for i in range(0, width)])\n window_stacked = np.hstack(window_stacked)\n return window_stacked\n\n\ndef transform_seq(Xt, yt, stepsize=1, width=3, impair=True):\n \"\"\"\n This function reshapes the output of window_stack function into a\n suitable shape for neural network.\n\n Parameters\n ----------\n Xt : numpy array\n in the shape of (n_samples, n_features)\n yt : numpy array\n in the shape of (n_samples, n_features)\n step_size : int\n width : int\n impair : bool\n Returns\n -------\n X : numpy array\n in the shape of (n_windows, 1, width, n_features)\n Y : numpy array\n in the shape of (n_windows, n_outputs)\n \"\"\"\n # X = (seq,dim)\n # y = (seq)\n # Xt = np.array(Xt, dtype=np.float16)\n yt = np.array(yt, dtype=np.float16)\n # print(Xt.shape, yt.shape)\n\n assert(len(Xt.shape) == 2)\n assert(len(yt.shape) == 2)\n if impair:\n assert(width % 2 == 1)\n X = window_stack(Xt, mask_borders, stepsize, width).reshape(-1, width, Xt.shape[-1])[::, np.newaxis, ::, ::]\n # [::,np.newaxis] #Take the value at the middle of the segment\n Y = window_stack(yt[::, np.newaxis], mask_borders, stepsize, width)[::, width//2]\n\n # print(X.shape, Y.shape)\n # exit()\n\n return X, Y\n\n\ndef train_test_split(chrom, ch_train, ch_test, notnan):\n print(list(ch_train), list(ch_test))\n\n chltrain = ch_train\n chltest = ch_test\n if len(notnan) != 0:\n train = [chi in chltrain and notna for chi, notna in zip(chrom.chrom, notnan)]\n test = [chi in chltest and notna for chi, notna in zip(chrom.chrom, notnan)]\n else:\n print(\"Working on all (no nan)\")\n train = [chi in chltrain for chi in chrom.chrom]\n test = [chi in chltest for chi in chrom.chrom]\n print(np.sum(train), np.sum(test), np.sum(test)/len(test))\n return train, test\n\n\ndef unison_shuffled_copies(a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]\n\n\ndef repad1d(res, window):\n return np.concatenate([np.zeros(window//2), res, np.zeros(window//2)])\n\n\nif __name__ == \"__main__\":\n\n import argparse\n import os\n\n from 
keras.callbacks import (EarlyStopping, History, ModelCheckpoint,\n ReduceLROnPlateau)\n from repli1d.models import jm_cnn_model as create_model\n from keras.models import load_model\n\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--cell', type=str, default=None)\n parser.add_argument('--rootnn', type=str, default=None)\n parser.add_argument('--nfilters', type=int, default=15)\n parser.add_argument('--resolution', type=int, default=5)\n parser.add_argument('--sm', type=int, default=None) # Smoothing exp data\n\n parser.add_argument('--window', type=int, default=51)\n parser.add_argument('--max_epoch', type=int, default=150)\n parser.add_argument('--batch_size', type=int, default=128)\n\n parser.add_argument('--imp', action=\"store_true\")\n parser.add_argument('--reduce_lr', action=\"store_true\")\n\n parser.add_argument('--wig', type=int, default=None)\n parser.add_argument('--dropout', type=float, default=0.1)\n\n parser.add_argument('--kernel_length', type=int, default=10)\n parser.add_argument('--weight', type=str, default=None)\n parser.add_argument('--loss', type=str, default=\"binary_crossentropy\")\n parser.add_argument('--augment', type=str, default=\"\")\n\n parser.add_argument('--marks', nargs='+', type=str, default=[])\n parser.add_argument('--targets', nargs='+', type=str, default=[\"initiation\"])\n parser.add_argument('--listfile', nargs='+', type=str, default=[])\n parser.add_argument('--enrichment', nargs='+', type=float, default=[0.1, 1.0, 5.0])\n parser.add_argument('--roadmap', action=\"store_true\")\n parser.add_argument('--noenrichment', action=\"store_true\")\n parser.add_argument('--predict_files', nargs='+', type=str, default=[])\n\n parser.add_argument('--restart', action=\"store_true\")\n parser.add_argument('--datafile', action=\"store_true\")\n parser.add_argument('--add_noise', action=\"store_true\")\n parser.add_argument('--filter_anomaly', action=\"store_true\")\n\n args = parser.parse_args()\n\n cell = args.cell\n rootnn = args.rootnn\n window = args.window\n marks = args.marks\n if marks == []:\n marks = ['H2az', 'H3k27ac', 'H3k79me2', 'H3k27me3', 'H3k9ac',\n 'H3k4me2', 'H3k4me3', 'H3k9me3', 'H3k4me1', 'H3k36me3', \"H4k20me1\"]\n\n lcell = [cell]\n\n if cell == \"all\":\n lcell = [\"K562\", \"GM\", \"Hela\"]\n\n os.makedirs(args.rootnn, exist_ok=True)\n\n root = \"/home/jarbona/projet_yeast_replication/notebooks/DNaseI/repli1d/\"\n if not args.datafile:\n if args.resolution == 5:\n XC = pd.read_csv(root + \"coords_K562.csv\", sep=\"\\t\") # List of chromosome coordinates\n if args.resolution == 1:\n XC = pd.read_csv(\"data/Hela_peak_1_kb.csv\", sep=\"\\t\")\n\n if args.listfile == []:\n listfile = []\n for cellt in lcell:\n name = \"/home/jarbona/repli1D/data/mlformat_whole_sig_%s_dec2.csv\" % cellt\n name = \"/home/jarbona/repli1D/data/mlformat_whole_sig_standard%s_dec2.csv\" % cellt\n name = \"/home/jarbona/repli1D/data/mlformat_whole_sig_standard%s_nn.csv\" % cellt\n wig = True\n if args.roadmap:\n name = \"/home/jarbona/repli1D/data/roadmap_%s_nn.csv\" % cellt\n wig = False\n\n listfile.append(name)\n else:\n listfile = args.listfile\n wig = False\n\n if args.wig is not None:\n if args.wig == 1:\n wig = True\n else:\n wig = False\n if args.weight is None or args.restart:\n X_train = []\n for name in listfile:\n print(name)\n temp_dict = load_signal(\n name, marks, targets=args.targets, t_norm=transform_norm,\n smm=args.sm, wig=wig, augment=args.augment,\n add_noise=args.add_noise,repertory_scaling_param=args.rootnn+\"/\")\n df, yinit, 
notnan, mask_borders = temp_dict.values()\n \"\"\"\n traint = [1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19]\n valt = [4, 18, 21, 22]\n testt = [5, 20]\n \"\"\"\n if not args.datafile:\n traint = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\n 18, 19] + [20, 21, 22, 23]\n valt = [20, 21, 22, 23]\n valt = [2]\n testt = [1] # 1]\n\n traint = [\"chr%i\" for i in traint]\n valt = [\"chr%i\" for i in valt]\n testt = [\"chr%i\" for i in testt]\n\n else:\n XC = pd.read_csv(args.listfile[0])\n chs = set(XC.chrom)\n traint = list(chs)\n tests = [\"chr1\"]\n valt = [\"chr2\"]\n traint.remove(tests[0])\n traint.remove(valt[0])\n\n # traint.pop(0)\n\n if not args.datafile:\n for v in testt:\n assert(v not in traint)\n for v in valt:\n assert(v not in traint)\n train, val = train_test_split(XC, traint, valt, notnan)\n X_train_us, X_val_us, y_train_us, y_val_us = df[train], df[val], yinit[train], yinit[val]\n\n vtrain = transform_seq(X_train_us, y_train_us, 1, window)\n vval = transform_seq(X_val_us, y_val_us, 1, window)\n del X_train_us, X_val_us, y_train_us, y_val_us\n if X_train == []:\n X_train, y_train = vtrain\n X_val, y_val = vval\n else:\n X_train = np.concatenate([X_train, vtrain[0]])\n y_train = np.concatenate([y_train, vtrain[1]])\n X_val = np.concatenate([X_val, vval[0]])\n y_val = np.concatenate([y_val, vval[1]])\n\n X_train, y_train = unison_shuffled_copies(X_train, y_train)\n\n n = X_train.shape[0] * X_train.shape[2]\n if n > 1e9:\n nmax = int(0.5e9//X_train.shape[2])\n print(nmax)\n X_train = X_train[:nmax]\n y_train = y_train[:nmax]\n\n print(\"Shape\", X_train.shape, y_train.shape)\n\n weight=None\n if (args.weight is not None) or os.path.exists(rootnn+\"/%sweights.hdf5\" % cell):\n weight= args.weight\n if weight is None:\n weight = rootnn+\"/%sweights.hdf5\" % cell\n multi_layer_keras_model = load_model(weight)\n multi_layer_keras_model.summary()\n del X_train, y_train\n\n if not args.restart and weight is not None:\n #load_model(args.weight)\n pass\n\n else:\n if not args.imp:\n multi_layer_keras_model = create_model(\n X_train, targets=args.targets, nfilters=args.nfilters,\n kernel_length=args.kernel_length, loss=args.loss)\n else:\n multi_layer_keras_model = create_model_imp(\n X_train, targets=args.targets, nfilters=args.nfilters,\n kernel_length=args.kernel_length, loss=args.loss)\n\n if args.restart:\n multi_layer_keras_model = load_model(args.weight)\n\n \"\"\"\n if (len(args.targets) == 1) and (args.targets[0] == \"OKSeq\"):\n\n selpercents = [1.0]\n else:\n selpercents = [0.1, 1.0, 5.0, \"all\"]\n \"\"\"\n\n totenr = args.enrichment + [\"all\"]\n if args.noenrichment:\n totenr = [\"all\"]\n\n print(totenr)\n for selp in totenr:\n\n print(sum(y_train == 0), sum(y_train != 0))\n if type(selp) == float:\n sel = y_train[::, 0] != 0\n th = np.percentile(y_train[::, 0], 100-selp)\n print(\"sepp,th\", selp, th)\n sel = y_train[::, 0] > th\n # sel = y_train[::, 0] > 0.2\n \"\"\"\n if sum(sel)/len(sel) > selp:\n th = np.percentile(sel,100-100*selp)\n print(th)\n sel = y_train[::, 0] > th\n \"\"\"\n print(\"top %i , Total %i, selected %i\" % (sum(sel), len(sel), int(0.01*selp*len(sel))))\n sel[np.random.randint(0, len(sel-1), int(0.01*selp*len(sel)))] = True\n print(\"Chekc\", np.sum(sel))\n else:\n\n sel = np.ones_like(y_train[::, 0], dtype=np.bool)\n print(np.sum(sel), sel.shape)\n print(X_train.shape, X_train[sel].shape)\n cp = [EarlyStopping(patience=3)]\n batch_size = 128\n if selp == \"all\" and False:\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', 
factor=0.2,\n patience=3, min_lr=0.0001)\n cp = [reduce_lr]\n if args.reduce_lr:\n cp = [EarlyStopping(patience=5),\n ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n patience=3, min_lr=0.0001)]\n\n if args.datafile:\n validation_data = (X_val, y_val)\n validation_split = 0.\n else:\n validation_data = (X_val, y_val)\n validation_split = 0.\n history_multi_filter = multi_layer_keras_model.fit(x=X_train[sel],\n y=y_train[sel],\n batch_size=args.batch_size,\n epochs=args.max_epoch,\n verbose=1,\n callbacks=cp+[History(),\n ModelCheckpoint(save_best_only=True,\n filepath=rootnn+\"/%sweights.{epoch:02d}-{val_loss:.4f}.hdf5\" % cell,\n verbose=1)],\n validation_data=validation_data,\n validation_split=validation_split)\n\n multi_layer_keras_model.save(rootnn+\"/%sweights.hdf5\" % cell)\n print(\"Saving on\", rootnn+\"/%sweights.hdf5\" % cell)\n del X_train, y_train\n ###################################\n # predict\n print(\"Predict\")\n if args.listfile == [] or args.roadmap or (len(args.predict_files) != 0):\n if marks == [\"RFDs\", \"MRTs\"]:\n marks = [\"RFDe\", \"MRTe\"]\n to_pred = []\n if len(args.predict_files) == 0:\n lcell = [\"K562\", \"Hela\", \"GM\"]\n if args.cell is not None and args.weight is not None:\n lcell = [args.cell]\n for cellp in lcell:\n namep = \"/home/jarbona/repli1D/data/mlformat_whole_sig_%s_dec2.csv\" % cellp\n namep = \"/home/jarbona/repli1D/data/mlformat_whole_sig_standard%s_nn.csv\" % cellp\n wig = True\n if args.roadmap:\n namep = \"/home/jarbona/repli1D/data/roadmap_%s_nn.csv\" % cellp\n wig = False\n to_pred.append(namep)\n else:\n to_pred = args.predict_files\n\n if args.wig is not None:\n if args.wig == 1:\n wig = True\n else:\n wig = False\n\n for namep in to_pred:\n\n cellp = os.path.split(namep)[1].split(\"_\")[0] # namep.split(\"_\")[-1][:-4]\n\n print(\"Reading %s, cell %s\" % (namep, cellp))\n temp_dict = load_signal(\n namep, marks, targets=args.targets, t_norm=transform_norm,\n wig=wig, smm=args.sm, augment=args.augment,\n filter_anomaly=args.filter_anomaly)\n df, yinit, notnan, mask_borders = temp_dict.values()\n X, y = transform_seq(df, yinit, 1, window)\n print(X.shape)\n res = multi_layer_keras_model.predict(X)\n del df, X, y\n print(res.shape, \"resshape\", yinit.shape)\n\n for itarget, target in enumerate(args.targets):\n XC[\"signalValue\"] = repad1d(res[::, itarget], window)\n if target == \"OKSeq\":\n XC[\"signalValue\"] = XC[\"signalValue\"] * 2-1\n # XC.to_csv(\"nn_hela_fk.csv\",index=False,sep=\"\\t\")\n if target == \"initiation\":\n ns = rootnn+\"/nn_%s_from_%s.csv\" % (cellp, cell)\n s = 0\n for y1, y2 in zip(yinit, XC[\"signalValue\"]):\n s += (y1-y2)**2\n print(\"Average delta\", s/len(yinit))\n else:\n ns = rootnn+\"/nn_%s_%s_from_%s.csv\" % (cellp, target, cell)\n\n print(\"Saving to\", ns)\n XC.to_csv(ns, index=False, sep=\"\\t\")\n else:\n for namep in args.listfile:\n marks = [\"RFDe\", \"MRTe\"]\n temp_dict = load_signal(\n namep, marks, targets=args.targets, t_norm=transform_norm,\n smm=args.sm, augment=args.augment,\n filter_anomaly=args.filter_anomaly)\n df, yinit, notnan, mask_borders = temp_dict.values()\n X, y = transform_seq(df, yinit, 1, window)\n print(X.shape)\n res = multi_layer_keras_model.predict(X)\n del df, X, y\n print(res.shape, \"resshape\", yinit.shape)\n\n for itarget, target in enumerate(args.targets):\n XC[\"signalValue\"] = repad1d(res[::, itarget], window)\n if target == \"OKSeq\":\n XC[\"signalValue\"] = XC[\"signalValue\"] * 2-1\n # XC.to_csv(\"nn_hela_fk.csv\",index=False,sep=\"\\t\")\n if target 
in [\"initiation\", \"Init\"]:\n namew = namep.split(\"/\")[-1][:-4]\n ns = rootnn+\"/nn_%s.csv\" % (namew)\n s = 0\n for y1, y2 in zip(yinit, XC[\"signalValue\"]):\n s += (y1-y2)**2\n print(\"Average delta\", s/len(yinit))\n else:\n ns = rootnn+\"/nn_%s_%s.csv\" % (namew, target)\n # print(\"Not implemented\")\n # ns = rootnn+\"/nn_%s_%s_from_%s.csv\" % (cellp, target, cell)\n\n print(\"Saving to\", ns)\n XC.to_csv(ns, index=False, sep=\"\\t\")\n",
"import argparse\nimport subprocess\nimport os\nimport json\nimport pandas as pd\nimport numpy as np\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--data', type=str, default=\"datafile.csv\")\nparser.add_argument('--name_script', type=str, default=\"script.sh\")\n\nparser.add_argument('--no-optiMRT',dest=\"optiMRT\", action=\"store_false\")\nparser.add_argument('--no-optiRFD',dest=\"optiRFD\", action=\"store_false\")\nparser.add_argument('--resolution', type=int,default=5,\n help=\"Resolution in kb\")\nparser.add_argument('--speed', type=float,default=2.5,\n help=\"speed in kb/min\")\nparser.add_argument('--nsim', type=int,default=200,\n help=\"number of simulation\")\nparser.add_argument('--repTime', type=float,default=800,\n help=\"replication time in minute\")\nparser.add_argument('--repTime_max_factor', type=float,default=3,\n help=\"replication time factor in repTime unit\")\nparser.add_argument('--dori', type=float,default=5,\n help=\"average distance between origins (in kb)\")\nparser.add_argument('--max_epoch', type=int,default=150,\n help=\"maximum number of epoch for Neural network training\")\nparser.add_argument('--window', type=int,default=401,\n help=\"window size (in resolution unit) for the neural network (must be impair number)\")\nparser.add_argument('--nfilters', type=int,default=50,\n help=\"conv filters\")\nparser.add_argument('--percentile', type=int,default=82,\n help=\"percentile of Delta RFD for I0\")\nparser.add_argument('--chr_sub', type=str,default=\"chr2\",\n help=\"chromosome on which perform the small optimisation\")\nparser.add_argument('--introduction_time', type=float,default=60,\n help=\"caracteristic time of the exponential increase of firing factors \")\nparser.add_argument('--cut_holes', type=int,default=1500,\n help=\"remove parts of the genome where there is no information on length given by the argument (in kb)\")\nparser.add_argument('--masking', type=int,default=200,\n help=\"remove parts of the genome for computing correlation of masking size around holes(in kb)\")\nparser.add_argument('--root', type=str, default=\"./\",\n help=\"where to store the results\")\nparser.add_argument('--threads', type=int,default=8,help=\"number of threads for the DNA simulations\")\nparser.add_argument('--update', action=\"store_true\")\nparser.add_argument('--no-safety', dest=\"safety\",action=\"store_false\")\nparser.add_argument('--RFDonly', dest=\"RFDonly\",action=\"store_true\")\nparser.add_argument('--exclude_noise', dest=\"exclude_noise\",action=\"store_true\")\nparser.add_argument('--show', action=\"store_true\")\nparser.add_argument('--test', action=\"store_true\")\nparser.add_argument('--reduce_lr', action=\"store_true\")\nparser.add_argument('--single', action=\"store_true\",help=\"Compute single molecule information (only for wholecell)\")\nparser.add_argument('--save', action=\"store_true\",help=\"Save individual MRTs and RFDs (only for wholecell)\")\nparser.add_argument('--pearson', action=\"store_true\",help=\"Perform the grid search optimisation on sum of pearson correlation (otherwise on absolute error)\")\nparser.add_argument('--max_factor_reptime', type=float,default=1.41)\nparser.add_argument('--on_input_signal', type=str,default=None)\n\n\nargs = parser.parse_args()\n\n#Get genome length\ndata=pd.read_csv(args.data)\nif args.optiMRT and \"MRT\" not in data.columns:\n print(f\"Missing MRT data in {args.data}\" )\n raise\nif args.optiRFD and \"OKSeq\" not in data.columns:\n print(f\"Missing OKSeq data in {args.data}\" )\n raise\n\nif 
args.test:\n max_epoch=10\n nsim=50\n fexplo = np.arange(.8,1.21,0.2)\nelse:\n max_epoch=150\n nsim=args.nsim\n delta=(args.max_factor_reptime-0.61)/4\n fexplo = np.arange(.6,args.max_factor_reptime,delta)\n\n\nl_ch = list(set(data.chrom))\nif args.chr_sub not in l_ch:\n print(f\"Chromosome for optimisation (--chr_sub) must be choosen inside this list {l_ch}\")\n raise\nwindow=args.window\nif args.window % 2 != 1:\n window = args.window+1\n\n\ncellcmd=f\" --datafile {args.data} \"\n\n\nprint(\"Estimating Ndiff (not taking into account holes)\")\nndiff = len(data)*args.resolution/(2*args.repTime*args.speed)\nprint(f\"If always actif, the total number of firing factor is {ndiff}\")\nndiff /= len(data)*args.resolution/1000\nprint(f\"USing density {ndiff} in firing factor per Mb\")\n\n\nsmall_sub = f\" --percentile {args.percentile} --mrt_res {args.resolution} --n_jobs {args.threads} --fspeed {args.speed} --visu --experimental --input --resolution {args.resolution} --resolutionpol {args.resolution} \"\nif args.cut_holes != 0:\n small_sub+= f\"--cutholes {args.cut_holes} \"\nif args.masking != 0:\n small_sub+= f\"--masking {args.masking} \"\nif args.introduction_time > 0.20 * args.repTime:\n print(\"Introduction time larger than 20% of the replication time\")\n print(\"If it is not on purpose please set --no-safety\")\n if args.safety:\n raise\nsmall_sub += f\"--introduction_time {args.introduction_time} --dori {args.dori}\"\n\n\n\nkon = 8.625 / (len(data) * args.resolution ) / 3\nprint(f\"Using kon = {kon}\")\n\nstandard_parameters = small_sub + f\" --wholecell --kon {kon} --noise 0.0 --nsim {nsim} \"\n\nif args.exclude_noise:\n #print(\"Laaaaaaaaaaaaa\")\n standard_parameters += \" --exclude_noise_save \"\n\nmaxT = args.repTime * args.repTime_max_factor\n#maxT *= 100\nstandard_parameters += \"--ndiff %.2f %s\"%(ndiff,cellcmd)\n\ncmd=[]\n\nnloop=5\nif args.on_input_signal != None:\n nloop=1\n\nfor loop in range(nloop):\n\n if loop != 0:\n extra_nn = \"\"\n if args.reduce_lr:\n extra_nn = \" --reduce_lr \"\n directory_nn = args.root+f\"/RFD_to_init_nn_{loop}/\"\n cmd += [[f\"python src/repli1d/nn.py {extra_nn} --max_epoch {max_epoch} --add_noise --nfilters {args.nfilters} --listfile {directory}/global_profiles.csv --datafile --marks RFDs MRTs --root {directory_nn} --sm 10 --noenrichment --window {window} --max_epoch {args.max_epoch}\",\n \"\"]]\n #print(sum(data.chr==args.chr_sub)*args.resolution/1000)\n megabase_sub= sum(data.chrom==args.chr_sub)*args.resolution/1000\n #print(megabase_sub)\n ndiff0 = max(int( megabase_sub* ndiff),1)\n ndiffs = \" \".join([str(int(ndiff0*f)) for f in fexplo])\n #ndiffs = \" \".join([str(int(ndiff0*f)) for f in np.arange(1,2,0.5)])\n\n\n directory_opti = args.root+f\"/_RFD_to_init_small_opti_{loop}/\"\n end = int(sum(data.chrom==args.chr_sub) * args.resolution) # in kb\n extra_small = \"\"\n if args.pearson:\n extra_small=\" --pearson \"\n cmd_opti = f\"python src/repli1d/small_opty.py {extra_small} --size_segment {end/1000} --ndiff {ndiffs} --root {directory_opti} --cmd '--kon {8.625/end} {small_sub} --start 0 --end {end} --ch {args.chr_sub} --nsim {nsim} {cellcmd} \"\n\n if loop == 0:\n if args.on_input_signal == None:\n if args.RFDonly:\n cmd_opti += \"--signal peakRFDonly' \"\n else:\n cmd_opti += \"--signal peak' \"\n else:\n cmd_opti += f\"--signal {args.on_input_signal}' \"\n\n\n else:\n cmd_opti += f\"--signal {directory_nn}/nn_global_profiles.csv' --maxT {maxT} \"\n\n cmd += [[cmd_opti,\"\"]]\n\n directory = args.root+f\"/wholecell_{loop}/\"\n\n 
extra_params=\"\"\n if args.save:\n extra_params += \" --save \"\n if args.single:\n extra_params += \" --single \"\n cmd_wholecell=f\"python src/repli1d/detect_and_simulate.py {extra_params} {standard_parameters} --name {directory} --extra_param {directory_opti}/params.json \"\n\n if loop == 0:\n if args.on_input_signal == None:\n if args.RFDonly:\n cmd_wholecell += \"--signal peakRFDonly \"\n else:\n cmd_wholecell += \"--signal peak\"\n else:\n cmd_wholecell += f\"--signal {args.on_input_signal} \"\n else:\n cmd_wholecell += f\"--signal {directory_nn}/nn_global_profiles.csv\"\n\n cmd += [ [cmd_wholecell,\n directory+\"/global_profiles.csv\"]]\n\n\n\n\nredo = not args.update\nexe= False\nscript = []\nfor cm in cmd:\n if args.show:\n print(cm)\n if type(cm) == list:\n sup=None\n if \"global_profile\" in cm[1]:\n sup = \"python src/repli1d/average_expe.py --dir %s\" % \"/\".join(cm[1].split(\"/\")[:-1])\n if not redo and os.path.exists(cm[1]):\n if args.show:\n print(sup)\n script.append(sup)\n if exe:\n process = subprocess.Popen(sup, shell=True, stdout=subprocess.PIPE)\n process.wait()\n continue\n\n else:\n cm = cm[0]\n\n script.append(cm)\n if exe:\n process = subprocess.Popen(cm, shell=True, stdout=subprocess.PIPE)\n process.wait()\n if sup is not None:\n if args.show:\n print(sup)\n script.append(sup)\n if exe:\n process = subprocess.Popen(sup, shell=True, stdout=subprocess.PIPE)\n process.wait()\n\nwith open(args.name_script,\"w\") as f:\n f.writelines(\"\\n\".join(script))\n"
] | [
[
"numpy.hstack",
"pandas.read_csv",
"numpy.ones_like",
"numpy.abs",
"numpy.multiply",
"numpy.min",
"numpy.isnan",
"numpy.percentile",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.arange",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
koudyk/netneurotools | [
"7631cf8303f1a754dd4df0f209ce4cea50417714"
] | [
"netneurotools/networks.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nFunctions for generating group-level networks from individual measurements\n\"\"\"\n\nimport numpy as np\nfrom scipy.sparse import csgraph\nfrom sklearn.utils.validation import (check_random_state, check_array,\n check_consistent_length)\n\nfrom . import utils\n\n\ndef func_consensus(data, n_boot=1000, ci=95, seed=None):\n \"\"\"\n Calculates thresholded group consensus functional connectivity graph\n\n This function concatenates all time series in `data` and computes a group\n correlation matrix based on this extended time series. It then generates\n length `T` bootstrapped samples from the concatenated matrix and estimates\n confidence intervals for all correlations. Correlations whose sign is\n consistent across bootstraps are retained; inconsistent correlations are\n set to zero.\n\n If `n_boot` is set to 0 or None a simple, group-averaged functional\n connectivity matrix is estimated, instead.\n\n Parameters\n ----------\n data : (N, T, S) array_like\n Pre-processed functional time series, where `N` is the number of nodes,\n `T` is the number of volumes in the time series, and `S` is the number\n of subjects\n n_boot : int, optional\n Number of bootstraps for which to generate correlation. Default: 1000\n ci : (0, 100) float, optional\n Confidence interval for which to assess the reliability of correlations\n with bootstraps. Default: 95\n seed : int, optional\n Random seed. Default: None\n\n Returns\n -------\n consensus : (N, N) numpy.ndarray\n Thresholded, group-level correlation matrix\n\n References\n ----------\n Mišić, B., Betzel, R. F., Nematzadeh, A., Goni, J., Griffa, A., Hagmann,\n P., Flammini, A., Ahn, Y.-Y., & Sporns, O. (2015). Cooperative and\n competitive spreading dynamics on the human connectome. 
Neuron, 86(6),\n 1518-1529.\n \"\"\"\n\n # check inputs\n rs = check_random_state(seed)\n if ci > 100 or ci < 0:\n raise ValueError(\"`ci` must be between 0 and 100.\")\n\n # group-average functional connectivity matrix desired instead of bootstrap\n if n_boot == 0 or n_boot is None:\n corrs = [np.corrcoef(data[..., sub]) for sub in range(data.shape[-1])]\n return np.mean(corrs, axis=0)\n\n collapsed_data = data.reshape((len(data), -1), order='F')\n consensus = np.corrcoef(collapsed_data)\n\n # only keep the upper triangle for the bootstraps to save on memory usage\n triu_inds = np.triu_indices_from(consensus, k=1)\n bootstrapped_corrmat = np.zeros((len(triu_inds[0]), n_boot))\n\n # generate `n_boot` bootstrap correlation matrices by sampling `t` time\n # points from the concatenated time series\n for boot in range(n_boot):\n inds = rs.randint(collapsed_data.shape[-1], size=data.shape[1])\n bootstrapped_corrmat[..., boot] = \\\n np.corrcoef(collapsed_data[:, inds])[triu_inds]\n\n # extract the CIs from the bootstrapped correlation matrices\n # we don't need the input anymore so overwrite it\n bootstrapped_ci = np.percentile(bootstrapped_corrmat, [100 - ci, ci],\n axis=-1, overwrite_input=True)\n\n # remove unreliable (i.e., CI zero-crossing) correlations\n # if the signs of the bootstrapped confidence intervals are different\n # (i.e., their signs sum to 0), then we want to remove them\n # so, take the logical not of the CI (CI = 0 ---> True) and create a mask\n # then, set all connections from the consensus array inside the mask to 0\n remove_inds = np.logical_not(np.sign(bootstrapped_ci).sum(axis=0))\n mask = np.zeros_like(consensus, dtype=bool)\n mask[triu_inds] = remove_inds\n consensus[mask + mask.T] = 0\n\n return consensus\n\n\ndef _ecdf(data):\n \"\"\"\n Estimates empirical cumulative distribution function of `data`\n\n Taken directly from StackOverflow. See original answer at\n https://stackoverflow.com/questions/33345780.\n\n Parameters\n ----------\n data : array_like\n\n Returns\n -------\n prob : numpy.ndarray\n Cumulative probability\n quantiles : numpy.darray\n Quantiles\n \"\"\"\n\n sample = np.atleast_1d(data)\n\n # find the unique values and their corresponding counts\n quantiles, counts = np.unique(sample, return_counts=True)\n\n # take the cumulative sum of the counts and divide by the sample size to\n # get the cumulative probabilities between 0 and 1\n prob = np.cumsum(counts).astype(float) / sample.size\n\n # match MATLAB\n prob, quantiles = np.append([0], prob), np.append(quantiles[0], quantiles)\n\n return prob, quantiles\n\n\ndef struct_consensus(data, distance, hemiid):\n \"\"\"\n Calculates distance-dependent group consensus structural connectivity graph\n\n Takes as input a weighted stack of connectivity matrices with dimensions\n (N, N, S) where `N` is the number of nodes and `S` is the number of\n matrices or subjects. The matrices must be weighted, and ideally with\n continuous weights (e.g. fractional anisotropy rather than streamline\n count). The second input is a pairwise distance matrix, where distance(i,j)\n is the Euclidean distance between nodes i and j. 
The final input is an\n (N, 1) vector which labels nodes as belonging to the right (`hemiid==0`) or\n left (`hemiid=1`) hemisphere (note that these values can be flipped as long\n as `hemiid` contains only values of 0 and 1).\n\n This function estimates the average edge length distribution and builds\n a group-averaged connectivity matrix that approximates this distribution\n with density equal to the mean density across subjects.\n\n The algorithm works as follows:\n\n 1. Estimate the cumulative edge length distribution,\n 2. Divide the distribution into M length bins, one for each edge that will\n be added to the group-average matrix, and\n 3. Within each bin, select the edge that is most consistently expressed\n expressed across subjects, breaking ties according to average edge\n weight (which is why the input matrix `data` must be weighted).\n\n The algorithm works separately on within/between hemisphere links.\n\n Parameters\n ----------\n data : (N, N, S) array_like\n Weighted connectivity matrices (i.e., fractional anisotropy), where `N`\n is nodes and `S` is subjects\n distance : (N, N) array_like\n Array where `distance[i, j]` is the Euclidean distance between nodes\n `i` and `j`\n hemiid : (N, 1) array_like\n Hemisphere designation for `N` nodes where a value of 0/1 indicates\n node `N_{i}` is in the right/left hemisphere, respectively\n\n Returns\n -------\n consensus : (N, N) numpy.ndarray\n Binary, group-level connectivity matrix\n\n References\n ----------\n Betzel, R. F., Griffa, A., Hagmann, P., & Mišić, B. (2018). Distance-\n dependent consensus thresholds for generating group-representative\n structural brain networks. Network Neuroscience, 1-22.\n \"\"\"\n\n # confirm input shapes are as expected\n check_consistent_length(data, distance, hemiid)\n try:\n hemiid = check_array(hemiid, ensure_2d=True)\n except ValueError:\n raise ValueError('Provided hemiid must be a 2D array. 
Reshape your '\n 'data using array.reshape(-1, 1) and try again.')\n\n num_node, _, num_sub = data.shape # info on connectivity matrices\n pos_data = data > 0 # location of + values in matrix\n pos_data_count = pos_data.sum(axis=2) # num sub with + values at each node\n\n with np.errstate(divide='ignore', invalid='ignore'):\n average_weights = data.sum(axis=2) / pos_data_count\n\n # empty array to hold inter/intra hemispheric connections\n consensus = np.zeros((num_node, num_node, 2))\n\n for conn_type in range(2): # iterate through inter/intra hemisphere conn\n if conn_type == 0: # get inter hemisphere edges\n inter_hemi = (hemiid == 0) @ (hemiid == 1).T\n keep_conn = np.logical_or(inter_hemi, inter_hemi.T)\n else: # get intra hemisphere edges\n right_hemi = (hemiid == 0) @ (hemiid == 0).T\n left_hemi = (hemiid == 1) @ (hemiid == 1).T\n keep_conn = np.logical_or(right_hemi @ right_hemi.T,\n left_hemi @ left_hemi.T)\n\n # mask the distance array for only those edges we want to examine\n full_dist_conn = distance * keep_conn\n upper_dist_conn = np.atleast_3d(np.triu(full_dist_conn))\n\n # generate array of weighted (by distance), positive edges across subs\n pos_dist = pos_data * upper_dist_conn\n pos_dist = pos_dist[np.nonzero(pos_dist)]\n\n # determine average # of positive edges across subs\n # we will use this to bin the edge weights\n avg_conn_num = len(pos_dist) / num_sub\n\n # estimate empirical CDF of weighted, positive edges across subs\n cumprob, quantiles = _ecdf(pos_dist)\n cumprob = np.round(cumprob * avg_conn_num).astype(int)\n\n # empty array to hold group-average matrix for current connection type\n # (i.e., inter/intra hemispheric connections)\n group_conn_type = np.zeros((num_node, num_node))\n\n # iterate through bins (for edge weights)\n for n in range(1, int(avg_conn_num) + 1):\n # get current quantile of interest\n curr_quant = quantiles[np.logical_and(cumprob >= (n - 1),\n cumprob < n)]\n if curr_quant.size == 0:\n continue\n\n # find edges in distance connectivity matrix w/i current quantile\n mask = np.logical_and(full_dist_conn >= curr_quant.min(),\n full_dist_conn <= curr_quant.max())\n i, j = np.where(np.triu(mask)) # indices of edges of interest\n\n c = pos_data_count[i, j] # get num sub with + values at edges\n w = average_weights[i, j] # get averaged weight of edges\n\n # find locations of edges most commonly represented across subs\n indmax = np.argwhere(c == c.max())\n\n # determine index of most frequent edge; break ties with higher\n # weighted edge\n if indmax.size == 1: # only one edge found\n group_conn_type[i[indmax], j[indmax]] = 1\n else: # multiple edges found\n indmax = indmax[np.argmax(w[indmax])]\n group_conn_type[i[indmax], j[indmax]] = 1\n\n consensus[:, :, conn_type] = group_conn_type\n\n # collapse across hemispheric connections types and make symmetrical array\n consensus = consensus.sum(axis=2)\n consensus = np.logical_or(consensus, consensus.T).astype(int)\n\n return consensus\n\n\ndef binarize_network(network, retain=10, keep_diag=False):\n \"\"\"\n Keeps top `retain` % of connections in `network` and binarizes\n\n Uses the upper triangle for determining connection percentage, which may\n result in disconnected nodes. If this behavior is not desired see\n :py:func:`netneurotools.networks.threshold_network`.\n\n Parameters\n ----------\n network : (N, N) array_like\n Input graph\n retain : [0, 100] float, optional\n Percent connections to retain. 
Default: 10\n keep_diag : bool, optional\n Whether to keep the diagonal instead of setting it to 0. Default: False\n\n Returns\n -------\n binarized : (N, N) numpy.ndarray\n Binarized, thresholded graph\n\n See Also\n --------\n netneurotools.networks.threshold_network\n \"\"\"\n\n if retain < 0 or retain > 100:\n raise ValueError('Value provided for `retain` is outside [0, 100]: {}'\n .format(retain))\n\n prctile = 100 - retain\n triu = utils.get_triu(network)\n thresh = np.percentile(triu, prctile, axis=0, keepdims=True)\n binarized = np.array(network > thresh, dtype=int)\n\n if not keep_diag:\n binarized[np.diag_indices(len(binarized))] = 0\n\n return binarized\n\n\ndef threshold_network(network, retain=10):\n \"\"\"\n Keeps top `retain` % of connections in `network` and binarizes\n\n Uses a minimum spanning tree to ensure that no nodes are disconnected from\n the resulting thresholded graph\n\n Parameters\n ----------\n network : (N, N) array_like\n Input graph\n retain : [0, 100] float, optional\n Percent connections to retain. Default: 10\n\n Returns\n -------\n thresholded : (N, N) numpy.ndarray\n Binarized, thresholded graph\n\n See Also\n --------\n netneurotools.networks.binarize_network\n \"\"\"\n\n if retain < 0 or retain > 100:\n raise ValueError('Value provided for `retain` must be a percent '\n 'in range [0, 100]. Provided: {}'.format(retain))\n\n # get number of nodes in graph and invert weights (MINIMUM spanning tree)\n nodes = len(network)\n graph = np.triu(network * -1)\n\n # find MST and count # of edges in graph\n mst = csgraph.minimum_spanning_tree(graph).todense()\n mst_edges = np.sum(mst != 0)\n\n # determine # of remaining edges and ensure we're not over the limit\n remain = int((retain / 100) * ((nodes * (nodes - 1)) / 2)) - mst_edges\n if remain < 0:\n raise ValueError('Minimum spanning tree with {} edges exceeds desired '\n 'connection density of {}% ({} edges). Cannot '\n 'proceed with graph creation.'\n .format(mst_edges, retain, remain + mst_edges))\n\n # zero out edges already in MST and then get indices of next best edges\n graph -= mst\n inds = utils.get_triu(graph).argsort()[:remain]\n inds = tuple(e[inds] for e in np.triu_indices_from(graph, k=1))\n\n # add edges to MST, symmetrize, and convert to binary matrix\n mst[inds] = graph[inds]\n mst = np.array((mst + mst.T) != 0, dtype=int)\n\n return mst\n"
] | [
[
"numpy.cumsum",
"numpy.round",
"numpy.zeros_like",
"numpy.mean",
"numpy.unique",
"numpy.atleast_1d",
"numpy.argmax",
"scipy.sparse.csgraph.minimum_spanning_tree",
"numpy.triu",
"numpy.zeros",
"numpy.nonzero",
"numpy.triu_indices_from",
"numpy.logical_or",
"numpy.append",
"sklearn.utils.validation.check_random_state",
"numpy.errstate",
"numpy.corrcoef",
"numpy.array",
"numpy.logical_and",
"numpy.sum",
"sklearn.utils.validation.check_array",
"numpy.percentile",
"numpy.sign",
"sklearn.utils.validation.check_consistent_length"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dankiy/2019_IT | [
"21afdc44913dccf6746879fd075d20098db599cb"
] | [
"task4.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nnp.random.seed(0)\n\nM, N = 10, 5\n\ndef is_pareto_efficient(X):\n is_efficient = np.ones(len(X), dtype = bool)\n for i, c in enumerate(X):\n if is_efficient[i]:\n is_efficient[is_efficient] = np.any(X[is_efficient] > c, axis=1) \n is_efficient[i] = True \n return is_efficient\n\nX = np.random.sample((M, N))\n\neff = is_pareto_efficient(X)\nax = plt.subplot(111, projection=\"polar\")\nplt.thetagrids(np.arange(0, 360, 360/N))\n\nfor i in range(len(eff)):\n if eff[i] == True:\n ax.plot(np.append(np.arange(0, N, 1), 0) * 2 * math.pi/N, np.append(X[i, :], X[i, 0]), color=\"r\")\n else:\n ax.plot(np.append(np.arange(0, N, 1) * 2 * math.pi/N, 0), np.append(X[i, :], X[i, 0]), color=\"b\")\n"
] | [
[
"numpy.random.seed",
"numpy.arange",
"numpy.append",
"matplotlib.pyplot.subplot",
"numpy.any",
"numpy.random.sample"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
paulgowdy/l2m | [
"c1eb190a9117c249094c2ee8af74f7ee1b6e655f"
] | [
"collect_experience_2.py"
] | [
"from osim.env import L2M2019Env\nfrom osim.control.osim_loco_reflex_song2019 import OsimReflexCtrl\nimport numpy as np\nimport pickle\n\nmode = '2D'\ndifficulty = 1\nvisualize=False\nseed=None\nsim_dt = 0.01\nsim_t = 5\ntimstep_limit = int(round(sim_t/sim_dt))\n\n\nINIT_POSE = np.array([\n 1.699999999999999956e+00, # forward speed\n .5, # rightward speed\n 9.023245653983965608e-01, # pelvis height\n 2.012303881285582852e-01, # trunk lean\n 0*np.pi/180, # [right] hip adduct\n -6.952390849304798115e-01, # hip flex\n -3.231075259785813891e-01, # knee extend\n 1.709011708233401095e-01, # ankle flex\n 0*np.pi/180, # [left] hip adduct\n -5.282323914341899296e-02, # hip flex\n -8.041966456860847323e-01, # knee extend\n -1.745329251994329478e-01]) # ankle flex\n\nif mode is '2D':\n params = np.loadtxt('params2d.txt')\nelif mode is '3D':\n params = np.loadtxt('params_3D_init.txt')\n\n\n\n\n\nlocoCtrl = OsimReflexCtrl(mode=mode, dt=sim_dt)\n\ncontrol_env = L2M2019Env(visualize=visualize, seed=seed, difficulty=difficulty)\ncontrol_env.change_model(model=mode, difficulty=difficulty, seed=seed)\nobs_dict_action = control_env.reset(project=True, seed=seed, obs_as_dict=True, init_pose=INIT_POSE)\ncontrol_env.spec.timestep_limit = timstep_limit\n\nobs_env = L2M2019Env(visualize=False, seed=seed, difficulty=difficulty)\nobs_env.change_model(model=mode, difficulty=difficulty, seed=seed)\nobs_dict_record = obs_env.reset(project=False, seed=seed, obs_as_dict=False, init_pose=INIT_POSE)\nobs_env.spec.timestep_limit = timstep_limit\n\nwith open('norm_sample.p', 'rb') as f:\n\n norm_sample = pickle.load(f)\n\n means = norm_sample[0]\n stds = norm_sample[1]\n\n\ndef process_obs_dict(obs_dict):\n\n '''\n for k in obs_dict.keys():\n\n print(k)\n\n joint_pos\n joint_vel\n joint_acc\n body_pos\n body_vel\n body_acc\n body_pos_rot\n body_vel_rot\n body_acc_rot\n forces\n muscles\n markers\n misc\n v_tgt_field\n\n '''\n\n\n\n\n\n v_tgt = obs_dict['v_tgt_field']\n #print(v_tgt.shape) 2,11,11\n v_tgt = v_tgt.flatten() / 10.0\n\n\n new_obs = list(v_tgt)\n\n pelvis_pos = obs_dict['body_pos']['pelvis']\n\n new_obs.extend(pelvis_pos)\n\n for k in obs_dict['body_pos'].keys():\n\n if k != 'pelvis':\n\n #print(obs_dict['body_pos'][k])\n #print([a - b for a, b in zip(obs_dict['body_pos'][k], pelvis_pos)])\n #print('')\n\n new_obs.extend([a - b for a, b in zip(obs_dict['body_pos'][k], pelvis_pos)])\n\n #'muscles', 'misc'\n # , 'forces'\n\n for k in ['joint_pos', 'joint_vel', 'joint_acc', 'body_vel', 'body_acc', 'body_pos_rot', 'body_vel_rot', 'body_acc_rot']:\n\n for sub_k in obs_dict[k].keys():\n\n new_obs.extend(obs_dict[k][sub_k])\n\n new_obs = [a - b for a,b in zip(new_obs, means)]\n new_obs = [float(a)/float(b) for a,b in zip( new_obs, stds)]\n\n\n\n return new_obs\n\n\n\n\n\ntotal_reward = 0\nt = 0\ni = 0\n\nobs_collect = []\naction_collect = []\n\nwhile True:\n i += 1\n t += sim_dt\n\n proc_obs = process_obs_dict(obs_dict_record)\n\n #print(proc_obs)\n\n locoCtrl.set_control_params(params)\n\n action = locoCtrl.update(obs_dict_action)\n\n obs_collect.append(proc_obs)\n action_collect.append(action)\n\n obs_dict_action, reward, done, info = control_env.step(action, project = True, obs_as_dict=True)\n obs_dict_record, reward_obs, done_obs, info_obs = obs_env.step(action, project = False, obs_as_dict=False)\n\n print(i, reward)\n #print(action)\n #print(len(obs_dict_record))\n\n print('')\n total_reward += reward\n\n if done:\n break\n\nprint(' score={} time={}sec'.format(total_reward, t))\n\nobs_collect = 
np.array(obs_collect)\naction_collect = np.array(action_collect)\n\nprint(obs_collect.shape)\nprint(action_collect.shape)\n\nwith open('saved_experience_normed.p', 'wb') as f:\n\n pickle.dump([obs_collect, action_collect], f)\n"
] | [
[
"numpy.array",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
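The collect_experience_2.py record above ends by normalizing every observation feature with the per-feature means and standard deviations unpickled from norm_sample.p. A minimal, self-contained sketch of that normalization step (the vector length and the statistics below are placeholders for illustration, not values from the script):

import numpy as np

def normalize_obs(raw_obs, means, stds):
    # Element-wise (x - mean) / std, mirroring the last two list
    # comprehensions at the end of process_obs_dict().
    return [(float(x) - float(m)) / float(s) for x, m, s in zip(raw_obs, means, stds)]

# Placeholder statistics standing in for the [means, stds] pair the script
# loads from norm_sample.p; the real vectors match the flattened
# observation length produced by process_obs_dict().
means = np.zeros(10)
stds = np.ones(10)
obs = list(np.random.randn(10))
print(normalize_obs(obs, means, stds))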
Aerochip7/gan | [
"d3648c0f3996bd9e5564c05a44ff4215e5156cbd",
"d3648c0f3996bd9e5564c05a44ff4215e5156cbd",
"d3648c0f3996bd9e5564c05a44ff4215e5156cbd",
"d3648c0f3996bd9e5564c05a44ff4215e5156cbd"
] | [
"tensorflow_gan/examples/mnist/conditional_eval.py",
"tensorflow_gan/python/estimator/stargan_estimator_test.py",
"tensorflow_gan/examples/cifar/eval_lib.py",
"tensorflow_gan/python/losses/losses_impl.py"
] | [
"# coding=utf-8\n# Copyright 2022 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Evaluates a conditional TF-GAN trained MNIST model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow_gan.examples.mnist import conditional_eval_lib\n\nflags.DEFINE_string('checkpoint_dir', '/tmp/mnist/',\n 'Directory where the model was written to.')\n\nflags.DEFINE_string('eval_dir', '/tmp/mnist/',\n 'Directory where the results are saved to.')\n\nflags.DEFINE_integer('num_images_per_class', 10,\n 'Number of images to generate per class.')\n\nflags.DEFINE_integer('noise_dims', 64,\n 'Dimensions of the generator noise vector')\n\nflags.DEFINE_integer(\n 'max_number_of_evaluations', None,\n 'Number of times to run evaluation. If `None`, run '\n 'forever.')\n\nflags.DEFINE_boolean('write_to_disk', True, 'If `True`, run images to disk.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n hparams = conditional_eval_lib.HParams(FLAGS.checkpoint_dir, FLAGS.eval_dir,\n FLAGS.num_images_per_class,\n FLAGS.noise_dims,\n FLAGS.max_number_of_evaluations,\n FLAGS.write_to_disk)\n conditional_eval_lib.evaluate(hparams, run_eval_loop=True)\n\n\nif __name__ == '__main__':\n tf.disable_v2_behavior()\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2022 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for TF-GAN's stargan_estimator.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport shutil\nimport tempfile\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport six\n\nimport tensorflow as tf\nimport tensorflow_gan as tfgan\n\n# Private functions to test.\nfrom tensorflow_gan.python.estimator.stargan_estimator import get_estimator_spec\nfrom tensorflow_gan.python.estimator.stargan_estimator import get_gan_model\n\n\ndef dummy_generator_fn(input_data, input_data_domain_label, mode):\n del input_data_domain_label, mode\n\n return tf.compat.v1.get_variable('dummy_g', initializer=0.5) * input_data\n\n\ndef dummy_discriminator_fn(input_data, num_domains, mode):\n del mode\n\n hidden = tf.compat.v1.layers.flatten(input_data)\n output_src = tf.reduce_mean(input_tensor=hidden, axis=1)\n output_cls = tf.compat.v1.layers.dense(hidden, num_domains, name='debug')\n\n return output_src, output_cls\n\n\nclass StarGetGANModelTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Tests that `StarGetGANModel` produces the correct model.\"\"\"\n\n @parameterized.named_parameters(('train', tf.estimator.ModeKeys.TRAIN),\n ('eval', tf.estimator.ModeKeys.EVAL),\n ('predict', tf.estimator.ModeKeys.PREDICT))\n def test_get_gan_model(self, mode):\n with tf.Graph().as_default():\n input_data = tf.ones([6, 4, 4, 3])\n input_data_domain_label = tf.one_hot([0] * 6, 5)\n gan_model = get_gan_model(\n mode,\n dummy_generator_fn,\n dummy_discriminator_fn,\n input_data,\n input_data_domain_label,\n add_summaries=False)\n\n self.assertEqual(input_data, gan_model.input_data)\n self.assertIsNotNone(gan_model.generated_data)\n self.assertIsNotNone(gan_model.generated_data_domain_target)\n self.assertLen(gan_model.generator_variables, 1)\n self.assertIsNotNone(gan_model.generator_scope)\n self.assertIsNotNone(gan_model.generator_fn)\n if mode == tf.estimator.ModeKeys.PREDICT:\n self.assertIsNone(gan_model.input_data_domain_label)\n self.assertEqual(input_data_domain_label,\n gan_model.generated_data_domain_target)\n self.assertIsNone(gan_model.reconstructed_data)\n self.assertIsNone(gan_model.discriminator_input_data_source_predication)\n self.assertIsNone(\n gan_model.discriminator_generated_data_source_predication)\n self.assertIsNone(gan_model.discriminator_input_data_domain_predication)\n self.assertIsNone(\n gan_model.discriminator_generated_data_domain_predication)\n self.assertIsNone(gan_model.discriminator_variables)\n self.assertIsNone(gan_model.discriminator_scope)\n self.assertIsNone(gan_model.discriminator_fn)\n else:\n self.assertEqual(input_data_domain_label,\n gan_model.input_data_domain_label)\n self.assertIsNotNone(gan_model.reconstructed_data.shape)\n self.assertIsNotNone(\n gan_model.discriminator_input_data_source_predication)\n self.assertIsNotNone(\n 
gan_model.discriminator_generated_data_source_predication)\n self.assertIsNotNone(\n gan_model.discriminator_input_data_domain_predication)\n self.assertIsNotNone(\n gan_model.discriminator_generated_data_domain_predication)\n self.assertLen(gan_model.discriminator_variables, 2) # 1 FC layer\n self.assertIsNotNone(gan_model.discriminator_scope)\n self.assertIsNotNone(gan_model.discriminator_fn)\n\n\ndef get_dummy_gan_model():\n \"\"\"Similar to get_gan_model().\"\"\"\n # TODO(joelshor): Find a better way of creating a variable scope.\n with tf.compat.v1.variable_scope('generator') as gen_scope:\n gen_var = tf.compat.v1.get_variable('dummy_var', initializer=0.0)\n with tf.compat.v1.variable_scope('discriminator') as dis_scope:\n dis_var = tf.compat.v1.get_variable('dummy_var', initializer=0.0)\n return tfgan.StarGANModel(\n input_data=tf.ones([1, 2, 2, 3]),\n input_data_domain_label=tf.ones([1, 2]),\n generated_data=tf.ones([1, 2, 2, 3]),\n generated_data_domain_target=tf.ones([1, 2]),\n reconstructed_data=tf.ones([1, 2, 2, 3]),\n discriminator_input_data_source_predication=tf.ones([1]) * dis_var,\n discriminator_generated_data_source_predication=tf.ones(\n [1]) * gen_var * dis_var,\n discriminator_input_data_domain_predication=tf.ones([1, 2]) * dis_var,\n discriminator_generated_data_domain_predication=tf.ones([1, 2]) * gen_var\n * dis_var,\n generator_variables=[gen_var],\n generator_scope=gen_scope,\n generator_fn=None,\n discriminator_variables=[dis_var],\n discriminator_scope=dis_scope,\n discriminator_fn=None)\n\n\ndef dummy_loss_fn(gan_model):\n loss = tf.reduce_sum(\n input_tensor=gan_model.discriminator_input_data_domain_predication -\n gan_model.discriminator_generated_data_domain_predication)\n loss += tf.reduce_sum(input_tensor=gan_model.input_data -\n gan_model.generated_data)\n return tfgan.GANLoss(loss, loss)\n\n\ndef get_metrics(gan_model):\n return {\n 'mse_custom_metric':\n tf.compat.v1.metrics.mean_squared_error(gan_model.input_data,\n gan_model.generated_data)\n }\n\n\nclass GetEstimatorSpecTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Tests that the EstimatorSpec is constructed appropriately.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(GetEstimatorSpecTest, cls).setUpClass()\n cls._generator_optimizer = tf.compat.v1.train.GradientDescentOptimizer(1.0)\n cls._discriminator_optimizer = tf.compat.v1.train.GradientDescentOptimizer(\n 1.0)\n\n @parameterized.named_parameters(('train', tf.estimator.ModeKeys.TRAIN),\n ('eval', tf.estimator.ModeKeys.EVAL),\n ('predict', tf.estimator.ModeKeys.PREDICT))\n def test_get_estimator_spec(self, mode):\n with tf.Graph().as_default():\n self._gan_model = get_dummy_gan_model()\n spec = get_estimator_spec(\n mode,\n self._gan_model,\n loss_fn=dummy_loss_fn,\n get_eval_metric_ops_fn=get_metrics,\n generator_optimizer=self._generator_optimizer,\n discriminator_optimizer=self._discriminator_optimizer)\n\n self.assertEqual(mode, spec.mode)\n if mode == tf.estimator.ModeKeys.PREDICT:\n self.assertEqual(self._gan_model.generated_data, spec.predictions)\n elif mode == tf.estimator.ModeKeys.TRAIN:\n self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar\n self.assertIsNotNone(spec.train_op)\n self.assertIsNotNone(spec.training_hooks)\n elif mode == tf.estimator.ModeKeys.EVAL:\n self.assertEqual(self._gan_model.generated_data, spec.predictions)\n self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar\n self.assertIsNotNone(spec.eval_metric_ops)\n\n\n# TODO(joelshor): Add pandas tf.test\nclass 
StarGANEstimatorIntegrationTest(tf.test.TestCase):\n\n def setUp(self):\n super(StarGANEstimatorIntegrationTest, self).setUp()\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n super(StarGANEstimatorIntegrationTest, self).tearDown()\n if self._model_dir:\n tf.compat.v1.summary.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n def _test_complete_flow(self,\n train_input_fn,\n eval_input_fn,\n predict_input_fn,\n prediction_size,\n lr_decay=False):\n\n def make_opt():\n gstep = tf.compat.v1.train.get_or_create_global_step()\n lr = tf.compat.v1.train.exponential_decay(1.0, gstep, 10, 0.9)\n return tf.compat.v1.train.GradientDescentOptimizer(lr)\n\n gopt = make_opt if lr_decay else tf.compat.v1.train.GradientDescentOptimizer(\n 1.0)\n dopt = make_opt if lr_decay else tf.compat.v1.train.GradientDescentOptimizer(\n 1.0)\n est = tfgan.estimator.StarGANEstimator(\n generator_fn=dummy_generator_fn,\n discriminator_fn=dummy_discriminator_fn,\n loss_fn=dummy_loss_fn,\n generator_optimizer=gopt,\n discriminator_optimizer=dopt,\n get_eval_metric_ops_fn=get_metrics,\n model_dir=self._model_dir)\n\n # TRAIN\n num_steps = 10\n est.train(train_input_fn, steps=num_steps)\n\n # EVALUTE\n scores = est.evaluate(eval_input_fn)\n self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])\n self.assertIn('loss', six.iterkeys(scores))\n self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],\n scores['loss'])\n self.assertIn('mse_custom_metric', six.iterkeys(scores))\n\n # PREDICT\n predictions = np.array([x for x in est.predict(predict_input_fn)])\n\n self.assertAllEqual(prediction_size, predictions.shape)\n\n @staticmethod\n def _numpy_input_fn_wrapper(numpy_input_fn, batch_size, label_size):\n \"\"\"Wrapper to remove the dictionary in numpy_input_fn.\n\n NOTE:\n We create the domain_label here because the model expect a fully define\n batch_size from the input.\n\n Args:\n numpy_input_fn: input_fn created from numpy_io\n batch_size: (int) number of items for each batch\n label_size: (int) number of domains\n\n Returns:\n a new input_fn\n \"\"\"\n\n def new_input_fn():\n features = numpy_input_fn()\n return features['x'], tf.one_hot([0] * batch_size, label_size)\n\n return new_input_fn\n\n def test_numpy_input_fn(self):\n \"\"\"Tests complete flow with numpy_input_fn.\"\"\"\n batch_size = 5\n img_size = 8\n channel_size = 3\n label_size = 3\n image_data = np.zeros([batch_size, img_size, img_size, channel_size],\n dtype=np.float32)\n train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={'x': image_data},\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={'x': image_data}, batch_size=batch_size, shuffle=False)\n predict_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={'x': image_data}, shuffle=False)\n\n train_input_fn = self._numpy_input_fn_wrapper(train_input_fn, batch_size,\n label_size)\n eval_input_fn = self._numpy_input_fn_wrapper(eval_input_fn, batch_size,\n label_size)\n predict_input_fn = self._numpy_input_fn_wrapper(predict_input_fn,\n batch_size, label_size)\n\n predict_input_fn = tfgan.estimator.stargan_prediction_input_fn_wrapper(\n predict_input_fn)\n\n self._test_complete_flow(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n prediction_size=[batch_size, img_size, img_size, channel_size])\n\n\nclass StarGANEstimatorParamsTest(tf.test.TestCase):\n\n def setUp(self):\n 
super(StarGANEstimatorParamsTest, self).setUp()\n self._model_dir = self.get_temp_dir()\n\n def tearDown(self):\n super(StarGANEstimatorParamsTest, self).tearDown()\n tf.compat.v1.summary.FileWriterCache.clear()\n\n def test_params_used(self):\n def train_input_fn(params):\n self.assertIn('batch_size', params)\n data = np.zeros([params['batch_size'], 4], dtype=np.float32)\n return data, data\n\n est = tfgan.estimator.StarGANEstimator(\n generator_fn=dummy_generator_fn,\n discriminator_fn=dummy_discriminator_fn,\n loss_fn=dummy_loss_fn,\n generator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(1.0),\n discriminator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(\n 1.0),\n model_dir=self._model_dir,\n params={'batch_size': 4})\n\n est.train(train_input_fn, steps=1)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2022 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Evaluates a TF-GAN trained CIFAR model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v1 as tf\nimport tensorflow_gan as tfgan\nfrom tensorflow_gan.examples import evaluation_helper as evaluation\n\nfrom tensorflow_gan.examples.cifar import data_provider\nfrom tensorflow_gan.examples.cifar import networks\nfrom tensorflow_gan.examples.cifar import util\n\nHParams = collections.namedtuple('HParams', [\n 'master', 'checkpoint_dir', 'eval_dir', 'num_images_generated',\n 'num_inception_images', 'eval_real_images',\n 'eval_frechet_inception_distance', 'max_number_of_evaluations',\n 'write_to_disk'\n])\n\n\ndef evaluate(hparams, run_eval_loop=True):\n \"\"\"Runs an evaluation loop.\n\n Args:\n hparams: An HParams instance containing the eval hyperparameters.\n run_eval_loop: Whether to run the full eval loop. Set to False for testing.\n \"\"\"\n # Fetch and generate images to run through Inception.\n with tf.name_scope('inputs'):\n real_data, _ = data_provider.provide_data(\n 'test', hparams.num_images_generated, shuffle=False)\n generated_data = _get_generated_data(hparams.num_images_generated)\n\n # Compute Frechet Inception Distance.\n if hparams.eval_frechet_inception_distance:\n fid = util.get_frechet_inception_distance(real_data, generated_data,\n hparams.num_images_generated,\n hparams.num_inception_images)\n tf.summary.scalar('frechet_inception_distance', fid)\n\n # Compute normal Inception scores.\n if hparams.eval_real_images:\n inc_score = util.get_inception_scores(real_data,\n hparams.num_images_generated,\n hparams.num_inception_images)\n else:\n inc_score = util.get_inception_scores(generated_data,\n hparams.num_images_generated,\n hparams.num_inception_images)\n tf.summary.scalar('inception_score', inc_score)\n\n # Create ops that write images to disk.\n image_write_ops = None\n if hparams.num_images_generated >= 100 and hparams.write_to_disk:\n reshaped_imgs = tfgan.eval.image_reshaper(generated_data[:100], num_cols=10)\n uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)\n image_write_ops = tf.io.write_file(\n '%s/%s' % (hparams.eval_dir, 'unconditional_cifar10.png'),\n tf.image.encode_png(uint8_images[0]))\n\n # For unit testing, use `run_eval_loop=False`.\n if not run_eval_loop: return\n evaluation.evaluate_repeatedly(\n hparams.checkpoint_dir,\n master=hparams.master,\n hooks=[\n evaluation.SummaryAtEndHook(hparams.eval_dir),\n evaluation.StopAfterNEvalsHook(1)\n ],\n eval_ops=image_write_ops,\n max_number_of_evaluations=hparams.max_number_of_evaluations)\n\n\ndef _get_generated_data(num_images_generated):\n \"\"\"Get generated images.\"\"\"\n noise = tf.random.normal([num_images_generated, 64])\n generator_inputs = noise\n generator_fn = networks.generator\n # In order for variables to load, use the same variable scope as 
in the\n # train job.\n with tf.variable_scope('Generator'):\n data = generator_fn(generator_inputs, is_training=False)\n\n return data\n",
"# coding=utf-8\n# Copyright 2022 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Losses that are useful for training GANs.\n\nThe losses belong to two main groups, but there are others that do not:\n1) xxxxx_generator_loss\n2) xxxxx_discriminator_loss\n\nExample:\n1) wasserstein_generator_loss\n2) wasserstein_discriminator_loss\n\nOther example:\nwasserstein_gradient_penalty\n\nAll losses must be able to accept 1D or 2D Tensors, so as to be compatible with\npatchGAN style losses (https://arxiv.org/abs/1611.07004).\n\nTo make these losses usable in the TF-GAN framework, please create a tuple\nversion of the losses with `losses_utils.py`.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_gan.python import contrib_utils as contrib\n\n\n__all__ = [\n 'acgan_discriminator_loss',\n 'acgan_generator_loss',\n 'least_squares_discriminator_loss',\n 'least_squares_generator_loss',\n 'modified_discriminator_loss',\n 'modified_generator_loss',\n 'minimax_discriminator_loss',\n 'minimax_generator_loss',\n 'relativistic_discriminator_loss',\n 'relativistic_generator_loss',\n 'wasserstein_discriminator_loss',\n 'wasserstein_hinge_generator_loss',\n 'wasserstein_hinge_discriminator_loss',\n 'wasserstein_generator_loss',\n 'wasserstein_gradient_penalty',\n 'mutual_information_penalty',\n 'combine_adversarial_loss',\n 'cycle_consistency_loss',\n]\n\n\ndef _to_float(tensor):\n return tf.cast(tensor, tf.float32)\n\n\n# Wasserstein losses from `Wasserstein GAN` (https://arxiv.org/abs/1701.07875).\ndef wasserstein_generator_loss(\n discriminator_gen_outputs,\n weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Wasserstein generator loss for GANs.\n\n See `Wasserstein GAN` (https://arxiv.org/abs/1701.07875) for more details.\n\n Args:\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_gen_outputs`, and must be broadcastable to\n `discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or\n the same as the corresponding dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add detailed summaries for the loss.\n\n Returns:\n A loss Tensor. 
The shape depends on `reduction`.\n \"\"\"\n with tf.compat.v1.name_scope(scope, 'generator_wasserstein_loss',\n (discriminator_gen_outputs, weights)) as scope:\n discriminator_gen_outputs = _to_float(discriminator_gen_outputs)\n\n loss = - discriminator_gen_outputs\n loss = tf.compat.v1.losses.compute_weighted_loss(loss, weights, scope,\n loss_collection, reduction)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('generator_wass_loss', loss)\n\n return loss\n\n\ndef wasserstein_discriminator_loss(\n discriminator_real_outputs,\n discriminator_gen_outputs,\n real_weights=1.0,\n generated_weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Wasserstein discriminator loss for GANs.\n\n See `Wasserstein GAN` (https://arxiv.org/abs/1701.07875) for more details.\n\n Args:\n discriminator_real_outputs: Discriminator output on real data.\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n real_weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_real_outputs`, and must be broadcastable to\n `discriminator_real_outputs` (i.e., all dimensions must be either `1`, or\n the same as the corresponding dimension).\n generated_weights: Same as `real_weights`, but for\n `discriminator_gen_outputs`.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. The shape depends on `reduction`.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope, 'discriminator_wasserstein_loss',\n (discriminator_real_outputs, discriminator_gen_outputs, real_weights,\n generated_weights)) as scope:\n discriminator_real_outputs = _to_float(discriminator_real_outputs)\n discriminator_gen_outputs = _to_float(discriminator_gen_outputs)\n discriminator_real_outputs.shape.assert_is_compatible_with(\n discriminator_gen_outputs.shape)\n\n loss_on_generated = tf.compat.v1.losses.compute_weighted_loss(\n discriminator_gen_outputs,\n generated_weights,\n scope,\n loss_collection=None,\n reduction=reduction)\n loss_on_real = tf.compat.v1.losses.compute_weighted_loss(\n discriminator_real_outputs,\n real_weights,\n scope,\n loss_collection=None,\n reduction=reduction)\n loss = loss_on_generated - loss_on_real\n tf.compat.v1.losses.add_loss(loss, loss_collection)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('discriminator_gen_wass_loss',\n loss_on_generated)\n tf.compat.v1.summary.scalar('discriminator_real_wass_loss', loss_on_real)\n tf.compat.v1.summary.scalar('discriminator_wass_loss', loss)\n\n return loss\n\n\nwasserstein_hinge_generator_loss = wasserstein_generator_loss\nwasserstein_hinge_generator_loss.__name__ = 'wasserstein_hinge_generator_loss'\n\n\ndef wasserstein_hinge_discriminator_loss(\n discriminator_real_outputs,\n discriminator_gen_outputs,\n real_weights=1.0,\n generated_weights=1.0,\n real_hinge=1.0,\n generated_hinge=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Hinged wasserstein discriminator loss for GANs.\n\n See `Spectral Normalization for Generative Adversarial Networks`\n (https://arxiv.org/abs/1802.05957).\n\n Args:\n discriminator_real_outputs: 
Discriminator output on real data.\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n real_weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_real_outputs`, and must be broadcastable to\n `discriminator_real_outputs` (i.e., all dimensions must be either `1`, or\n the same as the corresponding dimension).\n generated_weights: Same as `real_weights`, but for\n `discriminator_gen_outputs`.\n real_hinge: Hinge for the logits from the real data.\n generated_hinge: Hinge for the logits from the generated data.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. The shape depends on `reduction`.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope, 'discriminator_wasserstein_hinge_loss',\n (discriminator_real_outputs, discriminator_gen_outputs, real_weights,\n generated_weights)) as scope:\n discriminator_real_outputs = _to_float(discriminator_real_outputs)\n discriminator_gen_outputs = _to_float(discriminator_gen_outputs)\n discriminator_real_outputs.shape.assert_is_compatible_with(\n discriminator_gen_outputs.shape)\n\n # Compute the hinge.\n hinged_real = tf.nn.relu(real_hinge - discriminator_real_outputs)\n hinged_gen = tf.nn.relu(generated_hinge + discriminator_gen_outputs)\n\n # Average.\n loss_on_real = tf.compat.v1.losses.compute_weighted_loss(\n hinged_real,\n real_weights,\n scope,\n loss_collection=None,\n reduction=reduction)\n loss_on_generated = tf.compat.v1.losses.compute_weighted_loss(\n hinged_gen,\n generated_weights,\n scope,\n loss_collection=None,\n reduction=reduction)\n loss = loss_on_generated + loss_on_real\n tf.compat.v1.losses.add_loss(loss, loss_collection)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('discriminator_gen_wass_hinge_loss',\n loss_on_generated)\n tf.compat.v1.summary.scalar('discriminator_real_wass_hinge_loss',\n loss_on_real)\n tf.compat.v1.summary.scalar('discriminator_wass_hinge_loss', loss)\n\n return loss\n\n\n# ACGAN losses from `Conditional Image Synthesis With Auxiliary Classifier GANs`\n# (https://arxiv.org/abs/1610.09585).\ndef acgan_discriminator_loss(\n discriminator_real_classification_logits,\n discriminator_gen_classification_logits,\n one_hot_labels,\n label_smoothing=0.0,\n real_weights=1.0,\n generated_weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"ACGAN loss for the discriminator.\n\n The ACGAN loss adds a classification loss to the conditional discriminator.\n Therefore, the discriminator must output a tuple consisting of\n (1) the real/fake prediction and\n (2) the logits for the classification (usually the last conv layer,\n flattened).\n\n For more details:\n ACGAN: https://arxiv.org/abs/1610.09585\n\n Args:\n discriminator_real_classification_logits: Classification logits for real\n data.\n discriminator_gen_classification_logits: Classification logits for generated\n data.\n one_hot_labels: A Tensor holding one-hot labels for the batch.\n label_smoothing: A float in [0, 1]. 
If greater than 0, smooth the labels for\n \"discriminator on real data\" as suggested in\n https://arxiv.org/pdf/1701.00160\n real_weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_real_outputs`, and must be broadcastable to\n `discriminator_real_outputs` (i.e., all dimensions must be either `1`, or\n the same as the corresponding dimension).\n generated_weights: Same as `real_weights`, but for\n `discriminator_gen_classification_logits`.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. Shape depends on `reduction`.\n\n Raises:\n TypeError: If the discriminator does not output a tuple.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope, 'acgan_discriminator_loss',\n (discriminator_real_classification_logits,\n discriminator_gen_classification_logits, one_hot_labels)) as scope:\n loss_on_generated = tf.compat.v1.losses.softmax_cross_entropy(\n one_hot_labels,\n discriminator_gen_classification_logits,\n weights=generated_weights,\n scope=scope,\n loss_collection=None,\n reduction=reduction)\n loss_on_real = tf.compat.v1.losses.softmax_cross_entropy(\n one_hot_labels,\n discriminator_real_classification_logits,\n weights=real_weights,\n label_smoothing=label_smoothing,\n scope=scope,\n loss_collection=None,\n reduction=reduction)\n loss = loss_on_generated + loss_on_real\n tf.compat.v1.losses.add_loss(loss, loss_collection)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('discriminator_gen_ac_loss',\n loss_on_generated)\n tf.compat.v1.summary.scalar('discriminator_real_ac_loss', loss_on_real)\n tf.compat.v1.summary.scalar('discriminator_ac_loss', loss)\n\n return loss\n\n\ndef acgan_generator_loss(\n discriminator_gen_classification_logits,\n one_hot_labels,\n weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"ACGAN loss for the generator.\n\n The ACGAN loss adds a classification loss to the conditional discriminator.\n Therefore, the discriminator must output a tuple consisting of\n (1) the real/fake prediction and\n (2) the logits for the classification (usually the last conv layer,\n flattened).\n\n For more details:\n ACGAN: https://arxiv.org/abs/1610.09585\n\n Args:\n discriminator_gen_classification_logits: Classification logits for generated\n data.\n one_hot_labels: A Tensor holding one-hot labels for the batch.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_gen_classification_logits`, and must be broadcastable to\n `discriminator_gen_classification_logits` (i.e., all dimensions must be\n either `1`, or the same as the corresponding dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. 
Shape depends on `reduction`.\n\n Raises:\n ValueError: if arg module not either `generator` or `discriminator`\n TypeError: if the discriminator does not output a tuple.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope, 'acgan_generator_loss',\n (discriminator_gen_classification_logits, one_hot_labels)) as scope:\n loss = tf.compat.v1.losses.softmax_cross_entropy(\n one_hot_labels,\n discriminator_gen_classification_logits,\n weights=weights,\n scope=scope,\n loss_collection=loss_collection,\n reduction=reduction)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('generator_ac_loss', loss)\n\n return loss\n\n\n# Wasserstein Gradient Penalty losses from `Improved Training of Wasserstein\n# GANs` (https://arxiv.org/abs/1704.00028).\n\n\ndef wasserstein_gradient_penalty(\n real_data,\n generated_data,\n generator_inputs,\n discriminator_fn,\n discriminator_scope,\n epsilon=1e-10,\n target=1.0,\n one_sided=False,\n weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"The gradient penalty for the Wasserstein discriminator loss.\n\n See `Improved Training of Wasserstein GANs`\n (https://arxiv.org/abs/1704.00028) for more details.\n\n Args:\n real_data: Real data.\n generated_data: Output of the generator.\n generator_inputs: Exact argument to pass to the generator, which is used\n as optional conditioning to the discriminator.\n discriminator_fn: A discriminator function that conforms to TF-GAN API.\n discriminator_scope: If not `None`, reuse discriminators from this scope.\n epsilon: A small positive number added for numerical stability when\n computing the gradient norm.\n target: Optional Python number or `Tensor` indicating the target value of\n gradient norm. Defaults to 1.0.\n one_sided: If `True`, penalty proposed in https://arxiv.org/abs/1709.08894\n is used. Defaults to `False`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `real_data` and `generated_data`, and must be broadcastable to\n them (i.e., all dimensions must be either `1`, or the same as the\n corresponding dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. 
The shape depends on `reduction`.\n\n Raises:\n ValueError: If the rank of data Tensors is unknown.\n RuntimeError: If TensorFlow is executing eagerly.\n \"\"\"\n if tf.executing_eagerly():\n raise RuntimeError('Can\\'t use `tf.gradient` when executing eagerly.')\n with tf.compat.v1.name_scope(scope, 'wasserstein_gradient_penalty',\n (real_data, generated_data)) as scope:\n real_data = tf.convert_to_tensor(value=real_data)\n generated_data = tf.convert_to_tensor(value=generated_data)\n if real_data.shape.ndims is None:\n raise ValueError('`real_data` can\\'t have unknown rank.')\n if generated_data.shape.ndims is None:\n raise ValueError('`generated_data` can\\'t have unknown rank.')\n\n differences = generated_data - real_data\n batch_size = (tf.compat.dimension_value(differences.shape.dims[0]) or\n tf.shape(input=differences)[0])\n alpha_shape = [batch_size] + [1] * (differences.shape.ndims - 1)\n alpha = tf.random.uniform(shape=alpha_shape)\n interpolates = real_data + (alpha * differences)\n\n with tf.compat.v1.name_scope(\n ''): # Clear scope so update ops are added properly.\n # Reuse variables if variables already exists.\n with tf.compat.v1.variable_scope(\n discriminator_scope, 'gpenalty_dscope',\n reuse=tf.compat.v1.AUTO_REUSE):\n disc_interpolates = discriminator_fn(interpolates, generator_inputs)\n\n if isinstance(disc_interpolates, tuple):\n # ACGAN case: disc outputs more than one tensor\n disc_interpolates = disc_interpolates[0]\n\n gradients = tf.gradients(ys=disc_interpolates, xs=interpolates)[0]\n gradient_squares = tf.reduce_sum(\n input_tensor=tf.square(gradients),\n axis=list(range(1, gradients.shape.ndims)))\n # Propagate shape information, if possible.\n if isinstance(batch_size, int):\n gradient_squares.set_shape([\n batch_size] + gradient_squares.shape.as_list()[1:])\n # For numerical stability, add epsilon to the sum before taking the square\n # root. Note tf.norm does not add epsilon.\n slopes = tf.sqrt(gradient_squares + epsilon)\n penalties = slopes / target - 1.0\n if one_sided:\n penalties = tf.maximum(0., penalties)\n penalties_squared = tf.square(penalties)\n penalty = tf.compat.v1.losses.compute_weighted_loss(\n penalties_squared,\n weights,\n scope=scope,\n loss_collection=loss_collection,\n reduction=reduction)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('gradient_penalty_loss', penalty)\n\n return penalty\n\n\n# Original losses from `Generative Adversarial Nets`\n# (https://arxiv.org/abs/1406.2661).\n\n\ndef minimax_discriminator_loss(\n discriminator_real_outputs,\n discriminator_gen_outputs,\n label_smoothing=0.25,\n real_weights=1.0,\n generated_weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Original minimax discriminator loss for GANs, with label smoothing.\n\n Note that the authors don't recommend using this loss. A more practically\n useful loss is `modified_discriminator_loss`.\n\n L = - real_weights * log(sigmoid(D(x)))\n - generated_weights * log(1 - sigmoid(D(G(z))))\n\n See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more\n details.\n\n Args:\n discriminator_real_outputs: Discriminator output on real data.\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n label_smoothing: The amount of smoothing for positive labels. 
This technique\n is taken from `Improved Techniques for Training GANs`\n (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.\n real_weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `real_data`, and must be broadcastable to `real_data` (i.e., all\n dimensions must be either `1`, or the same as the corresponding\n dimension).\n generated_weights: Same as `real_weights`, but for `generated_data`.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. The shape depends on `reduction`.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope, 'discriminator_minimax_loss',\n (discriminator_real_outputs, discriminator_gen_outputs, real_weights,\n generated_weights, label_smoothing)) as scope:\n\n # -log((1 - label_smoothing) - sigmoid(D(x)))\n loss_on_real = tf.compat.v1.losses.sigmoid_cross_entropy(\n tf.ones_like(discriminator_real_outputs),\n discriminator_real_outputs,\n real_weights,\n label_smoothing,\n scope,\n loss_collection=None,\n reduction=reduction)\n # -log(- sigmoid(D(G(x))))\n loss_on_generated = tf.compat.v1.losses.sigmoid_cross_entropy(\n tf.zeros_like(discriminator_gen_outputs),\n discriminator_gen_outputs,\n generated_weights,\n scope=scope,\n loss_collection=None,\n reduction=reduction)\n\n loss = loss_on_real + loss_on_generated\n tf.compat.v1.losses.add_loss(loss, loss_collection)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('discriminator_gen_minimax_loss',\n loss_on_generated)\n tf.compat.v1.summary.scalar('discriminator_real_minimax_loss',\n loss_on_real)\n tf.compat.v1.summary.scalar('discriminator_minimax_loss', loss)\n\n return loss\n\n\ndef minimax_generator_loss(\n discriminator_gen_outputs,\n label_smoothing=0.0,\n weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Original minimax generator loss for GANs.\n\n Note that the authors don't recommend using this loss. A more practically\n useful loss is `modified_generator_loss`.\n\n L = log(sigmoid(D(x))) + log(1 - sigmoid(D(G(z))))\n\n See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more\n details.\n\n Args:\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n label_smoothing: The amount of smoothing for positive labels. This technique\n is taken from `Improved Techniques for Training GANs`\n (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_gen_outputs`, and must be broadcastable to\n `discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or\n the same as the corresponding dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. 
The shape depends on `reduction`.\n \"\"\"\n with tf.compat.v1.name_scope(scope, 'generator_minimax_loss') as scope:\n loss = - minimax_discriminator_loss(\n tf.ones_like(discriminator_gen_outputs),\n discriminator_gen_outputs, label_smoothing, weights, weights, scope,\n loss_collection, reduction, add_summaries=False)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('generator_minimax_loss', loss)\n\n return loss\n\n\ndef modified_discriminator_loss(\n discriminator_real_outputs,\n discriminator_gen_outputs,\n label_smoothing=0.25,\n real_weights=1.0,\n generated_weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Same as minimax discriminator loss.\n\n See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more\n details.\n\n Args:\n discriminator_real_outputs: Discriminator output on real data.\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n label_smoothing: The amount of smoothing for positive labels. This technique\n is taken from `Improved Techniques for Training GANs`\n (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.\n real_weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_gen_outputs`, and must be broadcastable to\n `discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or\n the same as the corresponding dimension).\n generated_weights: Same as `real_weights`, but for\n `discriminator_gen_outputs`.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. The shape depends on `reduction`.\n \"\"\"\n return minimax_discriminator_loss(\n discriminator_real_outputs,\n discriminator_gen_outputs,\n label_smoothing,\n real_weights,\n generated_weights,\n scope or 'discriminator_modified_loss',\n loss_collection,\n reduction,\n add_summaries)\n\n\ndef modified_generator_loss(\n discriminator_gen_outputs,\n label_smoothing=0.0,\n weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Modified generator loss for GANs.\n\n L = -log(sigmoid(D(G(z))))\n\n This is the trick used in the original paper to avoid vanishing gradients\n early in training. See `Generative Adversarial Nets`\n (https://arxiv.org/abs/1406.2661) for more details.\n\n Args:\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n label_smoothing: The amount of smoothing for positive labels. This technique\n is taken from `Improved Techniques for Training GANs`\n (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_gen_outputs`, and must be broadcastable to `labels` (i.e.,\n all dimensions must be either `1`, or the same as the corresponding\n dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. 
The shape depends on `reduction`.\n \"\"\"\n with tf.compat.v1.name_scope(scope, 'generator_modified_loss',\n [discriminator_gen_outputs]) as scope:\n loss = tf.compat.v1.losses.sigmoid_cross_entropy(\n tf.ones_like(discriminator_gen_outputs), discriminator_gen_outputs,\n weights, label_smoothing, scope, loss_collection, reduction)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('generator_modified_loss', loss)\n\n return loss\n\n\n# Least Squares loss from `Least Squares Generative Adversarial Networks`\n# (https://arxiv.org/abs/1611.04076).\n\n\ndef least_squares_generator_loss(\n discriminator_gen_outputs,\n real_label=1,\n weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Least squares generator loss.\n\n This loss comes from `Least Squares Generative Adversarial Networks`\n (https://arxiv.org/abs/1611.04076).\n\n L = 1/2 * (D(G(z)) - `real_label`) ** 2\n\n where D(y) are discriminator logits.\n\n Args:\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n real_label: The value that the generator is trying to get the discriminator\n to output on generated data.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_gen_outputs`, and must be broadcastable to\n `discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or\n the same as the corresponding dimension).\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. The shape depends on `reduction`.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope, 'lsq_generator_loss',\n (discriminator_gen_outputs, real_label)) as scope:\n discriminator_gen_outputs = _to_float(discriminator_gen_outputs)\n loss = tf.math.squared_difference(discriminator_gen_outputs,\n real_label) / 2.0\n loss = tf.compat.v1.losses.compute_weighted_loss(loss, weights, scope,\n loss_collection, reduction)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('generator_lsq_loss', loss)\n\n return loss\n\n\ndef least_squares_discriminator_loss(\n discriminator_real_outputs,\n discriminator_gen_outputs,\n real_label=1,\n fake_label=0,\n real_weights=1.0,\n generated_weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Least squares discriminator loss.\n\n This loss comes from `Least Squares Generative Adversarial Networks`\n (https://arxiv.org/abs/1611.04076).\n\n L = 1/2 * (D(x) - `real`) ** 2 +\n 1/2 * (D(G(z)) - `fake_label`) ** 2\n\n where D(y) are discriminator logits.\n\n Args:\n discriminator_real_outputs: Discriminator output on real data.\n discriminator_gen_outputs: Discriminator output on generated data. 
Expected\n to be in the range of (-inf, inf).\n real_label: The value that the discriminator tries to output for real data.\n fake_label: The value that the discriminator tries to output for fake data.\n real_weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `discriminator_real_outputs`, and must be broadcastable to\n `discriminator_real_outputs` (i.e., all dimensions must be either `1`, or\n the same as the corresponding dimension).\n generated_weights: Same as `real_weights`, but for\n `discriminator_gen_outputs`.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A loss Tensor. The shape depends on `reduction`.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope, 'lsq_discriminator_loss',\n (discriminator_gen_outputs, real_label)) as scope:\n discriminator_real_outputs = _to_float(discriminator_real_outputs)\n discriminator_gen_outputs = _to_float(discriminator_gen_outputs)\n discriminator_real_outputs.shape.assert_is_compatible_with(\n discriminator_gen_outputs.shape)\n\n real_losses = tf.math.squared_difference(discriminator_real_outputs,\n real_label) / 2.0\n fake_losses = tf.math.squared_difference(discriminator_gen_outputs,\n fake_label) / 2.0\n\n loss_on_real = tf.compat.v1.losses.compute_weighted_loss(\n real_losses,\n real_weights,\n scope,\n loss_collection=None,\n reduction=reduction)\n loss_on_generated = tf.compat.v1.losses.compute_weighted_loss(\n fake_losses,\n generated_weights,\n scope,\n loss_collection=None,\n reduction=reduction)\n\n loss = loss_on_real + loss_on_generated\n tf.compat.v1.losses.add_loss(loss, loss_collection)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('discriminator_gen_lsq_loss', loss_on_generated)\n tf.compat.v1.summary.scalar('discriminator_real_lsq_loss', loss_on_real)\n tf.compat.v1.summary.scalar('discriminator_lsq_loss', loss)\n\n return loss\n\n\n# InfoGAN loss from `InfoGAN: Interpretable Representation Learning by\n# `Information Maximizing Generative Adversarial Nets`\n# https://arxiv.org/abs/1606.03657\n\n\ndef _validate_distributions(distributions):\n \"\"\"Check that input is a distribution.\"\"\"\n if not isinstance(distributions, (list, tuple)):\n raise ValueError('`distributions` must be a list or tuple. Instead, '\n 'found %s.' % type(distributions))\n for x in distributions:\n # We used to check with `isinstance(x, tf.distributions.Distribution)`.\n # However, distributions have migrated to `tfp.distributions.Distribution`,\n # which is a new code repo, so we can't check this way anymore until\n # TF-GAN is migrated to a new repo as well.\n # This new check is not sufficient, but is a useful heuristic for now.\n if not callable(getattr(x, 'log_prob', None)):\n raise ValueError('`distributions` must be a list of `Distributions`. '\n 'Instead, found %s.' % type(x))\n\n\ndef _validate_information_penalty_inputs(\n structured_generator_inputs, predicted_distributions):\n \"\"\"Validate input to `mutual_information_penalty`.\"\"\"\n _validate_distributions(predicted_distributions)\n if len(structured_generator_inputs) != len(predicted_distributions):\n raise ValueError('`structured_generator_inputs` length %i must be the same '\n 'as `predicted_distributions` length %i.' 
% (\n len(structured_generator_inputs),\n len(predicted_distributions)))\n\n\ndef mutual_information_penalty(\n structured_generator_inputs,\n predicted_distributions,\n weights=1.0,\n scope=None,\n loss_collection=tf.compat.v1.GraphKeys.LOSSES,\n reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,\n add_summaries=False):\n \"\"\"Returns a penalty on the mutual information in an InfoGAN model.\n\n This loss comes from an InfoGAN paper https://arxiv.org/abs/1606.03657.\n\n Args:\n structured_generator_inputs: A list of Tensors representing the random noise\n that must have high mutual information with the generator output. List\n length should match `predicted_distributions`.\n predicted_distributions: A list of `tfp.distributions.Distribution`s.\n Predicted by the recognizer, and used to evaluate the likelihood of the\n structured noise. List length should match `structured_generator_inputs`.\n weights: Optional `Tensor` whose rank is either 0, or the same dimensions as\n `structured_generator_inputs`.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which this loss will be added.\n reduction: A `tf.losses.Reduction` to apply to loss.\n add_summaries: Whether or not to add summaries for the loss.\n\n Returns:\n A scalar Tensor representing the mutual information loss.\n \"\"\"\n _validate_information_penalty_inputs(\n structured_generator_inputs, predicted_distributions)\n\n with tf.compat.v1.name_scope(scope, 'mutual_information_loss') as scope:\n # Calculate the negative log-likelihood of the reconstructed noise.\n log_probs = [\n tf.reduce_mean(input_tensor=dist.log_prob(noise)) for dist, noise in\n zip(predicted_distributions, structured_generator_inputs)\n ]\n loss = -1 * tf.compat.v1.losses.compute_weighted_loss(\n log_probs,\n weights,\n scope,\n loss_collection=loss_collection,\n reduction=reduction)\n\n if add_summaries:\n tf.compat.v1.summary.scalar('mutual_information_penalty', loss)\n\n return loss\n\n\ndef numerically_stable_global_norm(tensor_list):\n \"\"\"Compute the global norm of a list of Tensors, with improved stability.\n\n The global norm computation sometimes overflows due to the intermediate L2\n step. 
To avoid this, we divide by a cheap-to-compute max over the\n matrix elements.\n\n Args:\n tensor_list: A list of tensors, or `None`.\n\n Returns:\n A scalar tensor with the global norm.\n \"\"\"\n if all(x is None for x in tensor_list):\n return 0.0\n\n list_max = tf.reduce_max(input_tensor=[\n tf.reduce_max(input_tensor=tf.abs(x))\n for x in tensor_list\n if x is not None\n ])\n return list_max * tf.linalg.global_norm(\n [x / list_max for x in tensor_list if x is not None])\n\n\ndef _used_weight(weights_list):\n for weight in weights_list:\n if weight is not None:\n return tf.get_static_value(tf.convert_to_tensor(value=weight))\n\n\ndef _validate_args(weight_factor, gradient_ratio):\n if weight_factor is None and gradient_ratio is None:\n raise ValueError(\n '`weight_factor` and `gradient_ratio` cannot both be `None.`')\n if weight_factor is not None and gradient_ratio is not None:\n raise ValueError(\n '`weight_factor` and `gradient_ratio` cannot both be specified.')\n\n\n# TODO(joelshor): Add ability to pass in gradients, to avoid recomputing.\ndef combine_adversarial_loss(main_loss,\n adversarial_loss,\n weight_factor=None,\n gradient_ratio=None,\n gradient_ratio_epsilon=1e-6,\n variables=None,\n scalar_summaries=True,\n gradient_summaries=True,\n scope=None):\n \"\"\"Utility to combine main and adversarial losses.\n\n This utility combines the main and adversarial losses in one of two ways.\n 1) Fixed coefficient on adversarial loss. Use `weight_factor` in this case.\n 2) Fixed ratio of gradients. Use `gradient_ratio` in this case. This is often\n used to make sure both losses affect weights roughly equally, as in\n https://arxiv.org/pdf/1705.05823.\n\n One can optionally also visualize the scalar and gradient behavior of the\n losses.\n\n Args:\n main_loss: A float Tensor of any shape, indicating the main loss. The size\n of the first dimension must be the same as the first dimension of\n adversarial_loss. If main_loss and adversarial_loss are not compatible\n shapes, both will be mean-reduced to just their first dimension (assumed\n to be the batch dimension).\n adversarial_loss: A float Tensor of any shape, indicating the adversarial\n loss. The size of the first dimension must be the same as the first\n dimension of main_loss. If main_loss and adversarial_loss are not\n compatible shapes, both will be mean-reduced to just their first dimension\n (assumed to be the batch dimension).\n weight_factor: If not `None`, the coefficient by which to multiply the\n adversarial loss. Exactly one of this and `gradient_ratio` must be\n non-None.\n gradient_ratio: If not `None`, the ratio of the magnitude of the gradients.\n Specifically,\n gradient_ratio = grad_mag(main_loss) / grad_mag(adversarial_loss)\n Exactly one of this and `weight_factor` must be non-None.\n gradient_ratio_epsilon: An epsilon to add to the adversarial loss\n coefficient denominator, to avoid division-by-zero.\n variables: List of variables to calculate gradients with respect to. If not\n present, defaults to all trainable variables.\n scalar_summaries: Create scalar summaries of losses. If main_loss and\n adversarial_loss are not scalars, they will be mean-reduced to scalars for\n summary computation.\n gradient_summaries: Create gradient summaries of losses.\n scope: Optional name scope.\n\n Returns:\n A float Tensor indicating the desired combined loss. 
If main_loss and\n adversarial_loss are both scalars then this will also be a scalar, otherwise\n it will be of shape [main_loss.shape[0]].\n\n Raises:\n ValueError: Malformed input.\n RuntimeError: If `tf.gradients` require computing, and TensorFlow is\n executing eagerly.\n \"\"\"\n _validate_args(weight_factor, gradient_ratio)\n if variables is None:\n variables = contrib.get_trainable_variables()\n\n with tf.compat.v1.name_scope(\n scope, 'adversarial_loss', values=[main_loss, adversarial_loss]):\n # If losses are not the same shape, reduce them to both be shape [batch,].\n if not main_loss.shape.is_compatible_with(adversarial_loss.shape):\n if main_loss.shape[0] != adversarial_loss.shape[0]:\n raise ValueError(\n 'main_loss and adversarial_loss must have the same sized first '\n 'dimension. Found %d and %d.' %\n (main_loss.shape[0], adversarial_loss.shape[0]))\n tf.compat.v1.logging.warning(\n 'Applying mean reduction per-batch-element to main and adversarial '\n 'losses to make shapes compatible. If this is undesirable, ensure '\n 'that the shapes are compatible before passing them into '\n 'combine_adversarial_loss.')\n main_loss = tf.math.reduce_mean(\n input_tensor=main_loss, axis=list(range(1, main_loss.shape.rank)))\n adversarial_loss = tf.math.reduce_mean(\n input_tensor=adversarial_loss,\n axis=list(range(1, adversarial_loss.shape.rank)))\n\n # Compute gradients if we will need them.\n if gradient_summaries or gradient_ratio is not None:\n # `tf.gradients` doesn't work in eager.\n if tf.executing_eagerly():\n raise RuntimeError('`tf.gradients` doesn\\'t work in eager.')\n main_loss_grad_mag = numerically_stable_global_norm(\n tf.gradients(ys=main_loss, xs=variables))\n adv_loss_grad_mag = numerically_stable_global_norm(\n tf.gradients(ys=adversarial_loss, xs=variables))\n\n # Add summaries, if applicable.\n if scalar_summaries:\n tf.compat.v1.summary.scalar('main_loss',\n tf.math.reduce_mean(input_tensor=main_loss))\n tf.compat.v1.summary.scalar(\n 'adversarial_loss',\n tf.math.reduce_mean(input_tensor=adversarial_loss))\n if gradient_summaries:\n tf.compat.v1.summary.scalar('main_loss_gradients', main_loss_grad_mag)\n tf.compat.v1.summary.scalar('adversarial_loss_gradients',\n adv_loss_grad_mag)\n\n # Combine losses in the appropriate way.\n # If `weight_factor` is always `0`, avoid computing the adversarial loss\n # tensor entirely.\n if _used_weight((weight_factor, gradient_ratio)) == 0:\n final_loss = main_loss\n elif weight_factor is not None:\n final_loss = (main_loss +\n tf.stop_gradient(weight_factor) * adversarial_loss)\n elif gradient_ratio is not None:\n grad_mag_ratio = main_loss_grad_mag / (\n adv_loss_grad_mag + gradient_ratio_epsilon)\n adv_coeff = grad_mag_ratio / gradient_ratio\n tf.compat.v1.summary.scalar('adversarial_coefficient', adv_coeff)\n final_loss = (main_loss +\n tf.stop_gradient(adv_coeff) * adversarial_loss)\n\n return final_loss\n\n\ndef cycle_consistency_loss(data_x,\n reconstructed_data_x,\n data_y,\n reconstructed_data_y,\n scope=None,\n add_summaries=False):\n \"\"\"Defines the cycle consistency loss.\n\n The cyclegan model has two partial models where `model_x2y` generator F maps\n data set X to Y, `model_y2x` generator G maps data set Y to X. 
For a `data_x`\n in data set X, we could reconstruct it by\n * reconstructed_data_x = G(F(data_x))\n Similarly\n * reconstructed_data_y = F(G(data_y))\n\n The cycle consistency loss is about the difference between data and\n reconstructed data, namely\n * loss_x2x = |data_x - G(F(data_x))| (L1-norm)\n * loss_y2y = |data_y - F(G(data_y))| (L1-norm)\n * loss = (loss_x2x + loss_y2y) / 2\n where `loss` is the final result.\n\n For the L1-norm, we follow the original implementation:\n https://github.com/junyanz/CycleGAN/blob/master/models/cycle_gan_model.lua\n we use L1-norm of pixel-wise error normalized by data size such that\n `cycle_loss_weight` can be specified independent of image size.\n\n See https://arxiv.org/abs/1703.10593 for more details.\n\n Args:\n data_x: A `Tensor` of data X.\n reconstructed_data_x: A `Tensor` of reconstructed data X.\n data_y: A `Tensor` of data Y.\n reconstructed_data_y: A `Tensor` of reconstructed data Y.\n scope: The scope for the operations performed in computing the loss.\n Defaults to None.\n add_summaries: Whether or not to add detailed summaries for the loss.\n Defaults to False.\n\n Returns:\n A scalar `Tensor` of cycle consistency loss.\n \"\"\"\n\n with tf.compat.v1.name_scope(\n scope,\n 'cycle_consistency_loss',\n values=[data_x, reconstructed_data_x, data_y, reconstructed_data_y]):\n loss_x2x = tf.compat.v1.losses.absolute_difference(data_x,\n reconstructed_data_x)\n loss_y2y = tf.compat.v1.losses.absolute_difference(data_y,\n reconstructed_data_y)\n loss = (loss_x2x + loss_y2y) / 2.0\n if add_summaries:\n tf.compat.v1.summary.scalar('cycle_consistency_loss_x2x', loss_x2x)\n tf.compat.v1.summary.scalar('cycle_consistency_loss_y2y', loss_y2y)\n tf.compat.v1.summary.scalar('cycle_consistency_loss', loss)\n\n return loss\n\n\ndef relativistic_discriminator_loss(discriminator_real_outputs,\n discriminator_gen_outputs,\n scope=None):\n \"\"\"Relativistic Average GAN discriminator loss.\n\n This loss introduced in `The relativistic discriminator: a key element missing\n from standard GAN` (https://arxiv.org/abs/1807.00734).\n\n D_ra(x, y) = D(x) - E[D(y)]\n L = E[log(D_ra(real, fake))] - E[log(1 - D_ra(fake, real)]\n\n where D(x) and D(y) are discriminator logits, E[] represents the operation\n of taking average for all data in a mini-batch.\n\n Args:\n discriminator_real_outputs: Discriminator output on real data.\n discriminator_gen_outputs: Discriminator output on generated data. 
Expected\n to be in the range of (-inf, inf).\n scope: The scope for the operations performed in computing the loss.\n Returns:\n A loss Tensor.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope,\n 'relativistic_discriminator_loss',\n values=[discriminator_real_outputs, discriminator_gen_outputs]):\n\n def get_logits(x, y):\n return x - tf.reduce_mean(y)\n\n real_logits = get_logits(discriminator_real_outputs,\n discriminator_gen_outputs)\n gen_logits = get_logits(discriminator_gen_outputs,\n discriminator_real_outputs)\n\n real_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=tf.ones_like(real_logits), logits=real_logits))\n gen_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=tf.zeros_like(gen_logits), logits=gen_logits))\n\n return real_loss + gen_loss\n\n\ndef relativistic_generator_loss(discriminator_real_outputs,\n discriminator_gen_outputs,\n scope=None):\n \"\"\"Relativistic Average GAN generator loss.\n\n This loss introduced in `The relativistic discriminator: a key element missing\n from standard GAN` (https://arxiv.org/abs/1807.00734).\n\n D_ra(x, y) = D(x) - E[D(y)]\n L = E[log(1 - D_ra(real, fake))] - E[log(D_ra(fake, real)]\n\n where D(x) and D(y) are discriminator logits, E[] represents the operation\n of taking average for all data in a mini-batch.\n\n Args:\n discriminator_real_outputs: Discriminator output on real data.\n discriminator_gen_outputs: Discriminator output on generated data. Expected\n to be in the range of (-inf, inf).\n scope: The scope for the operations performed in computing the loss.\n\n Returns:\n A loss Tensor.\n \"\"\"\n with tf.compat.v1.name_scope(\n scope,\n 'relativistic_generator_loss',\n values=[discriminator_real_outputs, discriminator_gen_outputs]):\n\n def get_logits(x, y):\n return x - tf.reduce_mean(y)\n\n real_logits = get_logits(discriminator_real_outputs,\n discriminator_gen_outputs)\n gen_logits = get_logits(discriminator_gen_outputs,\n discriminator_real_outputs)\n\n real_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=tf.zeros_like(real_logits), logits=real_logits))\n gen_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=tf.ones_like(gen_logits), logits=gen_logits))\n\n return real_loss + gen_loss\n"
] | [
[
"tensorflow.compat.v1.disable_v2_behavior"
],
[
"tensorflow.Graph",
"tensorflow.reduce_mean",
"tensorflow.compat.v1.estimator.inputs.numpy_input_fn",
"tensorflow.reduce_sum",
"tensorflow.compat.v1.metrics.mean_squared_error",
"tensorflow.compat.v1.get_variable",
"tensorflow.test.main",
"tensorflow.ones",
"tensorflow.compat.v1.summary.FileWriterCache.clear",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.train.exponential_decay",
"tensorflow.compat.v1.layers.flatten",
"numpy.zeros",
"tensorflow.one_hot",
"numpy.array",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.layers.dense",
"tensorflow.compat.v1.train.GradientDescentOptimizer"
],
[
"tensorflow.compat.v1.image.encode_png",
"tensorflow.compat.v1.random.normal",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.name_scope"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.compat.v1.losses.compute_weighted_loss",
"tensorflow.cast",
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.losses.absolute_difference",
"tensorflow.gradients",
"tensorflow.stop_gradient",
"tensorflow.linalg.global_norm",
"tensorflow.square",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.name_scope",
"tensorflow.executing_eagerly",
"tensorflow.shape",
"tensorflow.random.uniform",
"tensorflow.compat.dimension_value",
"tensorflow.zeros_like",
"tensorflow.compat.v1.losses.softmax_cross_entropy",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.nn.relu",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.math.squared_difference",
"tensorflow.ones_like",
"tensorflow.math.reduce_mean",
"tensorflow.sqrt",
"tensorflow.compat.v1.losses.add_loss",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
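The row above stores the TF-GAN loss implementations (combine_adversarial_loss, cycle_consistency_loss, and the relativistic average GAN losses). As a quick illustration of the relativistic discriminator loss described in that row's docstring, D_ra(x, y) = D(x) - E[D(y)], the following stand-alone sketch recomputes it directly from a handful of made-up logits; the numeric values are purely illustrative and only TensorFlow itself is assumed.

import tensorflow as tf

# Hypothetical discriminator logits for four real and four generated samples.
real_logits = tf.constant([2.0, 1.5, 0.3, -0.2])
gen_logits = tf.constant([-1.0, 0.1, -0.7, 0.4])

# Relativistic logits: D_ra(x, y) = D(x) - E[D(y)]
real_rel = real_logits - tf.reduce_mean(gen_logits)
gen_rel = gen_logits - tf.reduce_mean(real_logits)

# Same combination used by relativistic_discriminator_loss in the row above.
disc_loss = (
    tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(real_rel), logits=real_rel))
    + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(gen_rel), logits=gen_rel)))
print(float(disc_loss))

The generator variant in the same row simply swaps the two label tensors, so the same sketch covers it by exchanging ones_like and zeros_like.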
fmitch/incubator-tvm | [
"67e3f437af90724a5af0bff67d033d47c8a2edf7"
] | [
"experiments/dv_search_matmul.py"
] | [
"import logging\nimport time\nimport sys\nimport os\nimport numpy as np\nfrom multiprocessing import Pool, cpu_count\nimport random\nimport string\nfrom tensors import *\n\nimport pickle\n\nimport tvm\nimport topi\nfrom topi.testing import conv2d_nchw_python\nfrom tvm import te\nfrom tvm import autotvm\nfrom tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner, DataVolumeTuner\nimport tvm.contrib.graph_runtime as runtime\n#from tvm.autotvm.task.topi_integration import deserialize_args\nfrom collections import namedtuple\nfrom itertools import permutations\n\nimport argparse\n\n#import logging\n#logging.getLogger('autotvm').setLevel(logging.DEBUG)\n#logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))\n\nglobal num_threads\nnum_threads = 32\nos.environ[\"TVM_NUM_THREADS\"] = str(num_threads)\n\nletters = string.digits + string.ascii_letters\n\n\ndef get_matmul_dv(ind):\n config = task.config_space.get(ind)\n d_foot, d_vol = autotvm.tuner.data_volume_estimator.estimate_dv(*get_matmul_extents_info(M,N,K,config,matmul_index))\n return -1*(d_vol[2][:,:,-1].sum(axis=0) * np.array([64/100e9, 64/44e9, 64/25e9])).sum()\n\ndef concurrency_ratio(ind):\n config = task.config_space.get(ind)\n mo_value = np.ceil(M / config['tile_m'].size[-1])\n no_value = np.ceil(N / config['tile_n'].size[-1])\n\n concurrency = mo_value * no_value\n\n return np.floor(concurrency/num_threads) / np.ceil(concurrency/num_threads)\n\ndef get_dv(ind):\n config = task.config_space.get(ind)\n d_foot, d_vol = autotvm.tuner.data_volume_estimator.estimate_dv(*get_extents_info(config))\n return -1*(d_vol[2][:,:,-1].sum(axis=0) * np.array([64/100e9, 64/44e9, 64/25e9])).sum()\n\ndef limited_test(ind):\n tic = time.time()\n lower_llvm_limit = 1\n upper_llvm_limit = 2\n lower_asm_limit = 0.5\n upper_asm_limit = 2\n results = []\n config = task.config_space.get(ind)\n with autotvm.ApplyConfig(config):\n with tvm.target.create(\"llvm -mcpu=core-avx2\"):\n s, arg_bufs = task.func(*task.args)\n op_func = tvm.build(s, arg_bufs)\n build_time = time.time() - tic\n\n ll_source = op_func.get_source()\n\n funcs = ll_source.split('\\n\\n')\n llvm_opint = 0\n asm_opint = 0\n length = 0\n for func in funcs:\n if 'fmuladd.v' in func and len(func) > length:\n length = len(func)\n longest = func\n\n loads = 0\n stores = 0\n fmas = 0\n if length > 0:\n lines = longest.split('\\n')\n for line in lines:\n if 'load <' in line:\n loads += 1\n elif 'store <' in line:\n stores += 1\n elif 'fmuladd.v8' in line:\n fmas += 1\n if loads+stores > 0:\n llvm_opint = fmas / (loads+stores)\n\n if llvm_opint >= lower_llvm_limit and llvm_opint <= upper_llvm_limit:\n tic = time.time()\n asm_source = op_func.get_source('asm')\n asm_time = time.time() - tic\n\n\n funcs = asm_source.split(':\\n')\n length = 0\n for func in funcs:\n if 'vfmadd' in func and len(func) > length:\n length = len(func)\n longest = func\n moves = 0\n fmas = 0\n if length > 0:\n lines = longest.split('\\n')\n for line in lines:\n if 'vmov' in line and 'ymm' in line:\n moves += 1\n elif 'vfmadd' in line and 'ymm' in line:\n fmas += 1\n if '(%r' in line:\n moves += 1\n if moves > 0:\n asm_opint = fmas / moves\n\n if asm_opint >= lower_asm_limit and asm_opint <= upper_asm_limit:\n module_file = os.path.join('/tmp/', ''.join(random.choice(letters) for i in range(10)) + '.o')\n op_func.save(module_file)\n return module_file, llvm_opint, asm_opint, ind,build_time, asm_time\n\n return '', llvm_opint, asm_opint, ind, build_time, 0\n\n\ndef eval_time(ind, 
module_file):\n config = task.config_space.get(ind)\n with autotvm.ApplyConfig(config):\n with tvm.target.create(\"llvm -mcpu=core-avx2\"):\n s, arg_bufs = task.func(*task.args)\n func = tvm.runtime.load_module(module_file)\n\n a_np = np.random.uniform(size=(N, N))\n b_np = np.random.uniform(size=(N, N))\n c_np = np.zeros((N,N))\n ctx = tvm.cpu()\n a_tvm = tvm.nd.array(a_np.astype(np.float32), ctx=ctx)\n b_tvm = tvm.nd.array(b_np.astype(np.float32), ctx=ctx)\n c_tvm = tvm.nd.array(c_np.astype(np.float32), ctx=ctx)\n\n evaluator = func.time_evaluator(func.entry_name, ctx, repeat=10,number=4,)\n variation = 1\n while variation > 0.05:\n res = np.array(sorted(evaluator(a_tvm, b_tvm, c_tvm).results)[:-5])\n variation = res.std() / res.mean()\n\n #if tuple(arg_bufs[1].shape) == b_tvm.shape:\n # res = evaluator(c_tvm, b_tvm, a_tvm)\n #else:\n # res = evaluator(c_tvm, a_tvm, b_tvm)\n\n return res.mean(), ind\n\ndef tune_kernels(args, trials, cr_limit):\n \n func_create = 'template/matmul'\n\n global task\n task = autotvm.task.create(func_create, \n args=(M,N,K,matmul_index,'float32'), \n target='llvm -mcpu=core-avx2')\n print(task.config_space)\n outer_trials = min(int(1e9), len(task.config_space))\n trials = min(trials, len(task.config_space))\n\n\n pickle_file = 'data/matmul/perm%.2f_timed_asm_matmul%i_%s_%icore_%i.pkl' % (cr_limit, matmul_index, N, num_threads, trials)\n if os.path.exists(pickle_file):\n print('File exists', pickle_file)\n return\n with open(pickle_file, 'rb') as fi:\n inds, res, dv, res_times, asm, llvm = pickle.load(fi)\n best = np.array(res).mean(axis=1).argsort()\n inds = np.array(inds)\n cr = []\n for ind in inds:\n cr.append(concurrency_ratio(ind))\n cr = np.array(cr)\n res = np.array(res).mean(axis=1)\n print(res[best[:10]])\n print(np.array(asm)[best[:10]])\n print(np.array(llvm)[best[:10]])\n print(cr[best[:10]])\n #for ind in inds[best[:10]]:\n # print(task.config_space.get(ind))\n return\n\n pool_threads = 80#cpu_count()\n\n #configs = np.random.choice(len(task.config_space), size=outer_trials, replace=False)\n configs = range(outer_trials)\n\n print('Running Data Volume model...')\n tic = time.time()\n with Pool(pool_threads) as p:\n cr = p.map(concurrency_ratio, configs)\n print('CR for %i configs: %f' % (len(configs), time.time() - tic))\n cr = np.array(cr)\n configs = np.array(configs)[(cr > cr_limit)]\n cr = np.array(cr)[(cr > cr_limit)]\n\n with Pool(pool_threads) as p:\n dv = p.map(get_matmul_dv, configs)\n print('DV for %i configs: %f' % (len(configs), time.time() - tic))\n\n dv = -1*np.array(dv)\n dv_order = dv.argsort()\n configs = configs[dv_order]\n dv = dv[dv_order]\n num_configs = len(configs)\n dv_dict = dict(zip(configs,dv))\n\n best_flops = 0.0\n flops = 0.0\n counter = 0\n print('Running on hardware...')\n sorted_order = np.array(dv).argsort()\n vec_counter = 0\n to_try = np.array(configs)[sorted_order]\n build_counter = 0\n\n inds = []\n results = []\n dv = []\n asm_opints = []\n llvm_opints = []\n result_times = []\n\n asm_times = 0\n while len(results) < trials and build_counter < num_configs:\n inds_to_test = []\n module_files = []\n start_index = build_counter\n\n with Pool(pool_threads) as p:\n for module_file, llvm, asm, ind, build_time, asm_time in p.map(limited_test, to_try[start_index:start_index+100*pool_threads]):\n #for ind in to_try:\n # should_test, ind = limited_test(ind)\n build_counter += 1\n if len(module_file) > 0:\n llvm_opints.append(llvm)\n asm_opints.append(asm)\n inds_to_test.append(ind)\n module_files.append(module_file)\n 
vec_counter += 1\n #print('Prepping tests: %.2f/%.2f GFLOPS %i/%i (%i), %.1f s \\r' % \n # (flops, best_flops, counter, num_configs,\n # build_counter, time.time()-tic), end='')\n\n #finished_index = np.where(to_try == inds_to_test[-1])[0][0]\n #to_try = to_try[finished_index+1:]\n\n #with Pool(6) as p:\n # for x, ind in p.imap(limited_test, to_try):\n inds_to_test = np.array(inds_to_test)\n for ind, module_file in zip(inds_to_test, module_files):\n x, ind = eval_time(ind, module_file)\n result_times.append(time.time() - tic)\n counter += 1\n mean_time = np.array(x).mean()\n flops = task.flop/(mean_time*1e9)\n best_flops = max(flops, best_flops)\n if best_flops == flops:\n best_ind = ind\n inds.append(ind)\n results.append(x)\n dv.append(dv_dict[ind])\n #print('Testing: %.2f/%.2f GFLOPS %i/%i (%i), %.1f s \\r' % \n # (flops, best_flops, counter, num_configs, \n # build_counter, time.time()-tic), end='')\n os.remove(module_file)\n os.remove(module_file+'.so')\n\n\n print()\n print('Best config:', task.config_space.get(best_ind))\n print('Saving %s' % pickle_file)\n with open(pickle_file, 'wb') as output:\n pickle.dump([inds, results, dv, result_times, asm_opints, llvm_opints],\n output, pickle.HIGHEST_PROTOCOL)\n return\n\ndef tune_and_evaluate():\n\n dilation = 1;\n\n parser = argparse.ArgumentParser(description='Run TC benchmarks in TVM')\n parser.add_argument( '-t','--trials', help=\"Int. Number of trials to sample\", default=2000, type=int)\n parser.add_argument( '-b','--benchmark', help=\"Int. Number of Tensor Contraction benchmark (1-4)\", default=1, type=int)\n\n global M, N, K\n global matmul_index\n\n args = parser.parse_args()\n trials = args.trials\n ind = args.benchmark\n cr_limit = 0.9\n\n for size in [1000,4000]:\n matmul_index = ind\n\n print(\"Tuning TC %i...\" % matmul_index)\n #key = list(benchmarks.keys())[args.benchmark]\n\n M,N,K = [size,size,size]\n \n\n print(\"M, N, K\")\n print(M, N, K)\n tune_kernels(args, trials, cr_limit)\n\n\nif __name__ == \"__main__\":\n tune_and_evaluate()\n"
] | [
[
"numpy.ceil",
"numpy.floor",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LeeElvis/OpenMDAO | [
"0ef1f0eeb934d8cd4ef0a02add6ba3c3a13e6150",
"0ef1f0eeb934d8cd4ef0a02add6ba3c3a13e6150"
] | [
"openmdao/solvers/linear/tests/test_linear_block_gs.py",
"openmdao/components/mux_comp.py"
] | [
"\"\"\"Test the LinearBlockGS linear solver class.\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.solvers.linear.tests.linear_test_base import LinearSolverTests\nfrom openmdao.test_suite.components.sellar import SellarImplicitDis1, SellarImplicitDis2, \\\n SellarDis1withDerivatives, SellarDis2withDerivatives\nfrom openmdao.test_suite.components.expl_comp_simple import TestExplCompSimpleDense\nfrom openmdao.test_suite.components.sellar import SellarDerivatives\nfrom openmdao.utils.assert_utils import assert_near_equal\n\n\nclass SimpleImp(om.ImplicitComponent):\n def setup(self):\n self.add_input('a', val=1.)\n self.add_output('x', val=0.)\n\n self.declare_partials('*', '*')\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n residuals['x'] = 3.0*inputs['a'] + 2.0*outputs['x']\n\n def linearize(self, inputs, outputs, jacobian):\n jacobian['x', 'x'] = 2.0\n jacobian['x', 'a'] = 3.0\n\n\nclass TestBGSSolver(LinearSolverTests.LinearSolverTestCase):\n linear_solver_class = om.LinearBlockGS\n\n def test_globaljac_err(self):\n prob = om.Problem()\n model = prob.model = om.Group(assembled_jac_type='dense')\n model.add_subsystem('x_param', om.IndepVarComp('length', 3.0),\n promotes=['length'])\n model.add_subsystem('mycomp', TestExplCompSimpleDense(),\n promotes=['length', 'width', 'area'])\n\n model.linear_solver = self.linear_solver_class(assemble_jac=True)\n prob.setup()\n\n with self.assertRaises(RuntimeError) as context:\n prob.run_model()\n\n self.assertEqual(str(context.exception),\n \"Linear solver LinearBlockGS in Group (<model>) doesn't support assembled jacobians.\")\n\n def test_simple_implicit(self):\n # This verifies that we can perform lgs around an implicit comp and get the right answer\n # as long as we slot a non-lgs linear solver on that component.\n\n prob = om.Problem()\n model = prob.model\n model.add_subsystem('p', om.IndepVarComp('a', 5.0))\n comp = model.add_subsystem('comp', SimpleImp())\n model.connect('p.a', 'comp.a')\n\n model.linear_solver = self.linear_solver_class()\n comp.linear_solver = om.DirectSolver()\n\n prob.setup(check=False, mode='fwd')\n prob.run_model()\n\n deriv = prob.compute_totals(of=['comp.x'], wrt=['p.a'])\n self.assertEqual(deriv['comp.x', 'p.a'], -1.5)\n\n def test_implicit_cycle(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 1.0))\n model.add_subsystem('d1', SellarImplicitDis1())\n model.add_subsystem('d2', SellarImplicitDis2())\n model.connect('d1.y1', 'd2.y1')\n model.connect('d2.y2', 'd1.y2')\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n model.nonlinear_solver.options['maxiter'] = 5\n model.linear_solver = self.linear_solver_class()\n\n prob.setup()\n prob.set_solver_print(level=0)\n\n prob.run_model()\n res = model._residuals.get_norm()\n\n # Newton is kinda slow on this for some reason, this is how far it gets with directsolver too.\n self.assertLess(res, 2.0e-2)\n\n def test_implicit_cycle_precon(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 1.0))\n model.add_subsystem('d1', SellarImplicitDis1())\n model.add_subsystem('d2', SellarImplicitDis2())\n model.connect('d1.y1', 'd2.y1')\n model.connect('d2.y2', 'd1.y2')\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n model.nonlinear_solver.options['maxiter'] = 5\n model.nonlinear_solver.linesearch = om.BoundsEnforceLS()\n model.linear_solver = om.ScipyKrylov()\n model.linear_solver.precon = 
self.linear_solver_class()\n\n prob.setup()\n\n prob['d1.y1'] = 4.0\n prob.set_solver_print()\n prob.run_model()\n res = model._residuals.get_norm()\n\n # Newton is kinda slow on this for some reason, this is how far it gets with directsolver too.\n self.assertLess(res, 2.0e-2)\n\n def test_full_desvar_with_index_obj_relevance_bug(self):\n prob = om.Problem()\n sub = prob.model.add_subsystem('sub', SellarDerivatives())\n prob.model.nonlinear_solver = om.NonlinearBlockGS()\n prob.model.linear_solver = om.LinearBlockGS()\n sub.nonlinear_solver = om.NonlinearBlockGS()\n sub.linear_solver = om.LinearBlockGS()\n\n prob.model.add_design_var('sub.z', lower=-100, upper=100)\n prob.model.add_objective('sub.z', index=1)\n\n prob.set_solver_print(level=0)\n\n prob.setup()\n\n # We don't call run_driver() here because we don't\n # actually want the optimizer to run\n prob.run_model()\n\n derivs = prob.compute_totals(of=['sub.z'], wrt=['sub.z'])\n\n assert_near_equal(derivs[('sub.z', 'sub.z')], [[0., 1.]])\n\n\nclass TestBGSSolverFeature(unittest.TestCase):\n\n def test_specify_solver(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.linear_solver = om.LinearBlockGS()\n model.nonlinear_solver = om.NonlinearBlockGS()\n\n prob.setup()\n\n prob.set_val('x', 1.)\n prob.set_val('z', np.array([5.0, 2.0]))\n\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_near_equal(J['obj', 'z'][0][0], 9.61001056, .00001)\n assert_near_equal(J['obj', 'z'][0][1], 1.78448534, .00001)\n\n def test_feature_maxiter(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = om.NonlinearBlockGS()\n\n model.linear_solver = om.LinearBlockGS()\n model.linear_solver.options['maxiter'] = 2\n\n prob.setup()\n\n prob.set_val('x', 1.)\n prob.set_val('z', np.array([5.0, 2.0]))\n\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_near_equal(J['obj', 'z'][0][0], 9.60230118004, .00001)\n assert_near_equal(J['obj', 'z'][0][1], 1.78022500547, .00001)\n\n def test_feature_atol(self):\n import numpy as np\n\n import 
openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = om.NonlinearBlockGS()\n\n model.linear_solver = om.LinearBlockGS()\n model.linear_solver.options['atol'] = 1.0e-3\n\n prob.setup()\n\n prob.set_val('x', 1.)\n prob.set_val('z', np.array([5.0, 2.0]))\n\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_near_equal(J['obj', 'z'][0][0], 9.61016296175, .00001)\n assert_near_equal(J['obj', 'z'][0][1], 1.78456955704, .00001)\n\n def test_feature_rtol(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = om.NonlinearBlockGS()\n\n model.linear_solver = om.LinearBlockGS()\n model.linear_solver.options['rtol'] = 1.0e-3\n\n prob.setup()\n\n prob.set_val('x', 1.)\n prob.set_val('z', np.array([5.0, 2.0]))\n\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_near_equal(J['obj', 'z'][0][0], 9.61016296175, .00001)\n assert_near_equal(J['obj', 'z'][0][1], 1.78456955704, .00001)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"Definition of the Mux Component.\"\"\"\n\n\nimport numpy as np\n\nfrom openmdao.core.explicitcomponent import ExplicitComponent\n\n\nclass MuxComp(ExplicitComponent):\n \"\"\"\n Mux one or more inputs along a given axis.\n\n Attributes\n ----------\n _vars : dict\n Container mapping name of variables to be muxed with additional data.\n _input_names : dict\n Container mapping name of variables to be muxed with associated inputs.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Instantiate MuxComp and populate private members.\n\n Parameters\n ----------\n **kwargs : dict\n Arguments to be passed to the component initialization method.\n \"\"\"\n super(MuxComp, self).__init__(**kwargs)\n\n self._vars = {}\n self._input_names = {}\n\n def initialize(self):\n \"\"\"\n Declare options.\n \"\"\"\n self.options.declare('vec_size', types=int, default=2,\n desc='The number of elements to be combined into an output.')\n\n def add_var(self, name, val=1.0, shape=None, units=None, desc='', axis=0):\n \"\"\"\n Add an output variable to be muxed, and all associated input variables.\n\n Parameters\n ----------\n name : str\n name of the variable in this component's namespace.\n val : float or list or tuple or ndarray or Iterable\n The initial value of the variable being added in user-defined units.\n Default is 1.0.\n shape : int or tuple or list or None\n Shape of the input variables to be muxed, only required if val is not an array.\n Default is None.\n units : str or None\n Units in which this input variable will be provided to the component\n during execution. Default is None, which means it is unitless.\n desc : str\n description of the variable\n axis : int\n The axis along which the elements will be stacked. Note that N-dimensional inputs\n cannot be stacked along an axis greater than N.\n \"\"\"\n self._vars[name] = {'val': val, 'shape': shape, 'units': units, 'desc': desc, 'axis': axis}\n\n opts = self.options\n vec_size = opts['vec_size']\n\n options = self._vars[name]\n\n kwgs = dict(options)\n in_shape = np.asarray(options['val']).shape \\\n if options['shape'] is None else options['shape']\n in_size = np.prod(in_shape)\n out_shape = list(in_shape)\n out_shape.insert(options['axis'], vec_size)\n kwgs.pop('shape')\n ax = kwgs.pop('axis')\n\n in_dimension = len(in_shape)\n\n if ax > in_dimension:\n raise ValueError('{3}: Cannot mux a {0}D inputs for {2} along axis greater '\n 'than {0} ({1})'.format(in_dimension, ax, name, self.msginfo))\n\n self.add_output(name=name,\n val=options['val'],\n shape=out_shape,\n units=options['units'],\n desc=options['desc'])\n\n self._input_names[name] = []\n\n for i in range(vec_size):\n in_name = '{0}_{1}'.format(name, i)\n self._input_names[name].append(in_name)\n\n self.add_input(name=in_name, shape=in_shape, **kwgs)\n\n in_templates = [np.zeros(in_shape, dtype=int) for _ in range(vec_size)]\n\n rs = []\n cs = []\n\n for j in range(in_size):\n in_templates[i].flat[:] = 0\n in_templates[i].flat[j] = 1\n temp_out = np.stack(in_templates, axis=ax)\n cs.append(j)\n rs.append(int(np.nonzero(temp_out.ravel())[0]))\n\n self.declare_partials(of=name, wrt=in_name, rows=rs, cols=cs, val=1.0)\n\n def compute(self, inputs, outputs):\n \"\"\"\n Mux the inputs into the appropriate outputs.\n\n Parameters\n ----------\n inputs : Vector\n unscaled, dimensional input variables read via inputs[key]\n outputs : Vector\n unscaled, dimensional output variables read via outputs[key]\n \"\"\"\n opts = self.options\n vec_size = opts['vec_size']\n\n for var in 
self._vars:\n ax = self._vars[var]['axis']\n invar = self._input_names[var]\n vals = [inputs[invar[i]] for i in range(vec_size)]\n outputs[var][...] = np.stack(vals, axis=ax)\n"
] | [
[
"numpy.array"
],
[
"numpy.asarray",
"numpy.stack",
"numpy.zeros",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
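For the MuxComp stored in the second file of the row above, a minimal usage sketch follows; it assumes openmdao.api exposes MuxComp (as recent OpenMDAO releases do), and the variable names and values are made up for illustration.

import numpy as np
import openmdao.api as om

p = om.Problem()
# vec_size=3 creates inputs x_0, x_1, x_2 and stacks them along axis 0.
mux = p.model.add_subsystem('mux', om.MuxComp(vec_size=3))
mux.add_var('x', shape=(2,), axis=0)

p.setup()
p.set_val('mux.x_0', np.array([1.0, 2.0]))
p.set_val('mux.x_1', np.array([3.0, 4.0]))
p.set_val('mux.x_2', np.array([5.0, 6.0]))
p.run_model()
print(p.get_val('mux.x'))  # stacked output, shape (3, 2)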
STScI-MIRI/miricoord | [
"d378c24f4b8d649fb15d557c6350ab5070afba66"
] | [
"miricoord/lrs/lrs_pipetools.py"
] | [
"#\n\"\"\"\nUseful python tools for working with the MIRI LRS; calls a specific version\nof the tools specified below.\n\nThis version of the tools hooks into the JWST Calibration\nPipeline code to do the heavy lifting. Note that this\nmeans performance may be affected by what version of\nthe pipeline you are running!! It does, however, use\noffline versions of the CRDS reference files contained\nwithin this github repository.\n\nThis is mostly useful for testing the pipeline rather than\nfor creating reference files.\n\nConvert JWST v2,v3 locations (in arcsec) to MIRI Imager SCA x,y pixel locations.\nNote that the pipeline uses a 0-indexed pixel convention\nwhile SIAF uses 1-indexed pixels.\n\nBy default, calling a function in here will use the default version of the linked\nCDP-specific tools. This can be overridden by calling set_toolversion(version).\n\nAuthor: David R. Law ([email protected])\n\nREVISION HISTORY:\n17-Dec-2018 Written by David Law ([email protected])\n\"\"\"\n\nimport os as os\nimport sys\nimport numpy as np\nfrom astropy.modeling import models\nfrom asdf import AsdfFile\nfrom jwst import datamodels\nfrom jwst.assign_wcs import miri\nfrom numpy.testing import assert_allclose\nimport pdb\n\n#############################\n\n# Set the tools version. Default is CDP-7 (there is no CDP-7b)\ndef set_toolversion(version):\n # If the toolversion global was already set, delete it\n try:\n del globals()['tv']\n except:\n pass\n\n # Define toolversion as global scope within lrs_tools\n global tv\n # Import appropriate version\n if (version == 'default'):\n import miricoord.lrs.toolversions.lrs_pipetools_cdp7 as tv\n elif (version == 'cdp7'):\n import miricoord.lrs.toolversions.lrs_pipetools_cdp7 as tv\n else:\n print('Invalid tool version specified!')\n \n return\n\n#############################\n\n# Return the tools version\ndef version():\n # Determine whether the CDP toolversion has been set. If not, set to default.\n try:\n sys.getrefcount(tv)\n except:\n set_toolversion('default')\n \n return tv.version()\n\n#############################\n\n# Return a model for the detector pixel to v2,v3,lambda distortion\n# Note that stype must be a single string (slit or slitless)\ndef xytov2v3lam_model(stype,**kwargs):\n # Determine whether the CDP toolversion has been set. If not, set to default.\n try:\n sys.getrefcount(tv)\n except:\n set_toolversion('default')\n \n model=tv.xytov2v3lam_model(stype,**kwargs)\n\n return model\n\n#############################\n\n# Convert 0-indexed subarray pixels to v2,v3 in arcsec using the model\n# Note that stype must be a single string (slit or slitless)\ndef xytov2v3lam(x,y,stype,**kwargs):\n model=xytov2v3lam_model(stype,**kwargs)\n\n v2,v3,lam=model(x,y)\n\n return v2,v3,lam\n\n#############################\n\n# Convert v2,v3,lambda in arcsec to 0-indexed subarray pixels using the model\n# Note that stype must be a single string (slit or slitless)\ndef v2v3lamtoxy(v2,v3,lam,stype,**kwargs):\n model=xytov2v3lam_model(stype,**kwargs)\n \n x,y=model.inverse(v2,v3,lam)\n\n return x,y\n\n#############################\n\n# Test the forward and reverse transforms\ndef testtransform():\n # Determine whether the CDP toolversion has been set. 
If not, set to default.\n try:\n sys.getrefcount(tv)\n except:\n set_toolversion('default')\n\n # Get test data from a generating function\n x,y,v2,v3,lam,stype=tv.testdata()\n\n ntype=len(stype)\n # Loop over the slit and slitless varieties of test data\n for i in range(0,ntype):\n thisx,thisy,thisv2,thisv3,thislam,thisstype=x[i],y[i],v2[i],v3[i],lam[i],stype[i]\n v2new,v3new,lamnew=xytov2v3lam(thisx,thisy,thisstype)\n xnew,ynew=v2v3lamtoxy(thisv2,thisv3,thislam,thisstype)\n\n # Assert that reference values and newly-created values are close\n assert_allclose(thisx,xnew,atol=0.05)\n assert_allclose(thisy,ynew,atol=0.05)\n assert_allclose(thisv2,v2new,atol=0.05)\n assert_allclose(thisv3,v3new,atol=0.05)\n assert_allclose(thislam,lamnew,atol=0.05)\n \n return\n"
] | [
[
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
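Assuming the miricoord package (and the JWST pipeline plus CRDS reference files it wraps) is installed, the lrs_pipetools module in the row above is typically driven as in the sketch below; the pixel coordinates are placeholder values, not reference data.

import miricoord.lrs.lrs_pipetools as lrst

lrst.set_toolversion('cdp7')                            # or leave unset to use 'default'
v2, v3, lam = lrst.xytov2v3lam(160.0, 300.0, 'slit')    # 0-indexed detector pixels (illustrative)
x, y = lrst.v2v3lamtoxy(v2, v3, lam, 'slit')            # round-trip back to pixels
lrst.testtransform()                                    # built-in forward/inverse consistency check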
lylhw13/thread-pool | [
"e982392728dfabd50f5549e932b1b90f772d8d31"
] | [
"test/plot.py"
] | [
"str = '''\njobnum is 1, thread_num is 2\njobnum is 10, thread_num is 3\njobnum is 9, thread_num is 3\njobnum is 8, thread_num is 3\njobnum is 7, thread_num is 3\njobnum is 6, thread_num is 3\njobnum is 15, thread_num is 6\njobnum is 14, thread_num is 6\njobnum is 13, thread_num is 6\njobnum is 12, thread_num is 6\njobnum is 11, thread_num is 6\njobnum is 10, thread_num is 6\njobnum is 9, thread_num is 6\njobnum is 28, thread_num is 9\njobnum is 27, thread_num is 9\njobnum is 26, thread_num is 9\njobnum is 25, thread_num is 9\njobnum is 24, thread_num is 9\njobnum is 33, thread_num is 16\njobnum is 32, thread_num is 16\njobnum is 31, thread_num is 16\njobnum is 30, thread_num is 16\njobnum is 29, thread_num is 16\njobnum is 28, thread_num is 16\njobnum is 27, thread_num is 16\njobnum is 26, thread_num is 16\njobnum is 25, thread_num is 16\njobnum is 24, thread_num is 16\njobnum is 23, thread_num is 16\njobnum is 22, thread_num is 16\njobnum is 21, thread_num is 16\njobnum is 20, thread_num is 16\njobnum is 19, thread_num is 16\njobnum is 28, thread_num is 16\njobnum is 27, thread_num is 16\njobnum is 26, thread_num is 16\njobnum is 25, thread_num is 16\njobnum is 24, thread_num is 16\njobnum is 23, thread_num is 16\njobnum is 22, thread_num is 16\njobnum is 21, thread_num is 16\njobnum is 20, thread_num is 16\njobnum is 19, thread_num is 16\njobnum is 18, thread_num is 16\njobnum is 17, thread_num is 16\njobnum is 16, thread_num is 16\njobnum is 15, thread_num is 15\njobnum is 14, thread_num is 14\njobnum is 13, thread_num is 13\njobnum is 12, thread_num is 12\njobnum is 11, thread_num is 11\njobnum is 10, thread_num is 10\njobnum is 19, thread_num is 11\njobnum is 18, thread_num is 11\njobnum is 27, thread_num is 16\njobnum is 26, thread_num is 16\njobnum is 25, thread_num is 16\njobnum is 24, thread_num is 16\njobnum is 23, thread_num is 16\njobnum is 22, thread_num is 16\njobnum is 21, thread_num is 16\njobnum is 20, thread_num is 16\njobnum is 19, thread_num is 16\njobnum is 18, thread_num is 16\njobnum is 17, thread_num is 16\njobnum is 16, thread_num is 16\njobnum is 15, thread_num is 15\njobnum is 14, thread_num is 14\njobnum is 13, thread_num is 14\njobnum is 22, thread_num is 14\njobnum is 21, thread_num is 14\njobnum is 29, thread_num is 16\njobnum is 28, thread_num is 16\njobnum is 27, thread_num is 16\njobnum is 26, thread_num is 16\njobnum is 25, thread_num is 16\njobnum is 24, thread_num is 16\njobnum is 23, thread_num is 16\njobnum is 22, thread_num is 16\njobnum is 21, thread_num is 16\njobnum is 20, thread_num is 16\njobnum is 19, thread_num is 16\njobnum is 18, thread_num is 16\njobnum is 17, thread_num is 16\njobnum is 16, thread_num is 16\njobnum is 15, thread_num is 15\njobnum is 14, thread_num is 14\njobnum is 13, thread_num is 14\njobnum is 12, thread_num is 13\njobnum is 11, thread_num is 11\njobnum is 10, thread_num is 10\njobnum is 9, thread_num is 9\njobnum is 8, thread_num is 8\njobnum is 7, thread_num is 7\njobnum is 6, thread_num is 6\njobnum is 5, thread_num is 5\njobnum is 4, thread_num is 4\njobnum is 3, thread_num is 3\njobnum is 2, thread_num is 2\njobnum is 1, thread_num is 2\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nif __name__ == \"__main__\":\n jobnum = []\n threadnum = []\n for line in str.split('\\n'):\n if line:\n data = line.replace(',', ' ').split()\n jobnum.append(int(data[2]))\n threadnum.append(int(data[-1]))\n\n t = np.arange(0, len(jobnum), 1)\n fig, ax = plt.subplots()\n ax.plot(t, jobnum, label=\"job num\")\n 
ax.plot(t, threadnum, label=\"thread num\")\n ax.set(title = \"dynamic thread num\")\n ax.legend()\n ax.grid()\n fig.savefig(\"dynamic.png\")\n plt.show()"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cmougan/Novartis2020 | [
"390f34efa6bbc1e168f4e58d2d335c7cfa7d865e",
"390f34efa6bbc1e168f4e58d2d335c7cfa7d865e"
] | [
"pre-datathon/models/basic_mae_cb.py",
"NN_files/tools/postprocessing.py"
] | [
"\n\nimport numpy as np\nfrom catboost import CatBoostRegressor\nfrom sklearn.datasets import load_boston\nfrom sklearn.metrics import mean_absolute_error\n\nfrom tools.catboost_custom import MaeObjective\n\nnp.random.seed(42)\n\n\nif __name__ == \"__main__\":\n\n X, y = load_boston(return_X_y=True)\n # Using this, it learns\n cb = CatBoostRegressor(\n loss_function=MaeObjective(),\n # loss_function=\"MAE\",\n eval_metric='MAE'\n )\n\n cb.fit(\n X,\n y,\n )\n\n print(mean_absolute_error(cb.predict(X), y))\n",
"import pandas as pd\nimport numpy as np\n\n\ndef postprocess_submission(submission_df, solve_submission_issues=True):\n\n join_on = [\"country\", \"brand\", \"month_num\"]\n keep = join_on + [\"volume\"]\n\n df_vol = pd.read_csv(\"../data/gx_volume.csv\").loc[:, keep]\n\n both_ds = submission_df.merge(\n df_vol,\n on=join_on,\n how=\"left\",\n )\n\n both_ds.loc[both_ds[\"volume\"].notnull(), \"prediction\"] = both_ds[both_ds[\"volume\"].notnull()][\"volume\"].values\n both_ds.loc[both_ds[\"volume\"].notnull(), \"pred_95_high\"] = both_ds[both_ds[\"volume\"].notnull()][\"volume\"].values + 0.01\n both_ds.loc[both_ds[\"volume\"].notnull(), \"pred_95_low\"] = both_ds[both_ds[\"volume\"].notnull()][\"volume\"].values - 0.01\n\n final_cols = join_on + [\"pred_95_low\", \"prediction\", \"pred_95_high\"]\n\n final_df = both_ds.loc[:, final_cols]\n\n if solve_submission_issues:\n\n if (final_df.pred_95_low > final_df.pred_95_high).any():\n raise(\"Stop please, upper < lower\")\n\n cond_lower_mean = final_df.pred_95_low > final_df.prediction\n if cond_lower_mean.any():\n print(\"Solving lower > mean\")\n final_df.loc[cond_lower_mean, \"prediction\"] = \\\n final_df.loc[cond_lower_mean, \"pred_95_low\"] + 0.01\n\n cond_upper_mean = final_df.prediction > final_df.pred_95_high\n if cond_upper_mean.any():\n print(\"Solving upper < mean\")\n final_df.loc[cond_upper_mean, \"prediction\"] = \\\n final_df.loc[cond_upper_mean, \"pred_95_high\"] - 0.01\n\n\n return final_df"
] | [
[
"numpy.random.seed",
"sklearn.datasets.load_boston"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
VietDunghacker/mmdetection | [
"9e97878b2c5247bebe8ec406752941ffc8083871",
"9e97878b2c5247bebe8ec406752941ffc8083871",
"9e97878b2c5247bebe8ec406752941ffc8083871"
] | [
"mmdet/models/dense_heads/embedding_rpn_head.py",
"mmdet/models/necks/opa_fpn.py",
"mmdet/models/backbones/swin.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import BaseModule\n\nfrom mmdet.models.builder import HEADS\nfrom ...core import bbox_cxcywh_to_xyxy\n\n\[email protected]_module()\nclass EmbeddingRPNHead(BaseModule):\n\t\"\"\"RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .\n\n\tUnlike traditional RPNHead, this module does not need FPN input, but just\n\tdecode `init_proposal_bboxes` and expand the first dimension of\n\t`init_proposal_bboxes` and `init_proposal_features` to the batch_size.\n\n\tArgs:\n\t\tnum_proposals (int): Number of init_proposals. Default 100.\n\t\tproposal_feature_channel (int): Channel number of\n\t\t\tinit_proposal_feature. Defaults to 256.\n\t\tinit_cfg (dict or list[dict], optional): Initialization config dict.\n\t\t\tDefault: None\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t num_proposals=100,\n\t\t\t\t proposal_feature_channel=256,\n\t\t\t\t init_cfg=None,\n\t\t\t\t **kwargs):\n\t\tassert init_cfg is None, 'To prevent abnormal initialization ' \\\n\t\t\t\t\t\t\t\t 'behavior, init_cfg is not allowed to be set'\n\t\tsuper(EmbeddingRPNHead, self).__init__(init_cfg)\n\t\tself.num_proposals = num_proposals\n\t\tself.proposal_feature_channel = proposal_feature_channel\n\t\tself._init_layers()\n\n\tdef _init_layers(self):\n\t\t\"\"\"Initialize a sparse set of proposal boxes and proposal features.\"\"\"\n\t\tself.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)\n\t\tself.init_proposal_features = nn.Embedding(self.num_proposals, self.proposal_feature_channel)\n\n\tdef init_weights(self):\n\t\t\"\"\"Initialize the init_proposal_bboxes as normalized.\n\n\t\t[c_x, c_y, w, h], and we initialize it to the size of the entire\n\t\timage.\n\t\t\"\"\"\n\t\tsuper(EmbeddingRPNHead, self).init_weights()\n\t\tnn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)\n\t\tnn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)\n\n\tdef _decode_init_proposals(self, imgs, img_metas):\n\t\t\"\"\"Decode init_proposal_bboxes according to the size of images and\n\t\texpand dimension of init_proposal_features to batch_size.\n\n\t\tArgs:\n\t\t\timgs (list[Tensor]): List of FPN features.\n\t\t\timg_metas (list[dict]): List of meta-information of\n\t\t\t\timages. 
Need the img_shape to decode the init_proposals.\n\n\t\tReturns:\n\t\t\tTuple(Tensor):\n\n\t\t\t\t- proposals (Tensor): Decoded proposal bboxes,\n\t\t\t\t has shape (batch_size, num_proposals, 4).\n\t\t\t\t- init_proposal_features (Tensor): Expanded proposal\n\t\t\t\t features, has shape\n\t\t\t\t (batch_size, num_proposals, proposal_feature_channel).\n\t\t\t\t- imgs_whwh (Tensor): Tensor with shape\n\t\t\t\t (batch_size, 4), the dimension means\n\t\t\t\t [img_width, img_height, img_width, img_height].\n\t\t\"\"\"\n\t\tproposals = self.init_proposal_bboxes.weight.clone()\n\t\tproposals = bbox_cxcywh_to_xyxy(proposals)\n\t\tnum_imgs = len(imgs[0])\n\t\timgs_whwh = []\n\t\tfor meta in img_metas:\n\t\t\th, w, _ = meta['img_shape']\n\t\t\timgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))\n\t\timgs_whwh = torch.cat(imgs_whwh, dim=0)\n\t\timgs_whwh = imgs_whwh[:, None, :]\n\n\t\t# imgs_whwh has shape (batch_size, 1, 4)\n\t\t# The shape of proposals change from (num_proposals, 4)\n\t\t# to (batch_size ,num_proposals, 4)\n\t\tproposals = proposals * imgs_whwh\n\n\t\tinit_proposal_features = self.init_proposal_features.weight.clone()\n\t\tinit_proposal_features = init_proposal_features[None].expand(num_imgs, *init_proposal_features.size())\n\t\treturn proposals, init_proposal_features, imgs_whwh\n\n\tdef forward_dummy(self, img, img_metas):\n\t\t\"\"\"Dummy forward function.\n\n\t\tUsed in flops calculation.\n\t\t\"\"\"\n\t\treturn self._decode_init_proposals(img, img_metas)\n\n\tdef forward_train(self, img, img_metas):\n\t\t\"\"\"Forward function in training stage.\"\"\"\n\t\treturn self._decode_init_proposals(img, img_metas)\n\n\tdef simple_test_rpn(self, img, img_metas):\n\t\t\"\"\"Forward function in testing stage.\"\"\"\n\t\treturn self._decode_init_proposals(img, img_metas)\n\n\tdef simple_test(self, img, img_metas):\n\t\t\"\"\"Forward function in testing stage.\"\"\"\n\t\traise NotImplementedError\n\n\tdef aug_test_rpn(self, feats, img_metas):\n\t\traise NotImplementedError(\n\t\t\t'EmbeddingRPNHead does not support test-time augmentation')\n",
"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmcv.cnn import ConvModule, xavier_init, kaiming_init\nfrom mmcv.runner import BaseModule, auto_fp16\nfrom ..builder import NECKS\n\nfrom .sepc import PConvModule\nfrom mmdet.models.utils import ModulatedSEPCConv\n\nOPS = {\n\t'none': lambda in_channels, out_channels: None_(),\n\t'skip_connect': lambda in_channels, out_channels: Skip_(),\n\t'TD': lambda in_channels, out_channels: TopDown(in_channels, out_channels),\n\t'BU': lambda in_channels, out_channels: BottomUp(in_channels, out_channels),\n\t'FS': lambda in_channels, out_channels: FuseSplit(in_channels, out_channels),\n\t'SE': lambda in_channels, out_channels: PConvModule(\n\t\tin_channels,\n\t\tout_channels,\n\t\tibn=True,\n\t\tnorm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n\t\tnorm_eval=False,\n\t\tpart_deform=True),\n}\n\nclass TopDown(nn.Module):\n\tdef __init__(self,\n\t\t\t\t in_channels,\n\t\t\t\t out_channels,\n\t\t\t\t ):\n\t\tsuper(TopDown, self).__init__()\n\t\tself.tdm_convs = nn.ModuleList()\n\t\tfor i in range(4):\n\t\t\ttdm_conv = ModulatedSEPCConv(in_channels, out_channels, 3, padding=1, part_deform=True)\n\t\t\tself.tdm_convs.append(tdm_conv)\n\n\tdef forward(self, inputs):\n\t\t# build top-down path\n\t\ttopdown = []\n\t\ttopdownconv = self.tdm_convs[-1](1, inputs[-1])\n\t\tif topdownconv.shape[2:] != inputs[-1].shape:\n\t\t\ttopdownconv = F.interpolate(topdownconv, size=inputs[-1].shape[2:], mode='nearest')\n\n\t\ttopdown.append(topdownconv)\n\t\tfor i in range(3, 0, -1):\n\t\t\ttemp = self.tdm_convs[i - 1](i - 1, inputs[i - 1] + F.interpolate(topdownconv.clone(), size=inputs[i - 1].shape[2:], mode='nearest'))\n\t\t\ttopdown.insert(0, temp)\n\t\t\ttopdownconv = temp\n\t\treturn topdown\n\nclass BottomUp(nn.Module):\n\tdef __init__(self,\n\t\t\t\t in_channels,\n\t\t\t\t out_channels,\n\t\t\t\t ):\n\t\tsuper(BottomUp, self).__init__()\n\t\tself.bun_convs = nn.ModuleList()\n\t\tfor i in range(4):\n\t\t\tbun_conv = ModulatedSEPCConv(in_channels, out_channels, 3, padding=1, part_deform=True)\n\t\t\tself.bun_convs.append(bun_conv)\n\n\tdef forward(self, inputs):\n\t\t# build bottom-up path\n\t\tbottomup = []\n\t\tfor i in range(4):\n\t\t\tif i == 0:\n\t\t\t\tbum = inputs[0]\n\t\t\telif i == 3:\n\t\t\t\tbb = F.max_pool2d(bottomup[-1].clone(), 2, stride=2)\n\t\t\t\tif bb.shape[2:] != inputs[-1].shape[2:]:\n\t\t\t\t\tbb = F.interpolate(bb, size=inputs[-1].shape[2:], mode='nearest')\n\t\t\t\tbum = bb + inputs[-1]\n\t\t\telse:\n\t\t\t\tbum = inputs[i] + F.max_pool2d(bottomup[i - 1].clone(), 2, stride=2)\n\t\t\tbottomup.append(self.bun_convs[i](i, bum))\n\n\t\treturn bottomup\n\nclass FuseSplit(nn.Module):\n\tdef __init__(self, in_channels, out_channels, ):\n\t\tsuper(FuseSplit, self).__init__()\n\t\tself.fuse = nn.ModuleList([ModulatedSEPCConv(out_channels * 2, out_channels, 3, padding=1, part_deform=True)] * 2)\n\t\tself.in_channels = in_channels\n\t\tself.out_channels = out_channels\n\n\tdef forward(self, inputs):\n\t\t# build fusing-splitting path\n\t\tfusesplit = []\n\t\tfuse1 = inputs[1] + F.max_pool2d(inputs[0], 2, stride=2)\n\t\tfuse2 = F.interpolate(inputs[-1], size=inputs[2].shape[2:], mode='nearest') + inputs[2]\n\t\tfuseconv1 = self.fuse[0](1, torch.cat([fuse1.clone(), F.interpolate(fuse2.clone(), size=fuse1.shape[2:], mode='nearest')], 1))\n\t\tfuseconv2 = self.fuse[1](1, torch.cat([F.max_pool2d(fuse1.clone(), 2, stride=2), fuse2.clone()], 
1))\n\n\t\tfusesplit.append(F.interpolate(fuseconv1.clone(), size=inputs[0].shape[2:], mode='nearest'))\n\t\tfusesplit.append(fuseconv1)\n\t\tfusesplit.append(fuseconv2)\n\t\tfusesplit.append(F.max_pool2d(fuseconv2.clone(), 2, stride=2, ceil_mode=False))\n\t\tif fusesplit[-1].shape[2:] != inputs[-1].shape[2:]:\n\t\t\tfusesplit[-1] = F.interpolate(fusesplit[-1].clone(), size=inputs[-1].shape[2:], mode='nearest')\n\t\treturn fusesplit\n\nclass None_(nn.Module):\n\tdef __init__(self,):\n\t\tsuper(None_, self).__init__()\n\t\tself.size =0\n\t\tself.fp = 0\n\t\t\t\n\tdef forward(self, inputs):\n\t\touts = []\n\t\tfor x in inputs:\n\t\t\touts.append(x.new_zeros(x.shape))\n\t\treturn outs\n\nclass Skip_(nn.Module):\n\tdef __init__(self):\n\t\tsuper(Skip_, self).__init__()\n\t\tself.size = 0\n\t\tself.fp = 0\n\n\tdef forward(self, inputs):\n\t\treturn inputs\n\[email protected]_module()\nclass OPA_FPN(BaseModule):\n\tdef __init__(self,\n\t\t\t\t in_channels,\n\t\t\t\t out_channels,\n\t\t\t\t num_outs,\n\t\t\t\t num_stacks=5,\n\t\t\t\t start_level=0,\n\t\t\t\t end_level=-1,\n\t\t\t\t add_extra_convs=False,\n\t\t\t\t relu_before_extra_convs=False,\n\t\t\t\t no_norm_on_lateral=False,\n\t\t\t\t conv_cfg=None,\n\t\t\t\t norm_cfg=None,\n\t\t\t\t act_cfg=None,\n\t\t\t\t primitives = ['none', 'skip_connect', 'TD', 'BU', 'FS', 'SE'],\n\t\t\t\t paths=None,\n\t\t\t\t init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):\n\t\tsuper(OPA_FPN, self).__init__(init_cfg=init_cfg)\n\t\tassert isinstance(in_channels, list)\n\n\t\tself.in_channels = in_channels\n\t\tself.out_channels = out_channels\n\t\tself.num_ins = len(in_channels)\n\t\tself.num_outs = num_outs\n\t\tself.act_cfg = act_cfg\n\t\tself.relu_before_extra_convs = relu_before_extra_convs\n\t\tself.no_norm_on_lateral = no_norm_on_lateral\n\t\tself.fp16_enabled = False\n\t\t\n\t\tself.num_stacks = num_stacks\n\t\tself.primitives = primitives\n\t\tif end_level == -1:\n\t\t\tself.backbone_end_level = self.num_ins\n\t\t\tassert num_outs >= self.num_ins - start_level\n\t\telse:\n\t\t\t# if end_level < inputs, no extra level is allowed\n\t\t\tself.backbone_end_level = end_level\n\t\t\tassert end_level <= len(in_channels)\n\t\t\tassert num_outs == end_level - start_level\n\t\tself.start_level = start_level\n\t\tself.end_level = end_level\n\t\tself.add_extra_convs = add_extra_convs\n\n\t\tself.lateral_convs = nn.ModuleList()\n\t\tself.fpn_convs = nn.ModuleList()\n\t\tself.information_path = nn.ModuleList()\n\n\t\tself.features = nn.ModuleList()\n\t\tself.paths = paths\n\t\tfor path in self.paths:\n\t\t\tself.features.append(OPS[path](out_channels, out_channels))\n\n\t\tself.topcontext = nn.Sequential(\n\t\t\tConvModule(\n\t\t\t\tout_channels,\n\t\t\t\tout_channels,\n\t\t\t\t1,\n\t\t\t\tpadding=0,\n\t\t\t\tstride=1,\n\t\t\t\tconv_cfg=conv_cfg,\n\t\t\t\tnorm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n\t\t\t\tact_cfg=self.act_cfg,\n\t\t\t\tinplace=False),\n\t\t\tnn.AdaptiveAvgPool2d(1))\n\n\t\tfor i in range(self.start_level, self.backbone_end_level):\n\t\t\tl_conv = ConvModule(\n\t\t\t\tin_channels[i],\n\t\t\t\tout_channels,\n\t\t\t\t1,\n\t\t\t\tconv_cfg=conv_cfg,\n\t\t\t\tnorm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n\t\t\t\tact_cfg=self.act_cfg,\n\t\t\t\tinplace=False)\n\t\t\tself.lateral_convs.append(l_conv)\n\n\t\t# add extra conv layers (e.g., RetinaNet)\n\t\textra_levels = num_outs - self.backbone_end_level + self.start_level\n\t\tif self.add_extra_convs and extra_levels >= 1:\n\t\t\tfor i in 
range(extra_levels):\n\t\t\t\textra_fpn_conv = ConvModule(\n\t\t\t\t\tout_channels,\n\t\t\t\t\tout_channels,\n\t\t\t\t\t3,\n\t\t\t\t\tstride=2,\n\t\t\t\t\tpadding=1,\n\t\t\t\t\tconv_cfg=conv_cfg,\n\t\t\t\t\tnorm_cfg=norm_cfg,\n\t\t\t\t\tact_cfg=act_cfg,\n\t\t\t\t\tinplace=False)\n\t\t\t\tself.fpn_convs.append(extra_fpn_conv)\n\t# default init_weights for conv(msra) and norm in ConvModule\n\tdef init_weights(self):\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Conv2d):\n\t\t\t\txavier_init(m, distribution='uniform')\n\n\t@auto_fp16()\n\tdef forward(self, inputs, architecture=None):\n\t\tassert len(inputs) == len(self.in_channels)\n\t\t# build laterals\n\n\t\tlaterals = [\n\t\t\tlateral_conv(inputs[i + self.start_level])\n\t\t\tfor i, lateral_conv in enumerate(self.lateral_convs)\n\t\t]\n\t\tif self.add_extra_convs:\n\t\t\tlaterals.append(self.fpn_convs[-2](laterals[-1]))\n\t\tused_backbone_levels = len(laterals)\n\t\ttop = F.interpolate(self.topcontext(laterals[-1]), size=laterals[-1].shape[2:], mode='nearest')\n\t\tlaterals[-1] = top + laterals[-1]\n\n\t\tinfo_paths = []\n\t\tinfo_paths.append(laterals)\n\n\t\tfor step in range(self.num_stacks):\n\t\t\t_step = step * (step + 1) // 2\n\t\t\tlaterals_mid = [laterals[i].new_zeros(laterals[i].shape) for i in range(4)]\n\t\t\tfor j in range(step+1):\n\t\t\t\ttemp = self.features[_step + j](info_paths[j])\n\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tlaterals_mid[i] += temp[i]\n\t\t\tinfo_paths.append(laterals_mid)\n\n\t\touts = info_paths[-1]\n\n\t\tfor i in range(1, len(info_paths)-1):\n\t\t\tout = info_paths[i]\n\t\t\tfor j in range(4):\n\t\t\t\touts[j] += out[j]\n\n\t\t# part 2: add extra levels\n\t\tif self.num_outs > len(outs):\n\t\t\t# use max pool to get more levels on top of outputs\n\t\t\t# (e.g., Faster R-CNN, Mask R-CNN)\n\t\t\tif not self.add_extra_convs:\n\t\t\t\tfor i in range(self.num_outs - used_backbone_levels):\n\t\t\t\t\touts.append(F.max_pool2d(outs[-1], 1, stride=2))\n\t\t\t# add conv layers on top of original feature maps (RetinaNet)\n\t\t\telse:\n\t\t\t\tif self.relu_before_extra_convs:\n\t\t\t\t\touts.append(self.fpn_convs[-1](F.relu(outs[-1])))\n\t\t\t\telse:\n\t\t\t\t\touts.append(self.fpn_convs[-1](outs[-1]))\n\t\treturn tuple(outs)\n",
"import warnings\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_norm_layer, constant_init, trunc_normal_init\nfrom mmcv.cnn.bricks.transformer import FFN, build_dropout\nfrom mmcv.runner import BaseModule, ModuleList, _load_checkpoint\nfrom mmcv.utils import to_2tuple\n\nfrom ...utils import get_root_logger\nfrom ..builder import BACKBONES\nfrom ..utils.ckpt_convert import swin_converter\nfrom ..utils.transformer import PatchEmbed, PatchMerging\n\n\nclass WindowMSA(BaseModule):\n\t\"\"\"Window based multi-head self-attention (W-MSA) module with relative\n\tposition bias.\n\n\tArgs:\n\t\tembed_dims (int): Number of input channels.\n\t\tnum_heads (int): Number of attention heads.\n\t\twindow_size (tuple[int]): The height and width of the window.\n\t\tqkv_bias (bool, optional): If True, add a learnable bias to q, k, v.\n\t\t\tDefault: True.\n\t\tqk_scale (float | None, optional): Override default qk scale of\n\t\t\thead_dim ** -0.5 if set. Default: None.\n\t\tattn_drop_rate (float, optional): Dropout ratio of attention weight.\n\t\t\tDefault: 0.0\n\t\tproj_drop_rate (float, optional): Dropout ratio of output. Default: 0.\n\t\tinit_cfg (dict | None, optional): The Config for initialization.\n\t\t\tDefault: None.\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t embed_dims,\n\t\t\t\t num_heads,\n\t\t\t\t window_size,\n\t\t\t\t qkv_bias=True,\n\t\t\t\t qk_scale=None,\n\t\t\t\t attn_drop_rate=0.,\n\t\t\t\t proj_drop_rate=0.,\n\t\t\t\t init_cfg=None):\n\n\t\tsuper().__init__()\n\t\tself.embed_dims = embed_dims\n\t\tself.window_size = window_size # Wh, Ww\n\t\tself.num_heads = num_heads\n\t\thead_embed_dims = embed_dims // num_heads\n\t\tself.scale = qk_scale or head_embed_dims**-0.5\n\t\tself.init_cfg = init_cfg\n\n\t\t# define a parameter table of relative position bias\n\t\tself.relative_position_bias_table = nn.Parameter(\n\t\t\ttorch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),\n\t\t\t\t\t\tnum_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n\t\t# About 2x faster than original impl\n\t\tWh, Ww = self.window_size\n\t\trel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)\n\t\trel_position_index = rel_index_coords + rel_index_coords.T\n\t\trel_position_index = rel_position_index.flip(1).contiguous()\n\t\tself.register_buffer('relative_position_index', rel_position_index)\n\n\t\tself.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(attn_drop_rate)\n\t\tself.proj = nn.Linear(embed_dims, embed_dims)\n\t\tself.proj_drop = nn.Dropout(proj_drop_rate)\n\n\t\tself.softmax = nn.Softmax(dim=-1)\n\n\tdef init_weights(self):\n\t\ttrunc_normal_init(self.relative_position_bias_table, std=0.02)\n\n\tdef forward(self, x, mask=None):\n\t\t\"\"\"\n\t\tArgs:\n\n\t\t\tx (tensor): input features with shape of (num_windows*B, N, C)\n\t\t\tmask (tensor | None, Optional): mask with shape of (num_windows,\n\t\t\t\tWh*Ww, Wh*Ww), value should be between (-inf, 0].\n\t\t\"\"\"\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads,\n\t\t\t\t\t\t\t\t C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\t# make torchscript happy (cannot use tensor as tuple)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]\n\n\t\tq = q * self.scale\n\t\tattn = (q @ k.transpose(-2, -1))\n\n\t\trelative_position_bias = self.relative_position_bias_table[\n\t\t\tself.relative_position_index.view(-1)].view(\n\t\t\t\tself.window_size[0] * 
self.window_size[1],\n\t\t\t\tself.window_size[0] * self.window_size[1],\n\t\t\t\t-1) # Wh*Ww,Wh*Ww,nH\n\t\trelative_position_bias = relative_position_bias.permute(\n\t\t\t2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n\t\tattn = attn + relative_position_bias.unsqueeze(0)\n\n\t\tif mask is not None:\n\t\t\tnW = mask.shape[0]\n\t\t\tattn = attn.view(B // nW, nW, self.num_heads, N,\n\t\t\t\t\t\t\t N) + mask.unsqueeze(1).unsqueeze(0)\n\t\t\tattn = attn.view(-1, self.num_heads, N, N)\n\t\tattn = self.softmax(attn)\n\n\t\tattn = self.attn_drop(attn)\n\n\t\tx = (attn @ v).transpose(1, 2).reshape(B, N, C)\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\t@staticmethod\n\tdef double_step_seq(step1, len1, step2, len2):\n\t\tseq1 = torch.arange(0, step1 * len1, step1)\n\t\tseq2 = torch.arange(0, step2 * len2, step2)\n\t\treturn (seq1[:, None] + seq2[None, :]).reshape(1, -1)\n\n\nclass ShiftWindowMSA(BaseModule):\n\t\"\"\"Shifted Window Multihead Self-Attention Module.\n\n\tArgs:\n\t\tembed_dims (int): Number of input channels.\n\t\tnum_heads (int): Number of attention heads.\n\t\twindow_size (int): The height and width of the window.\n\t\tshift_size (int, optional): The shift step of each window towards\n\t\t\tright-bottom. If zero, act as regular window-msa. Defaults to 0.\n\t\tqkv_bias (bool, optional): If True, add a learnable bias to q, k, v.\n\t\t\tDefault: True\n\t\tqk_scale (float | None, optional): Override default qk scale of\n\t\t\thead_dim ** -0.5 if set. Defaults: None.\n\t\tattn_drop_rate (float, optional): Dropout ratio of attention weight.\n\t\t\tDefaults: 0.\n\t\tproj_drop_rate (float, optional): Dropout ratio of output.\n\t\t\tDefaults: 0.\n\t\tdropout_layer (dict, optional): The dropout_layer used before output.\n\t\t\tDefaults: dict(type='DropPath', drop_prob=0.).\n\t\tinit_cfg (dict, optional): The extra config for initialization.\n\t\t\tDefault: None.\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t embed_dims,\n\t\t\t\t num_heads,\n\t\t\t\t window_size,\n\t\t\t\t shift_size=0,\n\t\t\t\t qkv_bias=True,\n\t\t\t\t qk_scale=None,\n\t\t\t\t attn_drop_rate=0,\n\t\t\t\t proj_drop_rate=0,\n\t\t\t\t dropout_layer=dict(type='DropPath', drop_prob=0.),\n\t\t\t\t init_cfg=None):\n\t\tsuper().__init__(init_cfg)\n\n\t\tself.window_size = window_size\n\t\tself.shift_size = shift_size\n\t\tassert 0 <= self.shift_size < self.window_size\n\n\t\tself.w_msa = WindowMSA(\n\t\t\tembed_dims=embed_dims,\n\t\t\tnum_heads=num_heads,\n\t\t\twindow_size=to_2tuple(window_size),\n\t\t\tqkv_bias=qkv_bias,\n\t\t\tqk_scale=qk_scale,\n\t\t\tattn_drop_rate=attn_drop_rate,\n\t\t\tproj_drop_rate=proj_drop_rate,\n\t\t\tinit_cfg=None)\n\n\t\tself.drop = build_dropout(dropout_layer)\n\n\tdef forward(self, query, hw_shape):\n\t\tB, L, C = query.shape\n\t\tH, W = hw_shape\n\t\tassert L == H * W, 'input feature has wrong size'\n\t\tquery = query.view(B, H, W, C)\n\n\t\t# pad feature maps to multiples of window size\n\t\tpad_r = (self.window_size - W % self.window_size) % self.window_size\n\t\tpad_b = (self.window_size - H % self.window_size) % self.window_size\n\t\tquery = F.pad(query, (0, 0, 0, pad_r, 0, pad_b))\n\t\tH_pad, W_pad = query.shape[1], query.shape[2]\n\n\t\t# cyclic shift\n\t\tif self.shift_size > 0:\n\t\t\tshifted_query = torch.roll(\n\t\t\t\tquery,\n\t\t\t\tshifts=(-self.shift_size, -self.shift_size),\n\t\t\t\tdims=(1, 2))\n\n\t\t\t# calculate attention mask for SW-MSA\n\t\t\timg_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device)\n\t\t\th_slices = (slice(0, 
-self.window_size),\n\t\t\t\t\t\tslice(-self.window_size,\n\t\t\t\t\t\t\t -self.shift_size), slice(-self.shift_size, None))\n\t\t\tw_slices = (slice(0, -self.window_size),\n\t\t\t\t\t\tslice(-self.window_size,\n\t\t\t\t\t\t\t -self.shift_size), slice(-self.shift_size, None))\n\t\t\tcnt = 0\n\t\t\tfor h in h_slices:\n\t\t\t\tfor w in w_slices:\n\t\t\t\t\timg_mask[:, h, w, :] = cnt\n\t\t\t\t\tcnt += 1\n\n\t\t\t# nW, window_size, window_size, 1\n\t\t\tmask_windows = self.window_partition(img_mask)\n\t\t\tmask_windows = mask_windows.view(\n\t\t\t\t-1, self.window_size * self.window_size)\n\t\t\tattn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n\t\t\tattn_mask = attn_mask.masked_fill(attn_mask != 0,\n\t\t\t\t\t\t\t\t\t\t\t float(-100.0)).masked_fill(\n\t\t\t\t\t\t\t\t\t\t\t\t attn_mask == 0, float(0.0))\n\t\telse:\n\t\t\tshifted_query = query\n\t\t\tattn_mask = None\n\n\t\t# nW*B, window_size, window_size, C\n\t\tquery_windows = self.window_partition(shifted_query)\n\t\t# nW*B, window_size*window_size, C\n\t\tquery_windows = query_windows.view(-1, self.window_size**2, C)\n\n\t\t# W-MSA/SW-MSA (nW*B, window_size*window_size, C)\n\t\tattn_windows = self.w_msa(query_windows, mask=attn_mask)\n\n\t\t# merge windows\n\t\tattn_windows = attn_windows.view(-1, self.window_size,\n\t\t\t\t\t\t\t\t\t\t self.window_size, C)\n\n\t\t# B H' W' C\n\t\tshifted_x = self.window_reverse(attn_windows, H_pad, W_pad)\n\t\t# reverse cyclic shift\n\t\tif self.shift_size > 0:\n\t\t\tx = torch.roll(\n\t\t\t\tshifted_x,\n\t\t\t\tshifts=(self.shift_size, self.shift_size),\n\t\t\t\tdims=(1, 2))\n\t\telse:\n\t\t\tx = shifted_x\n\n\t\tif pad_r > 0 or pad_b:\n\t\t\tx = x[:, :H, :W, :].contiguous()\n\n\t\tx = x.view(B, H * W, C)\n\n\t\tx = self.drop(x)\n\t\treturn x\n\n\tdef window_reverse(self, windows, H, W):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\twindows: (num_windows*B, window_size, window_size, C)\n\t\t\tH (int): Height of image\n\t\t\tW (int): Width of image\n\t\tReturns:\n\t\t\tx: (B, H, W, C)\n\t\t\"\"\"\n\t\twindow_size = self.window_size\n\t\tB = int(windows.shape[0] / (H * W / window_size / window_size))\n\t\tx = windows.view(B, H // window_size, W // window_size, window_size,\n\t\t\t\t\t\t window_size, -1)\n\t\tx = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n\t\treturn x\n\n\tdef window_partition(self, x):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tx: (B, H, W, C)\n\t\tReturns:\n\t\t\twindows: (num_windows*B, window_size, window_size, C)\n\t\t\"\"\"\n\t\tB, H, W, C = x.shape\n\t\twindow_size = self.window_size\n\t\tx = x.view(B, H // window_size, window_size, W // window_size,\n\t\t\t\t window_size, C)\n\t\twindows = x.permute(0, 1, 3, 2, 4, 5).contiguous()\n\t\twindows = windows.view(-1, window_size, window_size, C)\n\t\treturn windows\n\n\nclass SwinBlock(BaseModule):\n\t\"\"\"\"\n\tArgs:\n\t\tembed_dims (int): The feature dimension.\n\t\tnum_heads (int): Parallel attention heads.\n\t\tfeedforward_channels (int): The hidden dimension for FFNs.\n\t\twindow_size (int, optional): The local window scale. Default: 7.\n\t\tshift (bool, optional): whether to shift window or not. Default False.\n\t\tqkv_bias (bool, optional): enable bias for qkv if True. Default: True.\n\t\tqk_scale (float | None, optional): Override default qk scale of\n\t\t\thead_dim ** -0.5 if set. Default: None.\n\t\tdrop_rate (float, optional): Dropout rate. Default: 0.\n\t\tattn_drop_rate (float, optional): Attention dropout rate. Default: 0.\n\t\tdrop_path_rate (float, optional): Stochastic depth rate. 
Default: 0.\n\t\tact_cfg (dict, optional): The config dict of activation function.\n\t\t\tDefault: dict(type='GELU').\n\t\tnorm_cfg (dict, optional): The config dict of normalization.\n\t\t\tDefault: dict(type='LN').\n\t\twith_cp (bool, optional): Use checkpoint or not. Using checkpoint\n\t\t\twill save some memory while slowing down the training speed.\n\t\t\tDefault: False.\n\t\tinit_cfg (dict | list | None, optional): The init config.\n\t\t\tDefault: None.\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t embed_dims,\n\t\t\t\t num_heads,\n\t\t\t\t feedforward_channels,\n\t\t\t\t window_size=7,\n\t\t\t\t shift=False,\n\t\t\t\t qkv_bias=True,\n\t\t\t\t qk_scale=None,\n\t\t\t\t drop_rate=0.,\n\t\t\t\t attn_drop_rate=0.,\n\t\t\t\t drop_path_rate=0.,\n\t\t\t\t act_cfg=dict(type='GELU'),\n\t\t\t\t norm_cfg=dict(type='LN'),\n\t\t\t\t with_cp=False,\n\t\t\t\t init_cfg=None):\n\n\t\tsuper(SwinBlock, self).__init__()\n\n\t\tself.init_cfg = init_cfg\n\t\tself.with_cp = with_cp\n\n\t\tself.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]\n\t\tself.attn = ShiftWindowMSA(\n\t\t\tembed_dims=embed_dims,\n\t\t\tnum_heads=num_heads,\n\t\t\twindow_size=window_size,\n\t\t\tshift_size=window_size // 2 if shift else 0,\n\t\t\tqkv_bias=qkv_bias,\n\t\t\tqk_scale=qk_scale,\n\t\t\tattn_drop_rate=attn_drop_rate,\n\t\t\tproj_drop_rate=drop_rate,\n\t\t\tdropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n\t\t\tinit_cfg=None)\n\n\t\tself.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]\n\t\tself.ffn = FFN(\n\t\t\tembed_dims=embed_dims,\n\t\t\tfeedforward_channels=feedforward_channels,\n\t\t\tnum_fcs=2,\n\t\t\tffn_drop=drop_rate,\n\t\t\tdropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n\t\t\tact_cfg=act_cfg,\n\t\t\tadd_identity=True,\n\t\t\tinit_cfg=None)\n\n\tdef forward(self, x, hw_shape):\n\n\t\tdef _inner_forward(x):\n\t\t\tidentity = x\n\t\t\tx = self.norm1(x)\n\t\t\tx = self.attn(x, hw_shape)\n\n\t\t\tx = x + identity\n\n\t\t\tidentity = x\n\t\t\tx = self.norm2(x)\n\t\t\tx = self.ffn(x, identity=identity)\n\n\t\t\treturn x\n\n\t\tif self.with_cp and x.requires_grad:\n\t\t\tx = cp.checkpoint(_inner_forward, x)\n\t\telse:\n\t\t\tx = _inner_forward(x)\n\n\t\treturn x\n\n\nclass SwinBlockSequence(BaseModule):\n\t\"\"\"Implements one stage in Swin Transformer.\n\n\tArgs:\n\t\tembed_dims (int): The feature dimension.\n\t\tnum_heads (int): Parallel attention heads.\n\t\tfeedforward_channels (int): The hidden dimension for FFNs.\n\t\tdepth (int): The number of blocks in this stage.\n\t\twindow_size (int, optional): The local window scale. Default: 7.\n\t\tqkv_bias (bool, optional): enable bias for qkv if True. Default: True.\n\t\tqk_scale (float | None, optional): Override default qk scale of\n\t\t\thead_dim ** -0.5 if set. Default: None.\n\t\tdrop_rate (float, optional): Dropout rate. Default: 0.\n\t\tattn_drop_rate (float, optional): Attention dropout rate. Default: 0.\n\t\tdrop_path_rate (float | list[float], optional): Stochastic depth\n\t\t\trate. Default: 0.\n\t\tdownsample (BaseModule | None, optional): The downsample operation\n\t\t\tmodule. Default: None.\n\t\tact_cfg (dict, optional): The config dict of activation function.\n\t\t\tDefault: dict(type='GELU').\n\t\tnorm_cfg (dict, optional): The config dict of normalization.\n\t\t\tDefault: dict(type='LN').\n\t\twith_cp (bool, optional): Use checkpoint or not. 
Using checkpoint\n\t\t\twill save some memory while slowing down the training speed.\n\t\t\tDefault: False.\n\t\tinit_cfg (dict | list | None, optional): The init config.\n\t\t\tDefault: None.\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t embed_dims,\n\t\t\t\t num_heads,\n\t\t\t\t feedforward_channels,\n\t\t\t\t depth,\n\t\t\t\t window_size=7,\n\t\t\t\t qkv_bias=True,\n\t\t\t\t qk_scale=None,\n\t\t\t\t drop_rate=0.,\n\t\t\t\t attn_drop_rate=0.,\n\t\t\t\t drop_path_rate=0.,\n\t\t\t\t downsample=None,\n\t\t\t\t act_cfg=dict(type='GELU'),\n\t\t\t\t norm_cfg=dict(type='LN'),\n\t\t\t\t with_cp=False,\n\t\t\t\t init_cfg=None):\n\t\tsuper().__init__(init_cfg=init_cfg)\n\n\t\tif isinstance(drop_path_rate, list):\n\t\t\tdrop_path_rates = drop_path_rate\n\t\t\tassert len(drop_path_rates) == depth\n\t\telse:\n\t\t\tdrop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)]\n\n\t\tself.blocks = ModuleList()\n\t\tfor i in range(depth):\n\t\t\tblock = SwinBlock(\n\t\t\t\tembed_dims=embed_dims,\n\t\t\t\tnum_heads=num_heads,\n\t\t\t\tfeedforward_channels=feedforward_channels,\n\t\t\t\twindow_size=window_size,\n\t\t\t\tshift=False if i % 2 == 0 else True,\n\t\t\t\tqkv_bias=qkv_bias,\n\t\t\t\tqk_scale=qk_scale,\n\t\t\t\tdrop_rate=drop_rate,\n\t\t\t\tattn_drop_rate=attn_drop_rate,\n\t\t\t\tdrop_path_rate=drop_path_rates[i],\n\t\t\t\tact_cfg=act_cfg,\n\t\t\t\tnorm_cfg=norm_cfg,\n\t\t\t\twith_cp=with_cp,\n\t\t\t\tinit_cfg=None)\n\t\t\tself.blocks.append(block)\n\n\t\tself.downsample = downsample\n\n\tdef forward(self, x, hw_shape):\n\t\tfor block in self.blocks:\n\t\t\tx = block(x, hw_shape)\n\n\t\tif self.downsample:\n\t\t\tx_down, down_hw_shape = self.downsample(x, hw_shape)\n\t\t\treturn x_down, down_hw_shape, x, hw_shape\n\t\telse:\n\t\t\treturn x, hw_shape, x, hw_shape\n\n\[email protected]_module()\nclass SwinTransformer(BaseModule):\n\t\"\"\" Swin Transformer\n\tA PyTorch implement of : `Swin Transformer:\n\tHierarchical Vision Transformer using Shifted Windows` -\n\t\thttps://arxiv.org/abs/2103.14030\n\n\tInspiration from\n\thttps://github.com/microsoft/Swin-Transformer\n\n\tArgs:\n\t\tpretrain_img_size (int | tuple[int]): The size of input image when\n\t\t\tpretrain. Defaults: 224.\n\t\tin_channels (int): The num of input channels.\n\t\t\tDefaults: 3.\n\t\tembed_dims (int): The feature dimension. Default: 96.\n\t\tpatch_size (int | tuple[int]): Patch size. Default: 4.\n\t\twindow_size (int): Window size. Default: 7.\n\t\tmlp_ratio (int): Ratio of mlp hidden dim to embedding dim.\n\t\t\tDefault: 4.\n\t\tdepths (tuple[int]): Depths of each Swin Transformer stage.\n\t\t\tDefault: (2, 2, 6, 2).\n\t\tnum_heads (tuple[int]): Parallel attention heads of each Swin\n\t\t\tTransformer stage. Default: (3, 6, 12, 24).\n\t\tstrides (tuple[int]): The patch merging or patch embedding stride of\n\t\t\teach Swin Transformer stage. (In swin, we set kernel size equal to\n\t\t\tstride.) Default: (4, 2, 2, 2).\n\t\tout_indices (tuple[int]): Output from which stages.\n\t\t\tDefault: (0, 1, 2, 3).\n\t\tqkv_bias (bool, optional): If True, add a learnable bias to query, key,\n\t\t\tvalue. Default: True\n\t\tqk_scale (float | None, optional): Override default qk scale of\n\t\t\thead_dim ** -0.5 if set. Default: None.\n\t\tpatch_norm (bool): If add a norm layer for patch embed and patch\n\t\t\tmerging. Default: True.\n\t\tdrop_rate (float): Dropout rate. Defaults: 0.\n\t\tattn_drop_rate (float): Attention dropout rate. Default: 0.\n\t\tdrop_path_rate (float): Stochastic depth rate. 
Defaults: 0.1.\n\t\tuse_abs_pos_embed (bool): If True, add absolute position embedding to\n\t\t\tthe patch embedding. Defaults: False.\n\t\tact_cfg (dict): Config dict for activation layer.\n\t\t\tDefault: dict(type='LN').\n\t\tnorm_cfg (dict): Config dict for normalization layer at\n\t\t\toutput of backone. Defaults: dict(type='LN').\n\t\twith_cp (bool, optional): Use checkpoint or not. Using checkpoint\n\t\t\twill save some memory while slowing down the training speed.\n\t\t\tDefault: False.\n\t\tpretrained (str, optional): model pretrained path. Default: None.\n\t\tconvert_weights (bool): The flag indicates whether the\n\t\t\tpre-trained model is from the original repo. We may need\n\t\t\tto convert some keys to make it compatible.\n\t\t\tDefault: False.\n\t\tfrozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n\t\t\t-1 means not freezing any parameters.\n\t\tinit_cfg (dict, optional): The Config for initialization.\n\t\t\tDefaults to None.\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t pretrain_img_size=224,\n\t\t\t\t in_channels=3,\n\t\t\t\t embed_dims=96,\n\t\t\t\t patch_size=4,\n\t\t\t\t window_size=7,\n\t\t\t\t mlp_ratio=4,\n\t\t\t\t depths=(2, 2, 6, 2),\n\t\t\t\t num_heads=(3, 6, 12, 24),\n\t\t\t\t strides=(4, 2, 2, 2),\n\t\t\t\t out_indices=(0, 1, 2, 3),\n\t\t\t\t qkv_bias=True,\n\t\t\t\t qk_scale=None,\n\t\t\t\t patch_norm=True,\n\t\t\t\t drop_rate=0.,\n\t\t\t\t attn_drop_rate=0.,\n\t\t\t\t drop_path_rate=0.1,\n\t\t\t\t use_abs_pos_embed=False,\n\t\t\t\t act_cfg=dict(type='GELU'),\n\t\t\t\t norm_cfg=dict(type='LN'),\n\t\t\t\t with_cp=False,\n\t\t\t\t pretrained=None,\n\t\t\t\t convert_weights=False,\n\t\t\t\t frozen_stages=-1,\n\t\t\t\t init_cfg=None):\n\t\tself.convert_weights = convert_weights\n\t\tself.frozen_stages = frozen_stages\n\t\tif isinstance(pretrain_img_size, int):\n\t\t\tpretrain_img_size = to_2tuple(pretrain_img_size)\n\t\telif isinstance(pretrain_img_size, tuple):\n\t\t\tif len(pretrain_img_size) == 1:\n\t\t\t\tpretrain_img_size = to_2tuple(pretrain_img_size[0])\n\t\t\tassert len(pretrain_img_size) == 2, \\\n\t\t\t\tf'The size of image should have length 1 or 2, ' \\\n\t\t\t\tf'but got {len(pretrain_img_size)}'\n\n\t\tassert not (init_cfg and pretrained), 'init_cfg and pretrained cannot be specified at the same time'\n\t\tif isinstance(pretrained, str):\n\t\t\twarnings.warn('DeprecationWarning: pretrained is deprecated, '\n\t\t\t\t\t\t 'please use \"init_cfg\" instead')\n\t\t\tself.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n\t\telif pretrained is None:\n\t\t\tself.init_cfg = init_cfg\n\t\telse:\n\t\t\traise TypeError('pretrained must be a str or None')\n\n\t\tsuper(SwinTransformer, self).__init__(init_cfg=init_cfg)\n\n\t\tnum_layers = len(depths)\n\t\tself.out_indices = out_indices\n\t\tself.use_abs_pos_embed = use_abs_pos_embed\n\n\t\tassert strides[0] == patch_size, 'Use non-overlapping patch embed.'\n\n\t\tself.patch_embed = PatchEmbed(\n\t\t\tin_channels=in_channels,\n\t\t\tembed_dims=embed_dims,\n\t\t\tconv_type='Conv2d',\n\t\t\tkernel_size=patch_size,\n\t\t\tstride=strides[0],\n\t\t\tnorm_cfg=norm_cfg if patch_norm else None,\n\t\t\tinit_cfg=None)\n\n\t\tif self.use_abs_pos_embed:\n\t\t\tpatch_row = pretrain_img_size[0] // patch_size\n\t\t\tpatch_col = pretrain_img_size[1] // patch_size\n\t\t\tnum_patches = patch_row * patch_col\n\t\t\tself.absolute_pos_embed = nn.Parameter(torch.zeros((1, num_patches, embed_dims)))\n\n\t\tself.drop_after_pos = nn.Dropout(p=drop_rate)\n\n\t\t# set stochastic depth decay rule\n\t\ttotal_depth 
= sum(depths)\n\t\tdpr = [\n\t\t\tx.item() for x in torch.linspace(0, drop_path_rate, total_depth)\n\t\t]\n\n\t\tself.stages = ModuleList()\n\t\tin_channels = embed_dims\n\t\tfor i in range(num_layers):\n\t\t\tif i < num_layers - 1:\n\t\t\t\tdownsample = PatchMerging(\n\t\t\t\t\tin_channels=in_channels,\n\t\t\t\t\tout_channels=2 * in_channels,\n\t\t\t\t\tstride=strides[i + 1],\n\t\t\t\t\tnorm_cfg=norm_cfg if patch_norm else None,\n\t\t\t\t\tinit_cfg=None)\n\t\t\telse:\n\t\t\t\tdownsample = None\n\n\t\t\tstage = SwinBlockSequence(\n\t\t\t\tembed_dims=in_channels,\n\t\t\t\tnum_heads=num_heads[i],\n\t\t\t\tfeedforward_channels=mlp_ratio * in_channels,\n\t\t\t\tdepth=depths[i],\n\t\t\t\twindow_size=window_size,\n\t\t\t\tqkv_bias=qkv_bias,\n\t\t\t\tqk_scale=qk_scale,\n\t\t\t\tdrop_rate=drop_rate,\n\t\t\t\tattn_drop_rate=attn_drop_rate,\n\t\t\t\tdrop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])],\n\t\t\t\tdownsample=downsample,\n\t\t\t\tact_cfg=act_cfg,\n\t\t\t\tnorm_cfg=norm_cfg,\n\t\t\t\twith_cp=with_cp,\n\t\t\t\tinit_cfg=None)\n\t\t\tself.stages.append(stage)\n\t\t\tif downsample:\n\t\t\t\tin_channels = downsample.out_channels\n\n\t\tself.num_features = [int(embed_dims * 2**i) for i in range(num_layers)]\n\t\t# Add a norm layer for each output\n\t\tfor i in out_indices:\n\t\t\tlayer = build_norm_layer(norm_cfg, self.num_features[i])[1]\n\t\t\tlayer_name = f'norm{i}'\n\t\t\tself.add_module(layer_name, layer)\n\n\tdef train(self, mode=True):\n\t\t\"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\n\t\tsuper(SwinTransformer, self).train(mode)\n\t\tself._freeze_stages()\n\n\tdef _freeze_stages(self):\n\t\tif self.frozen_stages >= 0:\n\t\t\tself.patch_embed.eval()\n\t\t\tfor param in self.patch_embed.parameters():\n\t\t\t\tparam.requires_grad = False\n\t\t\tif self.use_abs_pos_embed:\n\t\t\t\tself.absolute_pos_embed.requires_grad = False\n\t\t\tself.drop_after_pos.eval()\n\n\t\tfor i in range(1, self.frozen_stages + 1):\n\n\t\t\tif (i - 1) in self.out_indices:\n\t\t\t\tnorm_layer = getattr(self, f'norm{i-1}')\n\t\t\t\tnorm_layer.eval()\n\t\t\t\tfor param in norm_layer.parameters():\n\t\t\t\t\tparam.requires_grad = False\n\n\t\t\tm = self.stages[i - 1]\n\t\t\tm.eval()\n\t\t\tfor param in m.parameters():\n\t\t\t\tparam.requires_grad = False\n\n\tdef init_weights(self):\n\t\tlogger = get_root_logger()\n\t\tif self.init_cfg is None:\n\t\t\tlogger.warn(f'No pre-trained weights for {self.__class__.__name__}, training start from scratch')\n\t\t\tif self.use_abs_pos_embed:\n\t\t\t\ttrunc_normal_init(self.absolute_pos_embed, std=0.02)\n\t\t\tfor m in self.modules():\n\t\t\t\tif isinstance(m, nn.Linear):\n\t\t\t\t\ttrunc_normal_init(m.weight, std=.02)\n\t\t\t\t\tif m.bias is not None:\n\t\t\t\t\t\tconstant_init(m.bias, 0)\n\t\t\t\telif isinstance(m, nn.LayerNorm):\n\t\t\t\t\tconstant_init(m.bias, 0)\n\t\t\t\t\tconstant_init(m.weight, 1.0)\n\t\telse:\n\t\t\tassert 'checkpoint' in self.init_cfg, f'Only support specify `Pretrained` in `init_cfg` in {self.__class__.__name__} '\n\t\t\tckpt = _load_checkpoint(self.init_cfg.checkpoint, logger=logger, map_location='cpu')\n\t\t\tif 'state_dict' in ckpt:\n\t\t\t\t_state_dict = ckpt['state_dict']\n\t\t\telif 'model' in ckpt:\n\t\t\t\t_state_dict = ckpt['model']\n\t\t\telse:\n\t\t\t\t_state_dict = ckpt\n\n\t\t\tstate_dict = OrderedDict()\n\t\t\tfor k, v in _state_dict.items():\n\t\t\t\tif k.startswith('backbone.'):\n\t\t\t\t\tstate_dict[k[9:]] = v\n\t\t\t\telse:\n\t\t\t\t\tstate_dict[k] = v\n\n\t\t\tif self.convert_weights:\n\t\t\t\t# 
supported loading weight from original repo,\n\t\t\t\tstate_dict = swin_converter(state_dict)\n\n\t\t\t# strip prefix of state_dict\n\t\t\tif list(state_dict.keys())[0].startswith('module.'):\n\t\t\t\tstate_dict = {k[7:]: v for k, v in state_dict.items()}\n\n\t\t\t# reshape absolute position embedding\n\t\t\tif state_dict.get('absolute_pos_embed') is not None:\n\t\t\t\tabsolute_pos_embed = state_dict['absolute_pos_embed']\n\t\t\t\tN1, L, C1 = absolute_pos_embed.size()\n\t\t\t\tN2, C2, H, W = self.absolute_pos_embed.size()\n\t\t\t\tif N1 != N2 or C1 != C2 or L != H * W:\n\t\t\t\t\tlogger.warning('Error in loading absolute_pos_embed, pass')\n\t\t\t\telse:\n\t\t\t\t\tstate_dict['absolute_pos_embed'] = absolute_pos_embed.view(\n\t\t\t\t\t\tN2, H, W, C2).permute(0, 3, 1, 2).contiguous()\n\n\t\t\t# interpolate position bias table if needed\n\t\t\trelative_position_bias_table_keys = [\n\t\t\t\tk for k in state_dict.keys()\n\t\t\t\tif 'relative_position_bias_table' in k\n\t\t\t]\n\t\t\tfor table_key in relative_position_bias_table_keys:\n\t\t\t\ttable_pretrained = state_dict[table_key]\n\t\t\t\ttable_current = self.state_dict()[table_key]\n\t\t\t\tL1, nH1 = table_pretrained.size()\n\t\t\t\tL2, nH2 = table_current.size()\n\t\t\t\tif nH1 != nH2:\n\t\t\t\t\tlogger.warning(f'Error in loading {table_key}, pass')\n\t\t\t\telif L1 != L2:\n\t\t\t\t\tS1 = int(L1**0.5)\n\t\t\t\t\tS2 = int(L2**0.5)\n\t\t\t\t\ttable_pretrained_resized = F.interpolate(\n\t\t\t\t\t\ttable_pretrained.permute(1, 0).reshape(1, nH1, S1, S1),\n\t\t\t\t\t\tsize=(S2, S2),\n\t\t\t\t\t\tmode='bicubic')\n\t\t\t\t\tstate_dict[table_key] = table_pretrained_resized.view(\n\t\t\t\t\t\tnH2, L2).permute(1, 0).contiguous()\n\n\t\t\t# load state_dict\n\t\t\tself.load_state_dict(state_dict, False)\n\n\tdef forward(self, x):\n\t\tx, hw_shape = self.patch_embed(x)\n\n\t\tif self.use_abs_pos_embed:\n\t\t\tx = x + self.absolute_pos_embed\n\t\tx = self.drop_after_pos(x)\n\n\t\touts = []\n\t\tfor i, stage in enumerate(self.stages):\n\t\t\tx, hw_shape, out, out_hw_shape = stage(x, hw_shape)\n\t\t\tif i in self.out_indices:\n\t\t\t\tnorm_layer = getattr(self, f'norm{i}')\n\t\t\t\tout = norm_layer(out)\n\t\t\t\tout = out.view(-1, *out_hw_shape, self.num_features[i]).permute(0, 3, 1, 2).contiguous()\n\t\t\t\touts.append(out)\n\n\t\treturn outs\n"
] | [
[
"torch.nn.init.constant_",
"torch.nn.Embedding",
"torch.cat"
],
[
"torch.nn.ModuleList",
"torch.nn.functional.relu",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d"
],
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.linspace",
"torch.zeros",
"torch.nn.Linear",
"torch.utils.checkpoint.checkpoint",
"torch.arange",
"torch.roll",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Shaviv-Hoffman-Lowitz/pylot | [
"d1295a42f0edd79670dc64053824a3e075d433e2"
] | [
"pylot/perception/detection/traffic_light_det_operator.py"
] | [
"\"\"\"Implements an operator that detects traffic lights.\"\"\"\nimport logging\n\nimport erdos\n\nimport numpy as np\n\nimport pylot.utils\nfrom pylot.perception.detection.traffic_light import TrafficLight, \\\n TrafficLightColor\nfrom pylot.perception.detection.utils import BoundingBox2D\nfrom pylot.perception.messages import TrafficLightsMessage\n\nimport tensorflow as tf\n\n\nclass TrafficLightDetOperator(erdos.Operator):\n \"\"\"Detects traffic lights using a TensorFlow model.\n\n The operator receives frames on a camera stream, and runs a model for each\n frame.\n\n Args:\n camera_stream (:py:class:`erdos.ReadStream`): The stream on which\n camera frames are received.\n traffic_lights_stream (:py:class:`erdos.WriteStream`): Stream on which\n the operator sends\n :py:class:`~pylot.perception.messages.TrafficLightsMessage`\n messages.\n flags (absl.flags): Object to be used to access absl flags.\n \"\"\"\n def __init__(self, camera_stream, traffic_lights_stream, flags):\n # Register a callback on the camera input stream.\n camera_stream.add_callback(self.on_frame, [traffic_lights_stream])\n self._logger = erdos.utils.setup_logging(self.config.name,\n self.config.log_file_name)\n self._flags = flags\n self._detection_graph = tf.Graph()\n # Load the model from the model file.\n pylot.utils.set_tf_loglevel(logging.ERROR)\n with self._detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(self._flags.traffic_light_det_model_path,\n 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self._gpu_options = tf.GPUOptions(\n allow_growth=True,\n visible_device_list=str(self._flags.traffic_light_det_gpu_index),\n per_process_gpu_memory_fraction=flags.\n traffic_light_det_gpu_memory_fraction)\n # Create a TensorFlow session.\n self._tf_session = tf.Session(\n graph=self._detection_graph,\n config=tf.ConfigProto(gpu_options=self._gpu_options))\n # Get the tensors we're interested in.\n self._image_tensor = self._detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n self._detection_boxes = self._detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n self._detection_scores = self._detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self._detection_classes = self._detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self._num_detections = self._detection_graph.get_tensor_by_name(\n 'num_detections:0')\n self._labels = {\n 1: TrafficLightColor.GREEN,\n 2: TrafficLightColor.YELLOW,\n 3: TrafficLightColor.RED,\n 4: TrafficLightColor.OFF\n }\n # Serve some junk image to load up the model.\n self.__run_model(np.zeros((108, 192, 3)))\n\n @staticmethod\n def connect(camera_stream):\n \"\"\"Connects the operator to other streams.\n\n Args:\n camera_stream (:py:class:`erdos.ReadStream`): The stream on which\n camera frames are received.\n\n Returns:\n :py:class:`erdos.WriteStream`: Stream on which the operator sends\n :py:class:`~pylot.perception.messages.TrafficLightsMessage`\n messages for traffic lights.\n \"\"\"\n traffic_lights_stream = erdos.WriteStream()\n return [traffic_lights_stream]\n\n @erdos.profile_method()\n def on_frame(self, msg, traffic_lights_stream):\n \"\"\"Invoked whenever a frame message is received on the stream.\n\n Args:\n msg: A :py:class:`~pylot.perception.messages.FrameMessage`.\n obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which\n the operator sends\n :py:class:`~pylot.perception.messages.TrafficLightsMessage`\n 
messages for traffic lights.\n \"\"\"\n self._logger.debug('@{}: {} received message'.format(\n msg.timestamp, self.config.name))\n assert msg.frame.encoding == 'BGR', 'Expects BGR frames'\n boxes, scores, labels = self.__run_model(\n msg.frame.as_rgb_numpy_array())\n\n traffic_lights = self.__convert_to_detected_tl(\n boxes, scores, labels, msg.frame.camera_setup.height,\n msg.frame.camera_setup.width)\n\n self._logger.debug('@{}: {} detected traffic lights {}'.format(\n msg.timestamp, self.config.name, traffic_lights))\n\n traffic_lights_stream.send(\n TrafficLightsMessage(msg.timestamp, traffic_lights))\n traffic_lights_stream.send(erdos.WatermarkMessage(msg.timestamp))\n\n if self._flags.log_traffic_light_detector_output:\n msg.frame.annotate_with_bounding_boxes(msg.timestamp,\n traffic_lights)\n msg.frame.save(msg.timestamp.coordinates[0], self._flags.data_path,\n 'tl-detector-{}'.format(self.config.name))\n\n def __run_model(self, image_np):\n # Expand dimensions since the model expects images to have\n # shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n (boxes, scores, classes, num) = self._tf_session.run(\n [\n self._detection_boxes, self._detection_scores,\n self._detection_classes, self._num_detections\n ],\n feed_dict={self._image_tensor: image_np_expanded})\n\n num_detections = int(num[0])\n labels = [self._labels[label] for label in classes[0][:num_detections]]\n boxes = boxes[0][:num_detections]\n scores = scores[0][:num_detections]\n return boxes, scores, labels\n\n def __convert_to_detected_tl(self, boxes, scores, labels, height, width):\n traffic_lights = []\n for index in range(len(scores)):\n if scores[\n index] > self._flags.traffic_light_det_min_score_threshold:\n bbox = BoundingBox2D(\n int(boxes[index][1] * width), # x_min\n int(boxes[index][3] * width), # x_max\n int(boxes[index][0] * height), # y_min\n int(boxes[index][2] * height) # y_max\n )\n traffic_lights.append(\n TrafficLight(scores[index],\n labels[index],\n bounding_box=bbox))\n return traffic_lights\n"
] | [
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"tensorflow.ConfigProto",
"tensorflow.GraphDef",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ymeng-git/tvm | [
"e53cbe48ca307d14a2359c1f6fe15f4ccfa87c8f"
] | [
"tests/python/contrib/test_ethosu/test_legalize.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n\nimport pytest\n\npytest.importorskip(\"ethosu.vela\")\n\nimport math\n\nimport numpy as np\nimport tensorflow as tf\nimport tflite.Model\n\nimport tvm\nfrom tvm import relay\nfrom tvm.relay.backend.contrib.ethosu import legalize, preprocess\nfrom tvm.relay import dataflow_pattern\nfrom tvm.relay.op.contrib import ethosu\nfrom tvm.relay.backend.contrib.ethosu import util\nfrom tvm.relay.build_module import bind_params_by_name\n\nfrom . import infra\n\n\ndef partition_ethosu_by_table(mod, pattern_table):\n \"\"\"In case only the legalization part is supported for an operator, we don't\n want to add the operator's pattern to the pattern table so that the compiler\n wouldn't attempt to offload an operator without full stack support.\"\"\"\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.MergeComposite(pattern_table)(mod)\n mod = relay.transform.AnnotateTarget(\"ethos-u\")(mod)\n mod = relay.transform.MergeCompilerRegions()(mod)\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.PartitionGraph()(mod)\n mod = relay.transform.InferType()(mod)\n mod = preprocess.preprocess_ext_io()(mod)\n return mod\n\n\ndef test_split_indices_legalize():\n def create_graph(axis):\n x = relay.var(\"x\", shape=(1, 50, 50, 3))\n x_relu = relay.nn.relu(x)\n split_output = relay.split(x_relu, [5, 20, 45], axis).tuple_value\n return relay.Function([x], split_output)\n\n def expected_mod_axis1():\n expected_ir_string = \"\"\"\n #[version = \"0.0.5\"]\n def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 5, 50, 3), float32],\\\n Tensor[(1, 15, 50, 3), float32],\\\n Tensor[(1, 25, 50, 3), float32],\\\n Tensor[(1, 5, 50, 3), float32]) {\n %0 = nn.relu(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;\n %1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 5, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 5, 50, 3), float32] */;\n %2 = strided_slice(%0, begin=[0, 5, 0, 0], end=[1, 20, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 15, 50, 3), float32] */;\n %3 = strided_slice(%0, begin=[0, 20, 0, 0], end=[1, 45, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 25, 50, 3), float32] */;\n %4 = strided_slice(%0, begin=[0, 45, 0, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 5, 50, 3), float32] */;\n (%1, %2, %3, %4)\n }\n \"\"\"\n return tvm.parser.fromtext(expected_ir_string)\n\n def expected_mod_axis2():\n expected_ir_string = \"\"\"\n #[version = \"0.0.5\"]\n def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 50, 5, 3), float32],\\\n Tensor[(1, 50, 15, 3), float32],\\\n Tensor[(1, 50, 25, 3), float32],\\\n Tensor[(1, 50, 5, 3), float32]) {\n %0 = nn.relu(%x) 
/* ty=Tensor[(1, 50, 50, 3), float32] */;\n %1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 50, 5, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 5, 3), float32] */;\n %2 = strided_slice(%0, begin=[0, 0, 5, 0], end=[1, 50, 20, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 15, 3), float32] */;\n %3 = strided_slice(%0, begin=[0, 0, 20, 0], end=[1, 50, 45, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 25, 3), float32] */;\n %4 = strided_slice(%0, begin=[0, 0, 45, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 5, 3), float32] */;\n (%1, %2, %3, %4)\n }\n \"\"\"\n return tvm.parser.fromtext(expected_ir_string)\n\n mod_axis1 = tvm.IRModule()\n mod_axis1[\"tvmgen_default_ethos_u_main_0\"] = create_graph(1)\n mod_axis1 = legalize.LegalizeSplit()(mod_axis1)\n expected_axis1 = expected_mod_axis1()\n tvm.ir.assert_structural_equal(mod_axis1, expected_axis1)\n\n mod_axis2 = tvm.IRModule()\n mod_axis2[\"tvmgen_default_ethos_u_main_0\"] = create_graph(2)\n mod_axis2 = legalize.LegalizeSplit()(mod_axis2)\n expected_axis2 = expected_mod_axis2()\n tvm.ir.assert_structural_equal(mod_axis2, expected_axis2)\n\n\ndef test_split_sections_legalize():\n def create_graph(axis, sections):\n x = relay.var(\"x\", shape=(1, 50, 50, 3))\n x_abs = relay.abs(x)\n split_output = relay.split(x_abs, sections, axis).tuple_value\n outputs = list()\n for section_idx in range(sections):\n split_single_out = relay.TupleGetItem(split_output, section_idx)\n tanh = relay.tanh(split_single_out)\n outputs.append(tanh)\n tuple_out = relay.Tuple(outputs)\n return relay.Function([x], tuple_out)\n\n def expected_mod_axis1():\n expected_ir_string = \"\"\"\n #[version = \"0.0.5\"]\n def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 10, 50, 3), float32],\\\n Tensor[(1, 10, 50, 3), float32],\\\n Tensor[(1, 10, 50, 3), float32],\\\n Tensor[(1, 10, 50, 3), float32],\\\n Tensor[(1, 10, 50, 3), float32]) {\n %0 = abs(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;\n %1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 10, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %2 = strided_slice(%0, begin=[0, 10, 0, 0], end=[1, 20, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %3 = strided_slice(%0, begin=[0, 20, 0, 0], end=[1, 30, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %4 = strided_slice(%0, begin=[0, 30, 0, 0], end=[1, 40, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %5 = strided_slice(%0, begin=[0, 40, 0, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %6 = (%1, %2, %3, %4, %5);\n %7 = %6.0;\n %8 = tanh(%7) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %9 = %6.1;\n %10 = tanh(%9) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %11 = %6.2;\n %12 = tanh(%11) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %13 = %6.3;\n %14 = tanh(%13) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n %15 = %6.4;\n %16 = tanh(%15) /* ty=Tensor[(1, 10, 50, 3), float32] */;\n (%8, %10, %12, %14, %16)\n }\n \"\"\"\n return tvm.parser.fromtext(expected_ir_string)\n\n def expected_mod_axis2():\n expected_ir_string = \"\"\"\n #[version = \"0.0.5\"]\n def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 50, 10, 3), float32],\\\n Tensor[(1, 50, 10, 3), float32],\\\n Tensor[(1, 50, 10, 3), float32],\\\n Tensor[(1, 50, 10, 3), float32],\\\n Tensor[(1, 50, 10, 3), float32]) {\n %0 = abs(%x) /* 
ty=Tensor[(1, 50, 50, 3), float32] */;\n %1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 50, 10, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %2 = strided_slice(%0, begin=[0, 0, 10, 0], end=[1, 50, 20, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %3 = strided_slice(%0, begin=[0, 0, 20, 0], end=[1, 50, 30, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %4 = strided_slice(%0, begin=[0, 0, 30, 0], end=[1, 50, 40, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %5 = strided_slice(%0, begin=[0, 0, 40, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\\\n /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %6 = (%1, %2, %3, %4, %5);\n %7 = %6.0;\n %8 = tanh(%7) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %9 = %6.1;\n %10 = tanh(%9) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %11 = %6.2;\n %12 = tanh(%11) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %13 = %6.3;\n %14 = tanh(%13) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n %15 = %6.4;\n %16 = tanh(%15) /* ty=Tensor[(1, 50, 10, 3), float32] */;\n (%8, %10, %12, %14, %16)\n }\n \"\"\"\n return tvm.parser.fromtext(expected_ir_string)\n\n mod_axis1 = tvm.IRModule()\n mod_axis1[\"tvmgen_default_ethos_u_main_0\"] = create_graph(1, 5)\n mod_axis1 = legalize.LegalizeSplit()(mod_axis1)\n expected_axis1 = expected_mod_axis1()\n tvm.ir.assert_structural_equal(mod_axis1, expected_axis1)\n\n mod_axis2 = tvm.IRModule()\n mod_axis2[\"tvmgen_default_ethos_u_main_0\"] = create_graph(2, 5)\n mod_axis2 = legalize.LegalizeSplit()(mod_axis2)\n expected_axis2 = expected_mod_axis2()\n tvm.ir.assert_structural_equal(mod_axis2, expected_axis2)\n\n\ndef infer_type_function_pass(func):\n mod = tvm.IRModule()\n mod[\"test\"] = func\n mod = relay.transform.InferType()(mod)\n return mod[\"test\"]\n\n\ndef get_shape_expr(in_expr, out_expr):\n main_f = relay.Function([in_expr], out_expr)\n main_f = infer_type_function_pass(main_f)\n shape = [int(i) for i in main_f.body.checked_type.shape]\n return shape\n\n\nINVERSE_LAYOUT_TRANSFORM_OHWI_MAP = {\n \"HWIO\": [1, 2, 3, 0],\n \"HWOI\": [1, 2, 0, 3],\n \"OWHI\": [0, 1, 2, 3],\n}\n\n\[email protected](\"ifm_shape\", [(1, 299, 299, 3), (1, 55, 55, 3)])\[email protected](\"kernel_shape\", [(3, 2), (1, 3)])\[email protected](\"padding\", [\"SAME\", \"VALID\"])\[email protected](\"strides, dilation\", [((1, 1), (2, 1)), ((3, 2), (1, 1))])\[email protected](\"activation\", [None, \"RELU\"])\ndef test_tflite_conv2d_legalize(ifm_shape, kernel_shape, padding, strides, dilation, activation):\n dtype = \"int8\"\n\n def create_tflite_graph_single():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, input_shape):\n op = tf.nn.conv2d(\n input_shape,\n filters=tf.constant(\n np.random.uniform(size=(kernel_shape[0], kernel_shape[1], 3, 3)),\n dtype=tf.float32,\n ),\n strides=strides,\n padding=padding,\n data_format=\"NHWC\",\n dilations=dilation,\n )\n if activation:\n op = tf.nn.relu(op)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = 
[tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n op = ext_func.body\n ofm_channels = op.attrs.ofm_channels\n\n # check IFM\n ifm = op.args[0].checked_type\n assert list(ifm.shape) == list(ifm_shape)\n assert str(ifm.dtype) == dtype\n assert ifm.shape[3] == ofm_channels\n\n # check OFM\n ofm = op.checked_type\n expected_ofm_shape = infra.compute_ofm_shape(\n ifm_shape, padding, kernel_shape, strides, dilation\n )\n assert list(ofm.shape) == list(expected_ofm_shape)\n assert str(ofm.dtype) == dtype\n assert ofm.shape[3] == ofm_channels\n\n # check weights\n weights_ohwi = op.args[1].data.asnumpy()\n assert str(weights_ohwi.dtype) == dtype\n assert weights_ohwi.shape[0] == ofm_channels\n assert weights_ohwi.shape[1] == kernel_shape[0]\n assert weights_ohwi.shape[2] == kernel_shape[1]\n assert weights_ohwi.shape[3] == 3\n\n # Check that scale_bias matches weight tensor\n assert list(op.args[2].checked_type.shape)[0] == ofm_channels\n\n expected_padding = infra.compute_padding_shape(\n ifm_shape,\n expected_ofm_shape,\n padding,\n (kernel_shape[0], kernel_shape[1]),\n strides,\n dilation,\n )\n assert list(op.attrs.padding) == list(expected_padding)\n assert list(op.attrs.strides) == list(strides)\n assert list(op.attrs.dilation) == list(dilation)\n if activation == \"RELU\":\n assert str(op.attrs.activation) == \"CLIP\"\n\n conv2d_pattern_table = [\n (\n ethosu.QnnConv2DParams.composite_name,\n ethosu.qnn_conv2d_pattern(),\n lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),\n )\n ]\n\n tflite_graph = create_tflite_graph_single()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, conv_params = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], conv_params)\n mod = partition_ethosu_by_table(mod, conv2d_pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.Conv2DRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\"ifm_shape\", [(1, 299, 299, 3), (1, 123, 17, 7)])\[email protected](\"kernel_shape\", [(7, 3), (22, 5)])\[email protected](\"padding\", [\"SAME\", \"VALID\"])\[email protected](\"strides, dilation\", [((1, 1), (2, 1)), ((3, 2), (1, 1))])\[email protected](\"activation\", [\"RELU\", None])\ndef test_tflite_depthwise_conv_2d_legalize(\n ifm_shape, kernel_shape, padding, strides, dilation, activation\n):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def depthwise_conv2d(self, x):\n weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]\n weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)\n # The input strides to the TensorFlow API needs to be of shape 1x4\n tf_strides = [1, strides[0], strides[1], 1]\n op = tf.nn.depthwise_conv2d(\n x, weight, strides=tf_strides, padding=padding, dilations=dilation\n )\n if activation:\n op = tf.nn.relu(op)\n return op\n\n model = Model()\n concrete_func = model.depthwise_conv2d.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = 
tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n op = ext_func.body\n ofm_channels = op.attrs.ofm_channels\n\n # check IFM\n ifm = op.args[0].checked_type\n assert list(ifm.shape) == list(ifm_shape)\n assert str(ifm.dtype) == dtype\n assert ifm.shape[3] == ofm_channels\n\n # check OFM\n ofm = op.checked_type\n expected_ofm_shape = infra.compute_ofm_shape(\n ifm_shape, padding, kernel_shape, strides, dilation\n )\n assert list(ofm.shape) == list(expected_ofm_shape)\n assert str(ofm.dtype) == dtype\n assert ofm.shape[3] == ofm_channels\n\n # check weights\n weights_ohwi = op.args[1].data.asnumpy()\n assert str(weights_ohwi.dtype) == dtype\n assert weights_ohwi.shape[0] == ofm_channels\n assert weights_ohwi.shape[1] == kernel_shape[0]\n assert weights_ohwi.shape[2] == kernel_shape[1]\n assert weights_ohwi.shape[3] == 1 # only depth multiplier 1 is supported\n\n # Check that scale_bias matches weight tensor\n assert list(op.args[2].checked_type.shape)[0] == ofm_channels\n\n expected_padding = infra.compute_padding_shape(\n ifm_shape, expected_ofm_shape, padding, kernel_shape, strides, dilation\n )\n assert list(op.attrs.padding) == list(expected_padding)\n assert op.attrs.ofm_channels == ofm_channels\n assert list(op.attrs.strides) == list(strides)\n assert list(op.attrs.dilation) == list(dilation)\n if activation == \"RELU\":\n assert str(op.attrs.activation) == \"CLIP\"\n\n depthwise_pattern_table = [\n (\n ethosu.QnnDepthwiseConv2DParams.composite_name,\n ethosu.qnn_depthwise_conv2d_pattern(),\n lambda pat: ethosu.QnnDepthwiseConv2DParams(pat).is_valid(),\n )\n ]\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, params = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], params)\n mod = partition_ethosu_by_table(mod, depthwise_pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.DepthwiseConv2DRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\"pooling_type\", [\"MAX\", \"AVG\"])\[email protected](\"ifm_shape\", [[1, 3, 4, 3], [1, 4, 5, 2]])\[email protected](\n \"pool_shape, strides, activation_function, padding\",\n [([1, 2], [1, 2], \"NONE\", \"SAME\"), ([2, 3], [2, 3], \"RELU\", \"VALID\")],\n)\ndef test_tflite_pool2d_legalize(\n ifm_shape, pooling_type, strides, pool_shape, activation_function, padding\n):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x):\n if pooling_type == \"MAX\":\n op = tf.nn.max_pool(x, pool_shape, strides, padding)\n elif pooling_type == \"AVG\":\n op = tf.nn.avg_pool(x, pool_shape, strides, padding)\n if activation_function == \"RELU\":\n op = tf.nn.relu(op)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = 
np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n ofm_shape = infra.compute_ofm_shape(ifm_shape, padding, pool_shape, strides)\n op = ext_func.body\n assert list(op.args[0].checked_type.shape) == ifm_shape\n assert op.args[0].checked_type.dtype == dtype\n assert list(op.checked_type.shape) == ofm_shape\n assert op.checked_type.dtype == dtype\n assert op.attrs.pooling_type == pooling_type\n assert list(op.attrs.strides) == strides\n assert list(op.attrs.padding) == infra.compute_padding_shape(\n ifm_shape, ofm_shape, padding, pool_shape, strides\n )\n assert list(op.attrs.pool_shape) == pool_shape\n assert op.attrs.ofm_channels == ifm_shape[3]\n if activation_function == \"RELU\":\n assert str(op.attrs.activation) == \"CLIP\"\n\n if pooling_type == \"MAX\":\n rewriter = legalize.MaxPoolingRewriter()\n pattern_table = [\n (\n ethosu.MaxPool2DParams.composite_name,\n ethosu.qnn_maxpool2d_pattern(),\n lambda pat: ethosu.MaxPool2DParams(pat).is_valid(),\n ),\n ]\n elif pooling_type == \"AVG\":\n rewriter = legalize.AvgPoolingRewriter()\n pattern_table = [\n (\n ethosu.AvgPool2DParams.composite_name,\n ethosu.qnn_avgpool2d_pattern(),\n lambda pat: ethosu.AvgPool2DParams(pat).is_valid(),\n ),\n ]\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"x\": ifm_shape},\n dtype_dict={\"x\": dtype},\n )\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\"operator_type\", [\"ADD\", \"SUB\", \"MUL\", \"MIN\", \"MAX\"])\[email protected](\n \"ifm_shape, ifm2_shape, reversed_operands\",\n [\n ([1, 2, 3, 4], [1, 2, 3, 4], False),\n ([1, 2, 3, 4], [1, 1, 3, 1], False),\n ([1, 1, 3, 1], [1, 2, 3, 4], True),\n ([1, 4, 4], [4, 1], False),\n ([4], [4], False),\n ([4], [1, 2, 3, 4], True),\n ([1, 4, 4], [4, 1], False),\n ],\n)\[email protected](\"activation_function\", [\"NONE\", \"RELU\"])\ndef test_tflite_binary_elemwise_legalize(\n operator_type,\n ifm_shape,\n ifm2_shape,\n reversed_operands,\n activation_function,\n):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x, y):\n if operator_type == \"ADD\":\n op = tf.math.add(x, y)\n elif operator_type == \"SUB\":\n op = tf.math.subtract(x, y)\n elif operator_type == \"MUL\":\n op = tf.math.multiply(x, y)\n elif operator_type == \"MIN\":\n op = tf.math.minimum(x, y)\n elif operator_type == \"MAX\":\n op = tf.math.maximum(x, y)\n if activation_function == \"RELU\":\n op = tf.nn.relu(op)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32), tf.TensorSpec(ifm2_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n data2 = 
np.random.rand(*tuple(ifm2_shape)) * 2\n yield [data.astype(np.float32), data2.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n out_shape = ifm2_shape if reversed_operands else ifm_shape\n shapes = [ifm_shape, ifm2_shape]\n ifm_index, ifm2_index = (1, 0) if reversed_operands else (0, 1)\n op = ext_func.body\n\n has_reshaped_output = False\n shapes_padded = [[1] * (4 - len(s)) + s for s in shapes]\n out_padded = [1] * (4 - len(out_shape)) + out_shape\n if op.op.name != \"contrib.ethosu.binary_elementwise\":\n has_reshaped_output = True\n op = op.args[0]\n\n assert list(op.args[0].checked_type.shape) == shapes_padded[ifm_index]\n assert list(op.args[1].checked_type.shape) == shapes_padded[ifm2_index]\n assert op.args[0].checked_type.dtype == dtype\n assert list(op.checked_type.shape) == out_padded\n assert op.checked_type.dtype == dtype\n assert op.attrs.operator_type == operator_type\n assert op.attrs.reversed_operands == reversed_operands\n if activation_function == \"RELU\":\n assert str(op.attrs.activation) == \"CLIP\"\n\n if has_reshaped_output:\n assert list(ext_func.body.checked_type.shape) == out_shape\n\n if operator_type == \"ADD\":\n rewriter = legalize.AddRewriter()\n pattern_table = [\n (\n ethosu.AddParams.composite_name,\n ethosu.qnn_add_pattern(),\n lambda pat: ethosu.AddParams(pat).is_valid(),\n ),\n ]\n elif operator_type == \"SUB\":\n rewriter = legalize.SubRewriter()\n pattern_table = [\n (\n ethosu.SubParams.composite_name,\n ethosu.qnn_subtract_pattern(),\n lambda pat: ethosu.SubParams(pat).is_valid(),\n ),\n ]\n elif operator_type == \"MUL\":\n rewriter = legalize.MulRewriter()\n pattern_table = [\n (\n ethosu.MulParams.composite_name,\n ethosu.qnn_mul_pattern(),\n lambda pat: ethosu.MulParams(pat).is_valid(),\n ),\n ]\n elif operator_type == \"MIN\":\n rewriter = legalize.MinRewriter()\n pattern_table = [\n (\n ethosu.MinParams.composite_name,\n ethosu.minimum_pattern(),\n lambda pat: ethosu.MinParams(pat).is_valid(),\n ),\n ]\n elif operator_type == \"MAX\":\n rewriter = legalize.MaxRewriter()\n pattern_table = [\n (\n ethosu.MaxParams.composite_name,\n ethosu.maximum_pattern(),\n lambda pat: ethosu.MaxParams(pat).is_valid(),\n ),\n ]\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"x\": ifm_shape, \"y\": ifm2_shape},\n dtype_dict={\"x\": dtype, \"y\": dtype},\n )\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\ndef test_binary_add_from_constant_scalar():\n dtype = \"uint8\"\n ifm_shape = (1, 4, 4, 8)\n\n def create_graph():\n inp = relay.var(\"input\", shape=ifm_shape, dtype=dtype)\n scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)\n add = relay.qnn.op.add(\n inp,\n scalar,\n relay.const(1.0, dtype=\"float32\"),\n relay.const(0, dtype=\"int32\"),\n relay.const(1.0, dtype=\"float32\"),\n relay.const(0, dtype=\"int32\"),\n 
relay.const(1.0, dtype=\"float32\"),\n relay.const(0, dtype=\"int32\"),\n )\n func = relay.Function(relay.analysis.free_vars(add), add)\n return tvm.IRModule.from_expr(func)\n\n def verify(ext_func):\n op = ext_func.body\n assert list(op.args[0].checked_type.shape) == [1, 4, 4, 8]\n assert list(op.args[1].checked_type.shape) == [1, 1, 1, 1]\n assert op.args[0].checked_type.dtype == \"uint8\"\n assert list(op.checked_type.shape) == [1, 4, 4, 8]\n assert op.checked_type.dtype == \"uint8\"\n assert op.attrs.operator_type == \"ADD\"\n\n rewriter = legalize.AddRewriter()\n pattern_table = [\n (\n ethosu.AddParams.composite_name,\n ethosu.qnn_add_pattern(),\n lambda pat: ethosu.AddParams(pat).is_valid(),\n ),\n ]\n\n mod = create_graph()\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape, ifm2_shape, reversed_operands\",\n [\n ([1, 2, 3, 4], [1, 2, 3, 4], False),\n ([1, 2, 3, 4], [1, 1, 3, 1], False),\n ([1, 1, 3, 1], [1, 2, 3, 4], True),\n ],\n)\ndef test_ethosu_left_shift_binary_elemwise_legalize(ifm_shape, ifm2_shape, reversed_operands):\n dtype = \"int32\"\n operator_type = \"SHL\"\n\n def create_graph():\n input1 = relay.var(\"x1\", shape=ifm_shape, dtype=dtype)\n input2 = relay.var(\"x2\", shape=ifm2_shape, dtype=dtype)\n c1 = relay.left_shift(input1, input2)\n f = relay.Function([input1, input2], c1)\n mod = tvm.IRModule()\n mod[\"main\"] = f\n return mod\n\n def verify(ext_func):\n out_shape = ifm2_shape if reversed_operands else ifm_shape\n shapes = [ifm_shape, ifm2_shape]\n ifm_index, ifm2_index = (1, 0) if reversed_operands else (0, 1)\n op = ext_func.body\n assert list(op.args[0].checked_type.shape) == shapes[ifm_index]\n assert list(op.args[1].checked_type.shape) == shapes[ifm2_index]\n assert op.args[0].checked_type.dtype == dtype\n assert list(op.checked_type.shape) == out_shape\n assert op.checked_type.dtype == dtype\n assert op.attrs.operator_type == operator_type\n assert op.attrs.reversed_operands == reversed_operands\n assert str(op.attrs.activation) == \"NONE\"\n\n rewriter = legalize.ShlRewriter()\n pattern_table = [\n (\n ethosu.ShlParams.composite_name,\n ethosu.shl_pattern(),\n lambda pat: ethosu.ShlParams(pat).is_valid(),\n ),\n ]\n\n mod = create_graph()\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape, new_shape\",\n [\n ((1, 4, 1, 2), (4, 2)),\n ((1, 5, 1, 20), (100,)),\n ((12, 20), (1, 6, 4, 10)),\n ((30,), (10, 1, 3)),\n ],\n)\ndef test_relay_reshape_legalize(ifm_shape, new_shape):\n\n ifm = relay.var(\"ifm\", shape=ifm_shape, dtype=\"int8\")\n reshape = relay.op.reshape(ifm, new_shape)\n func = relay.Function([ifm], reshape)\n mod = tvm.IRModule()\n mod[\"main\"] = func\n mod = relay.transform.InferType()(mod)\n\n reshape_pattern_table = [\n (\n ethosu.ReshapeParams.composite_name,\n ethosu.reshape_pattern(),\n lambda pat: ethosu.ReshapeParams(pat).is_valid(),\n ),\n ]\n\n mod = partition_ethosu_by_table(mod, reshape_pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.ReshapeRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n 
legalize.NoOpRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod = relay.transform.InferType()(mod)\n\n ext_func = mod[\"tvmgen_default_ethos_u_main_0\"]\n\n identity = ext_func.body\n assert identity.op.name == \"contrib.ethosu.identity\"\n\n # check that the reshape is still there\n reshape = identity.args[0]\n assert reshape.op.name == \"reshape\"\n\n # check that identity's output shape matches reshape's output shape\n assert tuple(identity.checked_type.shape) == new_shape\n\n\[email protected](\n \"ifm_shape, begin, end\",\n [\n ([1, 10, 50, 4], [0, 5, 11, 2], [1, 10, 22, 3]),\n ([1, 101, 35, 27], [0, 5, 11, 2], [1, 10, 22, 3]),\n ([15, 17, 3], [3, 0, 0], [11, 17, 1]),\n ([1, 6043], [0, 704], [1, 800]),\n ],\n)\ndef test_relay_strided_slice_legalize(ifm_shape, begin, end):\n\n ifm = relay.var(\"ifm\", shape=ifm_shape, dtype=\"int8\")\n strided_slice = relay.op.strided_slice(ifm, begin, end)\n func = relay.Function([ifm], strided_slice)\n mod = tvm.IRModule()\n mod[\"main\"] = func\n mod = relay.transform.InferType()(mod)\n\n strided_slice_pattern_table = [\n (\n ethosu.StridedSliceParams.composite_name,\n ethosu.strided_slice_pattern(),\n lambda pat: ethosu.StridedSliceParams(pat).is_valid(),\n ),\n ]\n\n mod = partition_ethosu_by_table(mod, strided_slice_pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.StridedSliceRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.NoOpRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod = relay.transform.InferType()(mod)\n\n ext_func = mod[\"tvmgen_default_ethos_u_main_0\"]\n\n identity = ext_func.body\n assert identity.op.name == \"contrib.ethosu.identity\"\n\n # check that the strided_slice is still there\n strided_slice = identity.args[0]\n assert strided_slice.op.name == \"strided_slice\"\n\n # check that identity's output shape matches strided slice's output shape\n slice_shape = [a - b for a, b in zip(end, begin)]\n assert list(identity.checked_type.shape) == slice_shape\n\n\[email protected](\"operator_type\", [\"ABS\"])\[email protected](\n \"ifm_shape\",\n [[1, 2, 3, 4], [1, 7, 3], [8, 3, 1], [11, 22], [300]],\n)\ndef test_tflite_unary_elemwise_legalize(\n operator_type,\n ifm_shape,\n):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def abs_func(self, x):\n if operator_type == \"ABS\":\n op = tf.math.abs(x)\n return op\n\n model = Model()\n\n # Save the model\n concrete_func = model.abs_func.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n def verify(ext_func):\n out_shape = ifm_shape\n func_body = ext_func.body\n\n # If we legalized the unary elementwise op into 4D\n if func_body.op.name == \"reshape\":\n reshape = func_body\n unary = func_body.args[0]\n reshape2 = unary.args[0]\n\n # Check the input to the reshape\n reshape2_in_shape = [i for i in 
reshape2.args[0].checked_type.shape]\n assert reshape2_in_shape == ifm_shape\n\n # Check that the unary elementwise operator is 4D after reshape\n assert len(unary.checked_type.shape) == 4\n assert unary.args[0].checked_type.dtype == dtype\n\n # Check that the output of the graph has the same shape as input\n reshape_out_shape = [i for i in reshape.checked_type.shape]\n assert reshape_out_shape == ifm_shape\n assert unary.attrs.operator_type == operator_type\n\n else:\n unary = func_body\n\n # Check the IFM\n assert list(unary.args[0].checked_type.shape) == ifm_shape\n assert unary.args[0].checked_type.dtype == dtype\n\n # Check the OFM\n assert list(unary.checked_type.shape) == out_shape\n assert unary.checked_type.dtype == dtype\n\n # operator type check\n assert unary.attrs.operator_type == operator_type\n\n if operator_type == \"ABS\":\n rewriter = legalize.AbsRewriter()\n pattern_table = [\n (\n ethosu.AbsParams.composite_name,\n ethosu.abs_pattern(),\n lambda pat: ethosu.AbsParams(pat).is_valid(),\n ),\n ]\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n mod = partition_ethosu_by_table(mod, pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\ndef test_tflite_tanh_legalize():\n dtype = \"int8\"\n ifm_shape = (1, 241, 132, 7)\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tanh_func(self, x):\n op = tf.math.tanh(x)\n return op\n\n model = Model()\n concrete_func = model.tanh_func.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, params = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n\n mod = ethosu.partition_for_ethosu(mod, params)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.TanhRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod = relay.transform.InferType()(mod)\n\n func_body = mod[\"tvmgen_default_ethos_u_main_0\"].body\n assert func_body.op.name == \"contrib.ethosu.identity\"\n assert func_body.attrs.activation == \"TANH\"\n assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)\n assert tuple(func_body.args[1].checked_type.shape) == (256,)\n\n\[email protected](\n \"ifm_shape, axis, keep_dims, use_same_quantization\",\n [\n # mean to depthwise + multiply\n [(1, 8, 16, 16), (1, 2), True, False],\n [(1, 8, 16, 16), (2, 1), True, False],\n [(1, 3, 4), (0, 1), True, False],\n [(8, 5), (1, 0), True, False],\n [(1, 65, 2, 1), (1, 2), True, False], # special case when h > 64\n # mean to average pool\n 
[(1, 8, 16, 16), (1,), True, True],\n [(1, 8, 16, 16), (2,), False, True],\n [(1, 8, 16, 16), (1, 2), False, True],\n [(3, 3, 4), (0,), True, True],\n [(3, 3, 4), (1,), False, True],\n [(8, 5), (0,), False, True],\n [(8, 5), (1,), True, True],\n # mean to depthwise\n [(1, 8, 16, 16), (1,), True, False],\n [(1, 8, 16, 16), (2,), True, False],\n [(1, 8, 16, 16), (1, 2), False, False],\n [(8, 4), (0,), False, False],\n ],\n)\ndef test_mean(ifm_shape, axis, keep_dims, use_same_quantization):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x):\n op = tf.math.reduce_mean(x, axis=axis, keepdims=keep_dims)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n return mod\n\n def create_relay_graph_with_same_quantization():\n ifm = relay.var(\"input\", shape=ifm_shape, dtype=dtype)\n cast = relay.cast(ifm, dtype=\"int32\")\n mean = relay.mean(cast, axis=axis, keepdims=keep_dims)\n requantize = relay.qnn.op.requantize(\n mean,\n input_scale=relay.const(1.0, dtype=\"float32\"),\n input_zero_point=relay.const(0, dtype=\"int32\"),\n output_scale=relay.const(1.0, dtype=\"float32\"),\n output_zero_point=relay.const(0, dtype=\"int32\"),\n )\n\n func = relay.Function(relay.analysis.free_vars(requantize), requantize)\n mod = tvm.IRModule.from_expr(func)\n return mod\n\n def verify(ext_func):\n out_var = ext_func.body\n\n next_op = out_var\n mul_op = None\n pooling_op = None\n depthwise_op = None\n if (\n isinstance(next_op, relay.expr.Call)\n and isinstance(next_op.op, tvm.ir.op.Op)\n and next_op.op.name == \"reshape\"\n ):\n next_op = next_op.args[0]\n if util.is_named_ethosu_op(next_op, \"binary_elementwise\"):\n mul_op = next_op\n next_op = next_op.args[0]\n if util.is_named_ethosu_op(next_op, \"pooling\"):\n pooling_op = next_op\n next_op = next_op.args[0]\n if util.is_named_ethosu_op(next_op, \"depthwise_conv2d\"):\n depthwise_op = next_op\n next_op = next_op.args[0]\n while (\n isinstance(next_op, relay.expr.Call)\n and isinstance(next_op.op, tvm.ir.op.Op)\n and next_op.op.name == \"reshape\"\n ):\n next_op = next_op.args[0]\n in_var = next_op\n\n def calculate_expected_output_shape():\n for i in range(len(ifm_shape)):\n if i in axis:\n if keep_dims:\n yield 1\n else:\n yield ifm_shape[i]\n\n out_shape = tuple(calculate_expected_output_shape())\n\n # check IFM\n assert tuple(in_var.checked_type.shape) == ifm_shape\n assert in_var.checked_type.dtype == dtype\n\n # check OFM\n assert tuple(out_var.checked_type.shape) == out_shape\n assert out_var.checked_type.dtype == dtype\n\n # check expected legalization case\n if axis in [(1, 2), (2, 1), (0, 1), (1, 0)] and keep_dims and dtype == \"int8\":\n assert 
depthwise_op and mul_op\n assert mul_op.attrs.operator_type == \"MUL\"\n elif pooling_op:\n attrs = pooling_op.attrs\n assert (\n attrs.ifm_scale == attrs.ofm_scale and attrs.ifm_zero_point == attrs.ofm_zero_point\n )\n else:\n assert depthwise_op\n assert not mul_op\n\n rewriter = legalize.MeanRewriter()\n pattern_table = [\n (\n ethosu.MeanParams.composite_name,\n ethosu.mean_pattern(),\n lambda pat: ethosu.MeanParams(pat).is_valid(),\n ),\n ]\n\n mod = (\n create_relay_graph_with_same_quantization()\n if use_same_quantization\n else create_tflite_graph()\n )\n mod = partition_ethosu_by_table(mod, pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"shapes, axis\",\n [\n ([(2, 3), (4, 3)], 0),\n ([(10, 2, 1), (10, 14, 1)], 1),\n ([(10,), (13,), (14,)], 0),\n ([(1, 5, 2, 1), (1, 5, 7, 1), (1, 5, 3, 1)], 2),\n ],\n)\ndef test_tflite_concat_legalize(shapes, axis):\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, shapes, axis):\n op = tf.concat(shapes, axis)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n [tf.TensorSpec(shape, tf.float32) for shape in shapes], axis\n )\n\n def representative_dataset():\n for _ in range(100):\n datas = [np.random.rand(*shape) for shape in shapes]\n yield [data.astype(np.float32) for data in datas]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n\n return tflite_model\n\n def verify(ext_func):\n new_concat_axis = np.sum(shape[axis] for shape in shapes)\n out_shape = list(shapes[0])\n out_shape[axis] = new_concat_axis\n\n op = ext_func.body\n for i, _ in enumerate(shapes):\n assert list(op.args[0][i].checked_type.shape) == list(shapes[i])\n\n assert list(op.checked_type.shape) == out_shape\n assert op.checked_type.dtype == \"int8\"\n\n concat_pattern_table = [\n (\n ethosu.ConcatParams.composite_name,\n ethosu.concat_pattern(),\n lambda pat: ethosu.ConcatParams(pat).is_valid(),\n )\n ]\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n relay_module, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={(\"ifm\" + str(i)): shape for i, shape in enumerate(shapes)},\n dtype_dict={(\"ifm\" + str(i)): \"int8\" for i, _ in enumerate(shapes)},\n )\n mod = partition_ethosu_by_table(relay_module, concat_pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.ConcatRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.NoOpRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod[\"tvmgen_default_ethos_u_main_0\"] = relay.transform.InferType()(mod)[\n \"tvmgen_default_ethos_u_main_0\"\n ]\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\ndef test_tflite_sigmoid_legalize():\n dtype = \"int8\"\n ifm_shape = (1, 237, 91, 7)\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def sigmoid_func(self, x):\n op = tf.math.sigmoid(x)\n return op\n\n model = Model()\n 
concrete_func = model.sigmoid_func.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n # Convert the model\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_output_type = tf.int8\n converter.inference_input_type = tf.int8\n tflite_model = converter.convert()\n return tflite_model\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, params = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n\n mod = ethosu.partition_for_ethosu(mod, params)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.SigmoidRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n mod = relay.transform.InferType()(mod)\n\n func_body = mod[\"tvmgen_default_ethos_u_main_0\"].body\n assert func_body.op.name == \"contrib.ethosu.identity\"\n assert func_body.attrs.activation == \"SIGMOID\"\n assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)\n assert tuple(func_body.args[1].checked_type.shape) == (256,)\n\n\[email protected](\n \"ifm_shape, num_or_size_splits, axis\",\n [\n ((1, 4, 6, 8), 3, 2),\n ((4, 6, 8), 2, 0),\n ((5, 15), 3, 1),\n ((3, 7), 1, 1),\n ((100,), 25, 0),\n ],\n)\ndef test_tflite_split_legalize(ifm_shape, num_or_size_splits, axis):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x, num_or_size_splits, axis):\n op = tf.split(x, num_or_size_splits, axis=axis)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, tf.float32), num_or_size_splits, axis\n )\n\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n\n return tflite_model\n\n def verify(ext_func):\n # dig out the split\n single_output_split = num_or_size_splits == 1\n split = (\n ext_func.body.tuple_value\n if single_output_split\n else ext_func.body.args[0][0].args[0].tuple_value\n )\n assert split.op.name == \"split\"\n\n # Split is specified by number of equal chunks\n assert split.attrs.indices_or_sections == num_or_size_splits\n\n assert split.attrs.axis == axis\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n mod = ethosu.partition_for_ethosu(mod)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.PartitionedSplitRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = relay.transform.InferType()(mod)[\n 
\"tvmgen_default_ethos_u_main_0\"\n ]\n\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape, num_or_size_splits, axis\",\n [\n ((1, 4, 6, 8), (1, 3, 4), 3),\n ((10, 18, 4), (1, 4, 3, 2), 0),\n ((22, 7), (4, -1), 1),\n ((25,), (25,), 0),\n ],\n)\ndef test_tflite_split_v_legalize(ifm_shape, num_or_size_splits, axis):\n dtype = \"int8\"\n\n def create_tflite_graph():\n class Model(tf.Module):\n @tf.function\n def tf_function(self, x, num_or_size_splits, axis):\n # TF split gets converted into TFLite's split_v\n op = tf.split(x, num_or_size_splits, axis=axis)\n return op\n\n model = Model()\n concrete_func = model.tf_function.get_concrete_function(\n tf.TensorSpec(ifm_shape, tf.float32), num_or_size_splits, axis\n )\n\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n\n return tflite_model\n\n def verify(ext_func):\n # dig out the split\n single_output_split = len(num_or_size_splits) == 1\n split = (\n ext_func.body.tuple_value\n if single_output_split\n else ext_func.body.args[0][0].args[0].tuple_value\n )\n assert split.op.name == \"split\"\n\n # Split is specified by the size of sections, so converting num_or_size_splits\n # into the indices where the tensor is split at since this is how split is represented\n # in Relay\n split_sections = [] if single_output_split else [num_or_size_splits[0]]\n for split_size in num_or_size_splits[1:-1]:\n sec = split_sections[-1] + split_size\n split_sections.append(sec)\n assert list(split.attrs.indices_or_sections) == split_sections\n\n assert split.attrs.axis == axis\n\n tflite_graph = create_tflite_graph()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n mod = ethosu.partition_for_ethosu(mod)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n legalize.PartitionedSplitRewriter(), mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = relay.transform.InferType()(mod)[\n \"tvmgen_default_ethos_u_main_0\"\n ]\n\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape,ifm_scale,ifm_zp,ofm_scale,ofm_zp\",\n [[(1, 8, 8, 3), 1.0, 0, 1.0, 0], [(1, 20, 30, 3), 1.345, 34, 0.32, -23]],\n)\ndef test_ethosu_requantize(ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp):\n dtype = \"int8\"\n\n def create_model():\n ifm = relay.var(\"ifm\", shape=ifm_shape, dtype=\"int8\")\n requantize = relay.qnn.op.requantize(\n ifm,\n relay.const(ifm_scale, dtype=\"float32\"),\n relay.const(ifm_zp, dtype=\"int32\"),\n relay.const(ofm_scale, dtype=\"float32\"),\n relay.const(ofm_zp, dtype=\"int32\"),\n )\n return tvm.IRModule.from_expr(relay.Function([ifm], requantize))\n\n def verify(ext_func):\n op = ext_func.body\n\n # Check IFM\n ifm = op.args[0].checked_type\n assert list(ifm.shape) == list(ifm_shape)\n assert str(ifm.dtype) == dtype\n\n # Check OFM\n ofm = op.checked_type\n assert list(ofm.shape) == list(ifm_shape)\n assert str(ofm.dtype) == 
dtype\n\n # Check quantization params\n assert math.isclose(op.attrs.ifm_scale, ifm_scale, abs_tol=1e-7)\n assert op.attrs.ifm_zero_point == ifm_zp\n assert math.isclose(op.attrs.ofm_scale, ofm_scale, abs_tol=1e-7)\n assert op.attrs.ofm_zero_point == ofm_zp\n\n rewriter = legalize.RequantizeRewriter()\n pattern_table = [\n (\n ethosu.RequantizeParams.composite_name,\n ethosu.requantize_pattern(),\n lambda pat: ethosu.RequantizeParams(pat).is_valid(),\n ),\n ]\n\n mod = create_model()\n mod = partition_ethosu_by_table(mod, pattern_table)\n\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\ndef test_multiple_requantize_offload():\n \"\"\"\n Testing requantize offload in the case one requantize operation is part of\n an existing pattern (in this case Mean: cast->mean->requantize) and the\n other is a stand-alone requantize.\n \"\"\"\n\n def create_model():\n ifm = relay.var(\"input\", shape=(1, 3, 3, 4), dtype=\"int8\")\n cast = relay.cast(ifm, dtype=\"int32\")\n mean = relay.mean(cast, axis=1, keepdims=True)\n requantize = relay.qnn.op.requantize(\n mean,\n input_scale=relay.const(1.0, dtype=\"float32\"),\n input_zero_point=relay.const(0, dtype=\"int32\"),\n output_scale=relay.const(1.0, dtype=\"float32\"),\n output_zero_point=relay.const(0, dtype=\"int32\"),\n )\n requantize = relay.qnn.op.requantize(\n requantize,\n input_scale=relay.const(1.0, dtype=\"float32\"),\n input_zero_point=relay.const(0, dtype=\"int32\"),\n output_scale=relay.const(1.0, dtype=\"float32\"),\n output_zero_point=relay.const(0, dtype=\"int32\"),\n )\n return tvm.IRModule.from_expr(relay.Function([ifm], requantize))\n\n def verify(ext_func):\n # If mean operation and separate requantize were offloaded correctly,\n # there should only be a pooling operation followed by an identity\n # operation leagalized.\n op = ext_func.body\n assert op.op.name == \"contrib.ethosu.identity\"\n op = op.args[0]\n assert ext_func.body.args[0].op.name == \"contrib.ethosu.pooling\"\n op = op.args[0]\n assert isinstance(op, relay.Var)\n\n mod = create_model()\n mod = ethosu.partition_for_ethosu(mod)\n mod = legalize.LegalizeEthosU()(mod)\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape,size\",\n [\n [(1, 2, 2, 1), (4, 4)],\n [(1, 4, 7, 3), (8, 14)],\n [(1, 3, 5, 3), (3, 5)],\n ],\n)\ndef test_tflite_resize2d_nearest_neighbor(ifm_shape, size):\n align_corners = False\n dtype = \"int8\"\n\n def create_tflite_graph():\n @tf.function\n def resize_model(x):\n return tf.compat.v1.image.resize_nearest_neighbor(\n x, size, align_corners=align_corners, half_pixel_centers=False\n )\n\n concrete_func = resize_model.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n 
dtype_dict={\"input\": dtype},\n )\n return mod\n\n def verify(ext_func):\n op = ext_func.body\n in_var = op.args[0]\n\n # check IFM\n assert tuple(in_var.checked_type.shape) == ifm_shape\n assert in_var.checked_type.dtype == dtype\n\n # check OFM\n attrs = dict(op.attrs)\n out_shape = (ifm_shape[0], size[0], size[1], ifm_shape[3])\n assert tuple(op.checked_type.shape) == out_shape\n assert op.checked_type.dtype == dtype\n\n # Check Op attributes\n if size[0] == ifm_shape[1] and size[1] == ifm_shape[2]:\n assert op.op.name == \"contrib.ethosu.identity\"\n else:\n assert attrs[\"pooling_type\"] == \"AVG\"\n assert attrs[\"upscale\"] == \"NEAREST\"\n\n rewriter = legalize.Resize2dRewriter()\n pattern_table = [\n (\n ethosu.Resize2dParams.composite_name,\n ethosu.resize2d_pattern(),\n lambda pat: ethosu.Resize2dParams(pat).is_valid(),\n ),\n ]\n\n mod = create_tflite_graph()\n mod = partition_ethosu_by_table(mod, pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\[email protected](\n \"ifm_shape,size,align_corners\",\n [\n [(1, 2, 2, 1), (4, 4), False],\n [(1, 4, 7, 3), (8, 14), False],\n [(1, 2, 2, 1), (3, 3), True],\n [(1, 4, 7, 3), (7, 13), True],\n [(1, 3, 5, 3), (3, 5), False],\n ],\n)\ndef test_tflite_resize2d_bilinear(ifm_shape, size, align_corners):\n dtype = \"int8\"\n\n def create_tflite_graph():\n @tf.function\n def resize_model(x):\n return tf.compat.v1.image.resize_bilinear(\n x, size, align_corners=align_corners, half_pixel_centers=False\n )\n\n concrete_func = resize_model.get_concrete_function(\n tf.TensorSpec(ifm_shape, dtype=tf.float32)\n )\n\n def representative_dataset():\n for _ in range(100):\n data = np.random.rand(*tuple(ifm_shape))\n yield [data.astype(np.float32)]\n\n converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n tflite_model = converter.convert()\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)\n\n mod, _ = relay.frontend.from_tflite(\n tflite_model,\n shape_dict={\"input\": ifm_shape},\n dtype_dict={\"input\": dtype},\n )\n return mod\n\n def verify(ext_func):\n op = ext_func.body\n in_var = op.args[0]\n\n # check IFM\n assert tuple(in_var.checked_type.shape) == ifm_shape\n assert in_var.checked_type.dtype == dtype\n\n # check OFM\n attrs = dict(op.attrs)\n out_shape = (ifm_shape[0], size[0], size[1], ifm_shape[3])\n assert tuple(op.checked_type.shape) == out_shape\n assert op.checked_type.dtype == dtype\n\n # Check Op attributes\n if size[0] == ifm_shape[1] and size[1] == ifm_shape[2]:\n assert op.op.name == \"contrib.ethosu.identity\"\n else:\n assert attrs[\"pooling_type\"] == \"AVG\"\n assert attrs[\"upscale\"] == \"NEAREST\"\n\n # Check padding\n if align_corners:\n assert list(attrs[\"padding\"]) == [0, 0, 0, 0]\n else:\n assert list(attrs[\"padding\"]) == [0, 0, 1, 1]\n\n rewriter = legalize.Resize2dRewriter()\n pattern_table = [\n (\n ethosu.Resize2dParams.composite_name,\n ethosu.resize2d_pattern(),\n lambda pat: ethosu.Resize2dParams(pat).is_valid(),\n ),\n ]\n\n mod = create_tflite_graph()\n mod = partition_ethosu_by_table(mod, pattern_table)\n mod[\"tvmgen_default_ethos_u_main_0\"] = 
dataflow_pattern.rewrite(\n rewriter, mod[\"tvmgen_default_ethos_u_main_0\"]\n )\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n"
] | [
[
"tensorflow.math.abs",
"tensorflow.math.add",
"tensorflow.concat",
"tensorflow.nn.max_pool",
"tensorflow.compat.v1.image.resize_nearest_neighbor",
"tensorflow.math.tanh",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.lite.TFLiteConverter.from_concrete_functions",
"tensorflow.math.multiply",
"tensorflow.math.sigmoid",
"numpy.random.rand",
"tensorflow.compat.v1.image.resize_bilinear",
"tensorflow.nn.avg_pool",
"tensorflow.split",
"numpy.sum",
"tensorflow.math.subtract",
"tensorflow.nn.relu",
"tensorflow.math.maximum",
"tensorflow.math.reduce_mean",
"numpy.ones",
"numpy.random.uniform",
"tensorflow.TensorSpec",
"tensorflow.math.minimum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
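The TVM Ethos-U test file stored in the record above repeats one helper pattern many times: build a tiny tf.function, quantize it to an int8 TFLite model with a random representative dataset, then import it via relay.frontend.from_tflite. Below is a minimal, self-contained sketch of just that quantization step, using only the TensorFlow calls that appear in the stored tests; the abs() model and its shape are illustrative, not taken from any particular test.

import numpy as np
import tensorflow as tf

def quantize_to_int8_tflite(concrete_func, ifm_shape):
    # Representative dataset: random inputs drive calibration of the
    # int8 quantization parameters (same trick as in the stored tests).
    def representative_dataset():
        for _ in range(100):
            yield [np.random.rand(*ifm_shape).astype(np.float32)]

    converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8
    return converter.convert()  # serialized int8 TFLite flatbuffer

# Illustrative use with a one-op abs() model:
ifm_shape = (1, 2, 3, 4)

@tf.function
def abs_model(x):
    return tf.math.abs(x)

tflite_model = quantize_to_int8_tflite(
    abs_model.get_concrete_function(tf.TensorSpec(ifm_shape, tf.float32)),
    ifm_shape,
)
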
ischigal/gammapy | [
"c56ca1bb237d9eb4a7a3aed8eaf359206bf0e628",
"c56ca1bb237d9eb4a7a3aed8eaf359206bf0e628"
] | [
"gammapy/modeling/tests/test_fit.py",
"docs/irf/plot_edisp_kernel_param.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Unit tests for the Fit class\"\"\"\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom astropy.table import Table\nfrom gammapy.datasets import Dataset\nfrom gammapy.modeling import Fit, Parameter\nfrom gammapy.modeling.models import Model, Models\nfrom gammapy.utils.testing import requires_dependency\n\npytest.importorskip(\"iminuit\")\n\n\nclass MyModel(Model):\n x = Parameter(\"x\", 2)\n y = Parameter(\"y\", 3e2)\n z = Parameter(\"z\", 4e-2)\n name = \"test\"\n datasets_names = [\"test\"]\n type = \"model\"\n\n\nclass MyDataset(Dataset):\n tag = \"MyDataset\"\n\n def __init__(self, name=\"test\"):\n self._name = name\n self._models = Models([MyModel(x=1.99, y=2.99e3, z=3.99e-2)])\n self.data_shape = (1,)\n self.meta_table = Table()\n\n @property\n def models(self):\n return self._models\n\n def stat_sum(self):\n # self._model.parameters = parameters\n x, y, z = [p.value for p in self.models.parameters]\n x_opt, y_opt, z_opt = 2, 3e2, 4e-2\n return (x - x_opt) ** 2 + (y - y_opt) ** 2 + (z - z_opt) ** 2\n\n def fcn(self):\n x, y, z = [p.value for p in self.models.parameters]\n x_opt, y_opt, z_opt = 2, 3e5, 4e-5\n x_err, y_err, z_err = 0.2, 3e4, 4e-6\n return (\n ((x - x_opt) / x_err) ** 2\n + ((y - y_opt) / y_err) ** 2\n + ((z - z_opt) / z_err) ** 2\n )\n\n def stat_array(self):\n \"\"\"Statistic array, one value per data point.\"\"\"\n\n\n@requires_dependency(\"iminuit\")\n@requires_dependency(\"sherpa\")\[email protected](\"backend\", [\"sherpa\", \"scipy\"])\ndef test_optimize_backend_and_covariance(backend):\n dataset = MyDataset()\n\n if backend == \"scipy\":\n kwargs = {\"method\": \"L-BFGS-B\"}\n else:\n kwargs = {}\n\n kwargs[\"backend\"] = backend\n\n fit = Fit(optimize_opts=kwargs)\n result = fit.run([dataset])\n result = result[\"optimize_result\"]\n\n pars = result.parameters\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-3)\n assert_allclose(pars[\"y\"].value, 3e2, rtol=1e-3)\n assert_allclose(pars[\"z\"].value, 4e-2, rtol=1e-2)\n\n assert_allclose(pars[\"x\"].error, 1, rtol=1e-7)\n assert_allclose(pars[\"y\"].error, 1, rtol=1e-7)\n assert_allclose(pars[\"z\"].error, 1, rtol=1e-7)\n\n correlation = dataset.models.covariance.correlation\n assert_allclose(correlation[0, 1], 0, atol=1e-7)\n assert_allclose(correlation[0, 2], 0, atol=1e-7)\n assert_allclose(correlation[1, 2], 0, atol=1e-7)\n\n\[email protected](\"backend\", [\"minuit\"])\ndef test_run(backend):\n dataset = MyDataset()\n fit = Fit(backend=backend)\n result = fit.run([dataset])\n result = result[\"optimize_result\"]\n pars = result.parameters\n\n assert result.success is True\n\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-3)\n assert_allclose(pars[\"y\"].value, 3e2, rtol=1e-3)\n assert_allclose(pars[\"z\"].value, 4e-2, rtol=1e-3)\n\n assert_allclose(pars[\"x\"].error, 1, rtol=1e-7)\n assert_allclose(pars[\"y\"].error, 1, rtol=1e-7)\n assert_allclose(pars[\"z\"].error, 1, rtol=1e-7)\n\n correlation = dataset.models.covariance.correlation\n assert_allclose(correlation[0, 1], 0, atol=1e-7)\n assert_allclose(correlation[0, 2], 0, atol=1e-7)\n assert_allclose(correlation[1, 2], 0, atol=1e-7)\n\n\n@requires_dependency(\"sherpa\")\[email protected](\"backend\", [\"minuit\", \"sherpa\", \"scipy\"])\ndef test_optimize(backend):\n dataset = MyDataset()\n\n if backend == \"scipy\":\n kwargs = {\"method\": \"L-BFGS-B\"}\n else:\n kwargs = {}\n\n fit = Fit(store_trace=True, backend=backend, optimize_opts=kwargs)\n result = 
fit.optimize([dataset])\n pars = dataset.models.parameters\n\n assert result.success is True\n assert_allclose(result.total_stat, 0, atol=1)\n\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-3)\n assert_allclose(pars[\"y\"].value, 3e2, rtol=1e-3)\n assert_allclose(pars[\"z\"].value, 4e-2, rtol=1e-2)\n\n assert len(result.trace) == result.nfev\n\n\n# TODO: add some extra covariance tests, in addition to run\n# Probably mainly if error message is OK if optimize didn't run first.\n# def test_covariance():\n\n\[email protected](\"backend\", [\"minuit\"])\ndef test_confidence(backend):\n dataset = MyDataset()\n fit = Fit(backend=backend)\n fit.optimize([dataset])\n result = fit.confidence(datasets=[dataset], parameter=\"x\")\n\n assert result[\"success\"] is True\n assert_allclose(result[\"errp\"], 1)\n assert_allclose(result[\"errn\"], 1)\n\n # Check that original value state wasn't changed\n assert_allclose(dataset.models.parameters[\"x\"].value, 2)\n\n\[email protected](\"backend\", [\"minuit\"])\ndef test_confidence_frozen(backend):\n dataset = MyDataset()\n dataset.models.parameters[\"x\"].frozen = True\n fit = Fit(backend=backend)\n fit.optimize([dataset])\n result = fit.confidence(datasets=[dataset], parameter=\"y\")\n\n assert result[\"success\"] is True\n assert_allclose(result[\"errp\"], 1)\n assert_allclose(result[\"errn\"], 1)\n\n\ndef test_stat_profile():\n dataset = MyDataset()\n fit = Fit()\n fit.run([dataset])\n dataset.models.parameters[\"x\"].scan_n_values = 3\n result = fit.stat_profile(datasets=[dataset], parameter=\"x\")\n\n assert_allclose(result[\"x_scan\"], [0, 2, 4], atol=1e-7)\n assert_allclose(result[\"stat_scan\"], [4, 0, 4], atol=1e-7)\n assert len(result[\"fit_results\"]) == 0\n\n # Check that original value state wasn't changed\n assert_allclose(dataset.models.parameters[\"x\"].value, 2)\n\n\ndef test_stat_profile_reoptimize():\n dataset = MyDataset()\n fit = Fit()\n fit.run([dataset])\n\n dataset.models.parameters[\"y\"].value = 0\n dataset.models.parameters[\"x\"].scan_n_values = 3\n result = fit.stat_profile(datasets=[dataset], parameter=\"x\", reoptimize=True)\n\n assert_allclose(result[\"x_scan\"], [0, 2, 4], atol=1e-7)\n assert_allclose(result[\"stat_scan\"], [4, 0, 4], atol=1e-7)\n assert_allclose(\n result[\"fit_results\"][0].total_stat, result[\"stat_scan\"][0], atol=1e-7\n )\n\n\ndef test_stat_surface():\n dataset = MyDataset()\n fit = Fit()\n fit.run([dataset])\n\n x_values = [1, 2, 3]\n y_values = [2e2, 3e2, 4e2]\n\n dataset.models.parameters[\"x\"].scan_values = x_values\n dataset.models.parameters[\"y\"].scan_values = y_values\n result = fit.stat_surface(datasets=[dataset], x=\"x\", y=\"y\")\n\n assert_allclose(result[\"x_scan\"], x_values, atol=1e-7)\n assert_allclose(result[\"y_scan\"], y_values, atol=1e-7)\n expected_stat = [\n [1.0001e04, 1.0000e00, 1.0001e04],\n [1.0000e04, 0.0000e00, 1.0000e04],\n [1.0001e04, 1.0000e00, 1.0001e04],\n ]\n assert_allclose(list(result[\"stat_scan\"]), expected_stat, atol=1e-7)\n assert len(result[\"fit_results\"]) == 0\n\n # Check that original value state wasn't changed\n assert_allclose(dataset.models.parameters[\"x\"].value, 2)\n assert_allclose(dataset.models.parameters[\"y\"].value, 3e2)\n\n\ndef test_stat_surface_reoptimize():\n dataset = MyDataset()\n fit = Fit()\n fit.run([dataset])\n\n x_values = [1, 2, 3]\n y_values = [2e2, 3e2, 4e2]\n\n dataset.models.parameters[\"z\"].value = 0\n dataset.models.parameters[\"x\"].scan_values = x_values\n dataset.models.parameters[\"y\"].scan_values = y_values\n\n result 
= fit.stat_surface(\n datasets=[dataset], x=\"x\", y=\"y\", reoptimize=True\n )\n\n assert_allclose(result[\"x_scan\"], x_values, atol=1e-7)\n assert_allclose(result[\"y_scan\"], y_values, atol=1e-7)\n expected_stat = [\n [1.0001e04, 1.0000e00, 1.0001e04],\n [1.0000e04, 0.0000e00, 1.0000e04],\n [1.0001e04, 1.0000e00, 1.0001e04],\n ]\n\n assert_allclose(list(result[\"stat_scan\"]), expected_stat, atol=1e-7)\n assert_allclose(\n result[\"fit_results\"][0][0].total_stat, result[\"stat_scan\"][0][0], atol=1e-7\n )\n\n\ndef test_stat_contour():\n dataset = MyDataset()\n dataset.models.parameters[\"x\"].frozen = True\n fit = Fit(backend=\"minuit\")\n fit.optimize([dataset])\n result = fit.stat_contour(datasets=[dataset], x=\"y\", y=\"z\")\n\n assert result[\"success\"] is True\n\n x = result[\"y\"]\n assert_allclose(len(x), 10)\n assert_allclose(x[0], 299, rtol=1e-5)\n assert_allclose(x[-1], 299.292893, rtol=1e-5)\n y = result[\"z\"]\n assert_allclose(len(y), 10)\n assert_allclose(y[0], 0.04, rtol=1e-5)\n assert_allclose(y[-1], 0.747107, rtol=1e-5)\n\n # Check that original value state wasn't changed\n assert_allclose(dataset.models.parameters[\"y\"].value, 300)\n",
"\"\"\"Plot an energy dispersion using a gaussian parametrisation\"\"\"\nimport matplotlib.pyplot as plt\nfrom gammapy.irf import EDispKernel\nfrom gammapy.maps import MapAxis\n\nenergy_axis = MapAxis.from_energy_bounds(\"1 TeV\", \"10 TeV\", nbin=10)\nenergy_axis_true = MapAxis.from_energy_bounds(\n \"0.5 TeV\", \"30 TeV\", nbin=10, per_decade=True, name=\"energy_true\"\n)\n\n\nedisp = EDispKernel.from_gauss(\n energy_axis=energy_axis,\n energy_axis_true=energy_axis_true,\n sigma=0.1,\n bias=0\n)\nedisp.peek()\nplt.show()\n"
] | [
[
"numpy.testing.assert_allclose"
],
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
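The gammapy fit tests in the record above all minimize the same toy statistic, stat = (x - 2)^2 + (y - 3e2)^2 + (z - 4e-2)^2, and then assert that every parameter error equals 1 (the shift that raises the statistic by 1 from its minimum). A self-contained sketch of that reasoning with plain scipy, no gammapy, reusing only the optimum values from the stored tests:

import numpy as np
from scipy.optimize import minimize

OPT = np.array([2.0, 3e2, 4e-2])

def stat(p):
    # Same toy statistic as MyDataset.stat_sum in the stored tests.
    return float(np.sum((p - OPT) ** 2))

res = minimize(stat, x0=[1.99, 2.99e3, 3.99e-2], method="L-BFGS-B")
print(res.x)  # ~[2, 3e2, 4e-2]

# For a chi2-like statistic, the 1-sigma error of a parameter is the shift
# that raises the statistic by 1; each term here is (p - p_opt)^2, so the
# error is exactly 1 for every parameter, matching the asserted errors.
for i in range(3):
    shifted = res.x.copy()
    shifted[i] += 1.0
    print(stat(shifted) - stat(res.x))  # ~1.0
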
Lexelius/contrast | [
"ef7d6d8c51fb922e89c1c46db734e3c09f88a9fc"
] | [
"beamlines/nanomax/macro_attenuate.py"
] | [
"\"\"\"\nModule providing a macro to automatically absorb X percent of the\nbeam using the absorbers at the NanoMAX beamline\n\"\"\"\n\nimport os\nimport numpy as np\nfrom contrast.environment import env, macro, register_shortcut, runCommand\n\n# ToDo\n# - avoid elements with absorption edges close to the current energy\n# - way of printing the closest possible absorption values\n\n@macro\nclass attenuate(object):\n \"\"\"\n Sets the attenuators to absorb X percent of the beam depending \n on the current photon beam enegery.\n\n usage / examples:\n %attenuate # show current attenuator setting / value\n %attenuate 0.2 # attenuate to 20% beam intensity\n %attenuate 0.1 ['Si','Al'] # attenuate to 10% but only use Si and Al\n # ['Al','Ti','Si','Cu','Fe','Mo','Ta','Ag'] \n %attenuate 0.2 how='unsafe' # attenuate to 20% beam intensity without\n # manually confirming the motor movement\n # ... for the usement in macros\n \"\"\"\n\n ############################################################################\n # absorber settings at the NanoMAX beamline - status 2019-10-06 \n ############################################################################\n\n elements = ['Al', 'Ti', 'Si', 'Cu', None, 'Fe', 'Mo', 'Ta', 'Ag']\n position = [-40000, -29000, -18000, -9000, 0, 11000, 21000, 33000, 41000]\n carriers = ['attenuator1_x', 'attenuator2_x', \n 'attenuator3_x', 'attenuator4_x'] \n thickness = [[ 25, 50, 100, 100], # in um\n [ 20, 40, 80, 160],\n [ 250, 500, 1000, 1000],\n [ 20, 40, 80, 160],\n [ 0, 0, 0, 0],\n [ 50, 100, 200, 400],\n [ 15, 30, 60, 120],\n [ 20, 40, 80, 160],\n [ 25, 50, 100, 200]]\n thickness = np.array(thickness)\n\n ############################################################################\n # loading offline data between 5 and 25 keV \n # taken from http://henke.lbl.gov/optical_constants/filter2.html\n ############################################################################\n\n absorption_data = {}\n base = os.path.dirname(os.path.realpath(__file__))\n base += '/attenuation/attenuation_1um_'\n for element in [x for x in elements if not(x==None)]:\n fpath = base + element + '.txt'\n data = np.loadtxt(fpath, skiprows=2)\n absorption_data[element] = data\n\n ############################################################################\n # methods\n ############################################################################\n\n def __init__(self, attenuate_to=None, use_ele=['Al', 'Ti', 'Si', 'Cu', 'Fe', 'Mo', 'Ta', 'Ag'], how='safe', verbosity=3):\n self.attenuate_to = attenuate_to\n self.how = how\n self.verbosity = verbosity\n self.use_ele = use_ele\n self.use_ele.append(None)\n\n def get_current_energy(self):\n runCommand('wms energy')\n #print(env)\n self.photon_energy = env.lastMacroResult\n\n def calculate_transmission_of_1um(self):\n # linear interpolation of T(E) in log log \n self.transmission_1um = {}\n for element in [x for x in self.elements if not(x==None)]:\n T_log = np.interp(x = np.log(self.photon_energy),\n xp = np.log(self.absorption_data[element][:,0]),\n fp = np.log(self.absorption_data[element][:,1]))\n self.transmission_1um[element] = np.exp(T_log)\n\n def calculate_transmission_of_actual_foils(self):\n self.transmission = 1.*np.ones_like(self.thickness)\n for i, element in enumerate(self.elements):\n for j, carrier in enumerate(self.carriers):\n if not(element==None):\n d_um = self.thickness[i,j]\n T = (self.transmission_1um[element])**d_um\n self.transmission[i,j] = 1.*T\n\n def calcualte_possible_permutations(self):\n self.T_tot = [[T1*T2*T3*T4, i1, 
i2, i3, i4, \n [self.elements[i1], self.elements[i2], \n self.elements[i3], self.elements[i4]]] \n for i1, T1 in enumerate(self.transmission[:,0]) \n for i2, T2 in enumerate(self.transmission[:,1]) \n for i3, T3 in enumerate(self.transmission[:,2]) \n for i4, T4 in enumerate(self.transmission[:,3]) ] \n self.T_tot = np.array(self.T_tot)\n self.T_tot = self.T_tot[np.argsort(self.T_tot[:,0])]\n\n def run_command(self, command):\n runCommand(command)\n\n def get_current_carrier_positions(self):\n carrier_positions = []\n for carrier in sorted(self.carriers):\n runCommand('wms '+carrier)\n carrier_positions.append(env.lastMacroResult)\n return np.array(carrier_positions)\n\n def estiamte_carrier_index(self, position):\n array = np.asarray(self.position)\n idx = (np.abs(array - position)).argmin()\n return idx\n\n def show_current_attenuation(self, printing=True):\n carrier_positions = self.get_current_carrier_positions()\n carrier_indices = np.array([self.estiamte_carrier_index(pos) for \n pos in carrier_positions])\n self.get_current_energy()\n self.calculate_transmission_of_1um()\n self.calculate_transmission_of_actual_foils()\n self.T_currently = 1\n for j, i in enumerate(carrier_indices):\n self.T_currently *= self.transmission[i,j]\n \n if printing:\n print('currently:')\n print(' absorption ', str(1-self.T_currently))\n print(' transmission', str(self.T_currently))\n print('with:')\n for i_carrier, i_pos in enumerate(carrier_indices):\n i_pos = int(i_pos)\n line = ' ' + self.carriers[i_carrier]\n line += ' '+ str(carrier_positions[i_carrier]).rjust(10)\n line += ' #' + str(self.thickness[i_pos, i_carrier]).rjust(5)\n line += ' um of ' + str(self.elements[i_pos])\n print(line)\n\n def input_validation(self):\n if self.attenuate_to == None:\n self.show_current_attenuation()\n return False\n elif not(isinstance(self.attenuate_to, (int, float))):\n print('no number given as attenuation value')\n return False\n else:\n return True\n\n def check_possible_permutations_for_elements(self):\n self.T_allowed = []\n for permutation in self.T_tot:\n works = 0\n for have_to_use in permutation[5]:\n if have_to_use in self.use_ele: works+=1\n if works == len(permutation[5]):\n self.T_allowed.append(permutation)\n self.T_allowed = np.array(self.T_allowed)\n\n def run(self):\n if self.input_validation():\n\n self.get_current_energy()\n self.calculate_transmission_of_1um()\n self.calculate_transmission_of_actual_foils()\n self.calcualte_possible_permutations()\n self.check_possible_permutations_for_elements()\n\n self.T_min = 1.*self.T_allowed[0,0]\n\n try:\n if self.attenuate_to == 'max':\n print('choosing maximal possible attenuation')\n self.T_choosen = 1.*self.T_allowed[0,:]\n self.attenuate_to = 1.-self.T_choosen[0]\n\n # is the choosen absorption value reachable?\n elif ((self.attenuate_to > 1) or \n (round(1-self.T_min,3 ) <= self.attenuate_to)):\n print('absorption of', self.attenuate_to, \n 'cannot be reached')\n print('instead choosing maximum possible attenuation')\n self.T_choosen = 1.*self.T_allowed[0,:]\n\n # which combination gives the closest result?\n else:\n self.T_choosen = list(filter(lambda i: i[0] <= 1-self.attenuate_to, \n self.T_allowed))[-1]\n except ValueError:\n print(\"Oops! 
That was no valid input\")\n\n # get needed mv motor commands\n commands = []\n for i_carrier, i_pos in enumerate(self.T_choosen[1:1+len(self.carriers)]):\n i_pos = int(i_pos)\n command = 'mv ' + str(self.carriers[i_carrier])\n command += ' ' + str(self.position[i_pos]).ljust(8)\n commands.append(command)\n\n # print an output\n if self.verbosity>=3 or self.how=='safe':\n print('aimed for:')\n print(' absorption ', self.attenuate_to)\n print(' transmission', max(0, 1-self.attenuate_to))\n print(' at currently', self.photon_energy, 'eV')\n print('can achieve:')\n print(' absorption ', str(1-self.T_choosen[0]))\n print(' transmission', str(self.T_choosen[0]))\n print('with motor setting:')\n\n for i_carrier, i_pos in enumerate(self.T_choosen[1:1+len(self.carriers)]):\n i_pos = int(i_pos)\n line = ' ' + commands[i_carrier]\n line += '#' + str(self.thickness[i_pos, i_carrier]).rjust(5)\n line += ' um of ' + str(self.elements[i_pos])\n print(line)\n\n # move motors\n if self.how=='safe':\n yes = ['yes', 'y', '1', 'true']\n user_input = input('Proceed to move motors? [Y/n] ').lower()\n if user_input in yes:\n\n # run all motor movement commands\n for command in commands: self.run_command(command)\n\n # check that the motors have moved to the calculated position\n self.show_current_attenuation(printing=False)\n if self.T_currently != self.T_choosen[0]:\n print('\\x1b[0;49;91[ERROR] mattenuation was NOT set\\x1b[0m')\n else:\n print('\\x1b[0;49;92msuccessfully set the attenuation\\x1b[0m')\n\n else: \n for command in commands: self.run_command(command)\n"
] | [
[
"numpy.log",
"numpy.ones_like",
"numpy.abs",
"numpy.asarray",
"numpy.exp",
"numpy.argsort",
"numpy.array",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
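The NanoMAX attenuation macro stored above computes foil transmission by interpolating tabulated 1 um transmission values in log-log space and raising the result to the foil thickness in microns. A minimal sketch of that calculation with a made-up table (the real macro loads henke.lbl.gov attenuation_1um_<element>.txt files from disk, which are not reproduced here):

import numpy as np

# Hypothetical (energy_eV, transmission_of_1um) table -- a stand-in for the
# per-element files the macro reads.
table = np.array([
    [5000.0, 0.90],
    [10000.0, 0.98],
    [25000.0, 0.999],
])

def transmission(energy_ev, thickness_um):
    # Linear interpolation of T(E) in log-log space, as in the macro.
    t_log = np.interp(np.log(energy_ev), np.log(table[:, 0]), np.log(table[:, 1]))
    t_1um = np.exp(t_log)
    # Beer-Lambert: a foil of d um behaves like d stacked 1 um layers,
    # so transmissions multiply, i.e. T_1um ** d.
    return t_1um ** thickness_um

print(transmission(12000.0, 100))  # transmission of a 100 um foil at 12 keV
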
martcous/dipy | [
"6bff5655f03db19bde5aa951ffb91987983a889b",
"6bff5655f03db19bde5aa951ffb91987983a889b",
"6bff5655f03db19bde5aa951ffb91987983a889b",
"6bff5655f03db19bde5aa951ffb91987983a889b",
"6bff5655f03db19bde5aa951ffb91987983a889b"
] | [
"dipy/reconst/tests/test_peak_finding.py",
"dipy/core/tests/test_ndindex.py",
"dipy/reconst/tests/test_cache.py",
"dipy/align/tests/test_metrics.py",
"dipy/denoise/nlmeans.py"
] | [
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport numpy.testing as npt\nfrom dipy.reconst.recspeed import (local_maxima, remove_similar_vertices,\n search_descending)\nfrom dipy.data import get_sphere, get_data\nfrom dipy.core.sphere import unique_edges, HemiSphere\nfrom dipy.sims.voxel import all_tensor_evecs\n\n\ndef test_local_maxima():\n sphere = get_sphere('symmetric724')\n vertices, faces = sphere.vertices, sphere.faces\n edges = unique_edges(faces)\n\n # Check that the first peak is == max(odf)\n odf = abs(vertices.sum(-1))\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_equal(max(odf), peak_values[0])\n npt.assert_equal(max(odf), odf[peak_index[0]])\n\n # Create an artificial odf with a few peaks\n odf = np.zeros(len(vertices))\n odf[1] = 1.\n odf[143] = 143.\n odf[505] = 505.\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_array_equal(peak_values, [505, 143, 1])\n npt.assert_array_equal(peak_index, [505, 143, 1])\n\n # Check that neighboring points can both be peaks\n odf = np.zeros(len(vertices))\n point1, point2 = edges[0]\n odf[[point1, point2]] = 1.\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_array_equal(peak_values, [1., 1.])\n npt.assert_(point1 in peak_index)\n npt.assert_(point2 in peak_index)\n\n # Repeat with a hemisphere\n hemisphere = HemiSphere(xyz=vertices, faces=faces)\n vertices, edges = hemisphere.vertices, hemisphere.edges\n\n # Check that the first peak is == max(odf)\n odf = abs(vertices.sum(-1))\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_equal(max(odf), peak_values[0])\n npt.assert_equal(max(odf), odf[peak_index[0]])\n\n # Create an artificial odf with a few peaks\n odf = np.zeros(len(vertices))\n odf[1] = 1.\n odf[143] = 143.\n odf[300] = 300.\n peak_value, peak_index = local_maxima(odf, edges)\n npt.assert_array_equal(peak_value, [300, 143, 1])\n npt.assert_array_equal(peak_index, [300, 143, 1])\n\n # Check that neighboring points can both be peaks\n odf = np.zeros(len(vertices))\n point1, point2 = edges[0]\n odf[[point1, point2]] = 1.\n peak_values, peak_index = local_maxima(odf, edges)\n npt.assert_array_equal(peak_values, [1., 1.])\n npt.assert_(point1 in peak_index)\n npt.assert_(point2 in peak_index)\n\n # Should raise an error if odf has nans\n odf[20] = np.nan\n npt.assert_raises(ValueError, local_maxima, odf, edges)\n\n # Should raise an error if edge values are too large to index odf\n edges[0, 0] = 9999\n odf[20] = 0\n npt.assert_raises(IndexError, local_maxima, odf, edges)\n\n\ndef test_remove_similar_peaks():\n vertices = np.array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.],\n [1.1, 1., 0.],\n [0., 2., 1.],\n [2., 1., 0.],\n [1., 0., 0.]])\n norms = np.sqrt((vertices*vertices).sum(-1))\n vertices = vertices/norms[:, None]\n\n # Return unique vertices\n uv = remove_similar_vertices(vertices, .01)\n npt.assert_array_equal(uv, vertices[:6])\n\n # Return vertices with mapping and indices\n uv, mapping, index = remove_similar_vertices(vertices, .01,\n return_mapping=True,\n return_index=True)\n npt.assert_array_equal(uv, vertices[:6])\n npt.assert_array_equal(mapping, list(range(6)) + [0])\n npt.assert_array_equal(index, range(6))\n\n # Test mapping with different angles\n uv, mapping = remove_similar_vertices(vertices, .01, return_mapping=True)\n npt.assert_array_equal(uv, vertices[:6])\n npt.assert_array_equal(mapping, list(range(6)) + [0])\n uv, mapping = remove_similar_vertices(vertices, 30, return_mapping=True)\n 
npt.assert_array_equal(uv, vertices[:4])\n npt.assert_array_equal(mapping, list(range(4)) + [1, 0, 0])\n uv, mapping = remove_similar_vertices(vertices, 60, return_mapping=True)\n npt.assert_array_equal(uv, vertices[:3])\n npt.assert_array_equal(mapping, list(range(3)) + [0, 1, 0, 0])\n\n # Test index with different angles\n uv, index = remove_similar_vertices(vertices, .01, return_index=True)\n npt.assert_array_equal(uv, vertices[:6])\n npt.assert_array_equal(index, range(6))\n uv, index = remove_similar_vertices(vertices, 30, return_index=True)\n npt.assert_array_equal(uv, vertices[:4])\n npt.assert_array_equal(index, range(4))\n uv, index = remove_similar_vertices(vertices, 60, return_index=True)\n npt.assert_array_equal(uv, vertices[:3])\n npt.assert_array_equal(index, range(3))\n\n\ndef test_search_descending():\n a = np.linspace(10., 1., 10)\n\n npt.assert_equal(search_descending(a, 1.), 1)\n npt.assert_equal(search_descending(a, .89), 2)\n npt.assert_equal(search_descending(a, .79), 3)\n\n # Test small array\n npt.assert_equal(search_descending(a[:1], 1.), 1)\n npt.assert_equal(search_descending(a[:1], 0.), 1)\n npt.assert_equal(search_descending(a[:1], .5), 1)\n\n # Test very small array\n npt.assert_equal(search_descending(a[:0], 1.), 0)\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n",
"from dipy.core.ndindex import ndindex\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\n\ndef test_ndindex():\n x = list(ndindex((1, 2, 3)))\n expected = [ix for ix, e in np.ndenumerate(np.zeros((1, 2, 3)))]\n assert_array_equal(x, expected)\n\n\ndef test_ndindex_0d():\n x = list(ndindex(np.array(1).shape))\n expected = [()]\n assert_array_equal(x, expected)\n",
"from dipy.reconst.cache import Cache\nfrom dipy.core.sphere import Sphere\n\nfrom numpy.testing import assert_, assert_equal, run_module_suite\n\n\nclass TestModel(Cache):\n def __init__(self):\n pass\n\n\ndef test_basic_cache():\n t = TestModel()\n s = Sphere(theta=[0], phi=[0])\n\n assert_(t.cache_get(\"design_matrix\", s) is None)\n\n m = [[1, 0], [0, 1]]\n\n t.cache_set(\"design_matrix\", key=s, value=m)\n assert_equal(t.cache_get(\"design_matrix\", s), m)\n\n t.cache_clear()\n assert_(t.cache_get(\"design_matrix\", s) is None)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"import numpy as np\nfrom scipy import ndimage\nfrom dipy.align import floating\nfrom dipy.align.metrics import SSDMetric, CCMetric, EMMetric\nfrom numpy.testing import (assert_array_equal,\n assert_array_almost_equal,\n assert_raises)\n\n\ndef test_exceptions():\n for invalid_dim in [-1, 0, 1, 4, 5]:\n assert_raises(ValueError, CCMetric, invalid_dim)\n assert_raises(ValueError, EMMetric, invalid_dim)\n assert_raises(ValueError, SSDMetric, invalid_dim)\n assert_raises(ValueError, SSDMetric, 3, step_type='unknown_metric_name')\n assert_raises(ValueError, EMMetric, 3, step_type='unknown_metric_name')\n\n\ndef test_EMMetric_image_dynamics():\n np.random.seed(7181309)\n metric = EMMetric(2)\n\n target_shape = (10, 10)\n # create a random image\n image = np.ndarray(target_shape, dtype=floating)\n image[...] = np.random.randint(\n 0, 10, np.size(image)).reshape(tuple(target_shape))\n # compute the expected binary mask\n expected = (image > 0).astype(np.int32)\n\n metric.use_static_image_dynamics(image, None)\n assert_array_equal(expected, metric.static_image_mask)\n\n metric.use_moving_image_dynamics(image, None)\n assert_array_equal(expected, metric.moving_image_mask)\n\n\ndef test_em_demons_step_2d():\n r\"\"\"\n Compares the output of the demons step in 2d against an analytical\n step. The fixed image is given by $F(x) = \\frac{1}{2}||x - c_f||^2$, the\n moving image is given by $G(x) = \\frac{1}{2}||x - c_g||^2$,\n $x, c_f, c_g \\in R^{2}$\n\n References\n ----------\n [Vercauteren09] Vercauteren, T., Pennec, X., Perchant, A., & Ayache, N.\n (2009). Diffeomorphic demons: efficient non-parametric\n image registration. NeuroImage, 45(1 Suppl), S61-72.\n doi:10.1016/j.neuroimage.2008.10.040\n \"\"\"\n # Select arbitrary images' shape (same shape for both images)\n sh = (20, 10)\n\n # Select arbitrary centers\n c_f = np.asarray(sh) / 2\n c_g = c_f + 0.5\n\n # Compute the identity vector field I(x) = x in R^2\n x_0 = np.asarray(range(sh[0]))\n x_1 = np.asarray(range(sh[1]))\n X = np.ndarray(sh + (2,), dtype=np.float64)\n O = np.ones(sh)\n X[..., 0] = x_0[:, None] * O\n X[..., 1] = x_1[None, :] * O\n\n # Compute the gradient fields of F and G\n grad_F = X - c_f\n grad_G = X - c_g\n\n # The squared norm of grad_G to be used later\n sq_norm_grad_F = np.sum(grad_F**2, -1)\n sq_norm_grad_G = np.sum(grad_G**2, -1)\n\n # Compute F and G\n F = 0.5 * sq_norm_grad_F\n G = 0.5 * sq_norm_grad_G\n\n # Create an instance of EMMetric\n metric = EMMetric(2)\n metric.static_spacing = np.array([1.2, 1.2])\n # The $\\sigma_x$ (eq. 4 in [Vercauteren09]) parameter is computed in ANTS\n # based on the image's spacing\n sigma_x_sq = np.sum(metric.static_spacing**2) / metric.dim\n # Set arbitrary values for $\\sigma_i$ (eq. 
4 in [Vercauteren09])\n # The original Demons algorithm used simply |F(x) - G(x)| as an\n # estimator, so let's use it as well\n sigma_i_sq = (F - G)**2\n # Set the properties relevant to the demons methods\n metric.smooth = 3.0\n metric.gradient_static = np.array(grad_F, dtype=floating)\n metric.gradient_moving = np.array(grad_G, dtype=floating)\n metric.static_image = np.array(F, dtype=floating)\n metric.moving_image = np.array(G, dtype=floating)\n metric.staticq_means_field = np.array(F, dtype=floating)\n metric.staticq_sigma_sq_field = np.array(sigma_i_sq, dtype=floating)\n metric.movingq_means_field = np.array(G, dtype=floating)\n metric.movingq_sigma_sq_field = np.array(sigma_i_sq, dtype=floating)\n\n # compute the step using the implementation under test\n actual_forward = metric.compute_demons_step(True)\n actual_backward = metric.compute_demons_step(False)\n\n # Now directly compute the demons steps according to eq 4 in\n # [Vercauteren09]\n num_fwd = sigma_x_sq * (G - F)\n den_fwd = sigma_x_sq * sq_norm_grad_F + sigma_i_sq\n # This is $J^{P}$ in eq. 4 [Vercauteren09]\n expected_fwd = -1 * np.array(grad_F)\n expected_fwd[..., 0] *= num_fwd / den_fwd\n expected_fwd[..., 1] *= num_fwd / den_fwd\n # apply Gaussian smoothing\n expected_fwd[..., 0] = ndimage.filters.gaussian_filter(\n expected_fwd[..., 0], 3.0)\n expected_fwd[..., 1] = ndimage.filters.gaussian_filter(\n expected_fwd[..., 1], 3.0)\n\n num_bwd = sigma_x_sq * (F - G)\n den_bwd = sigma_x_sq * sq_norm_grad_G + sigma_i_sq\n # This is $J^{P}$ in eq. 4 [Vercauteren09]\n expected_bwd = -1 * np.array(grad_G)\n expected_bwd[..., 0] *= num_bwd / den_bwd\n expected_bwd[..., 1] *= num_bwd / den_bwd\n # apply Gaussian smoothing\n expected_bwd[..., 0] = ndimage.filters.gaussian_filter(\n expected_bwd[..., 0], 3.0)\n expected_bwd[..., 1] = ndimage.filters.gaussian_filter(\n expected_bwd[..., 1], 3.0)\n\n assert_array_almost_equal(actual_forward, expected_fwd)\n assert_array_almost_equal(actual_backward, expected_bwd)\n\n\ndef test_em_demons_step_3d():\n r\"\"\"\n Compares the output of the demons step in 3d against an analytical\n step. The fixed image is given by $F(x) = \\frac{1}{2}||x - c_f||^2$, the\n moving image is given by $G(x) = \\frac{1}{2}||x - c_g||^2$,\n $x, c_f, c_g \\in R^{3}$\n\n References\n ----------\n [Vercauteren09] Vercauteren, T., Pennec, X., Perchant, A., & Ayache, N.\n (2009). Diffeomorphic demons: efficient non-parametric\n image registration. NeuroImage, 45(1 Suppl), S61-72.\n doi:10.1016/j.neuroimage.2008.10.040\n \"\"\"\n # Select arbitrary images' shape (same shape for both images)\n sh = (20, 15, 10)\n\n # Select arbitrary centers\n c_f = np.asarray(sh) / 2\n c_g = c_f + 0.5\n\n # Compute the identity vector field I(x) = x in R^2\n x_0 = np.asarray(range(sh[0]))\n x_1 = np.asarray(range(sh[1]))\n x_2 = np.asarray(range(sh[2]))\n X = np.ndarray(sh + (3,), dtype=np.float64)\n O = np.ones(sh)\n X[..., 0] = x_0[:, None, None] * O\n X[..., 1] = x_1[None, :, None] * O\n X[..., 2] = x_2[None, None, :] * O\n\n # Compute the gradient fields of F and G\n grad_F = X - c_f\n grad_G = X - c_g\n\n # The squared norm of grad_G to be used later\n sq_norm_grad_F = np.sum(grad_F**2, -1)\n sq_norm_grad_G = np.sum(grad_G**2, -1)\n\n # Compute F and G\n F = 0.5 * sq_norm_grad_F\n G = 0.5 * sq_norm_grad_G\n\n # Create an instance of EMMetric\n metric = EMMetric(3)\n metric.static_spacing = np.array([1.2, 1.2, 1.2])\n # The $\\sigma_x$ (eq. 
4 in [Vercauteren09]) parameter is computed in ANTS\n # based on the image's spacing\n sigma_x_sq = np.sum(metric.static_spacing**2) / metric.dim\n # Set arbitrary values for $\\sigma_i$ (eq. 4 in [Vercauteren09])\n # The original Demons algorithm used simply |F(x) - G(x)| as an\n # estimator, so let's use it as well\n sigma_i_sq = (F - G)**2\n # Set the properties relevant to the demons methods\n metric.smooth = 3.0\n metric.gradient_static = np.array(grad_F, dtype=floating)\n metric.gradient_moving = np.array(grad_G, dtype=floating)\n metric.static_image = np.array(F, dtype=floating)\n metric.moving_image = np.array(G, dtype=floating)\n metric.staticq_means_field = np.array(F, dtype=floating)\n metric.staticq_sigma_sq_field = np.array(sigma_i_sq, dtype=floating)\n metric.movingq_means_field = np.array(G, dtype=floating)\n metric.movingq_sigma_sq_field = np.array(sigma_i_sq, dtype=floating)\n\n # compute the step using the implementation under test\n actual_forward = metric.compute_demons_step(True)\n actual_backward = metric.compute_demons_step(False)\n\n # Now directly compute the demons steps according to eq 4 in\n # [Vercauteren09]\n num_fwd = sigma_x_sq * (G - F)\n den_fwd = sigma_x_sq * sq_norm_grad_F + sigma_i_sq\n expected_fwd = -1 * np.array(grad_F)\n expected_fwd[..., 0] *= num_fwd / den_fwd\n expected_fwd[..., 1] *= num_fwd / den_fwd\n expected_fwd[..., 2] *= num_fwd / den_fwd\n # apply Gaussian smoothing\n expected_fwd[..., 0] = ndimage.filters.gaussian_filter(\n expected_fwd[..., 0], 3.0)\n expected_fwd[..., 1] = ndimage.filters.gaussian_filter(\n expected_fwd[..., 1], 3.0)\n expected_fwd[..., 2] = ndimage.filters.gaussian_filter(\n expected_fwd[..., 2], 3.0)\n\n num_bwd = sigma_x_sq * (F - G)\n den_bwd = sigma_x_sq * sq_norm_grad_G + sigma_i_sq\n expected_bwd = -1 * np.array(grad_G)\n expected_bwd[..., 0] *= num_bwd / den_bwd\n expected_bwd[..., 1] *= num_bwd / den_bwd\n expected_bwd[..., 2] *= num_bwd / den_bwd\n # apply Gaussian smoothing\n expected_bwd[..., 0] = ndimage.filters.gaussian_filter(\n expected_bwd[..., 0], 3.0)\n expected_bwd[..., 1] = ndimage.filters.gaussian_filter(\n expected_bwd[..., 1], 3.0)\n expected_bwd[..., 2] = ndimage.filters.gaussian_filter(\n expected_bwd[..., 2], 3.0)\n\n assert_array_almost_equal(actual_forward, expected_fwd)\n assert_array_almost_equal(actual_backward, expected_bwd)\n\n\nif __name__ == '__main__':\n test_em_demons_step_2d()\n test_em_demons_step_3d()\n test_exceptions()\n test_EMMetric_image_dynamics()\n",
"from __future__ import division, print_function\n\nimport numpy as np\nfrom dipy.denoise.denspeed import nlmeans_3d\n\n\ndef nlmeans(arr, sigma, mask=None, patch_radius=1, block_radius=5,\n rician=True, num_threads=None):\n \"\"\" Non-local means for denoising 3D and 4D images\n\n Parameters\n ----------\n arr : 3D or 4D ndarray\n The array to be denoised\n mask : 3D ndarray\n sigma : float or 3D array\n standard deviation of the noise estimated from the data\n patch_radius : int\n patch size is ``2 x patch_radius + 1``. Default is 1.\n block_radius : int\n block size is ``2 x block_radius + 1``. Default is 5.\n rician : boolean\n If True the noise is estimated as Rician, otherwise Gaussian noise\n is assumed.\n num_threads : int\n Number of threads. If None (default) then all available threads\n will be used (all CPU cores).\n\n Returns\n -------\n denoised_arr : ndarray\n the denoised ``arr`` which has the same shape as ``arr``.\n \"\"\"\n\n if arr.ndim == 3:\n sigma = np.ones(arr.shape, dtype=np.float64) * sigma\n return nlmeans_3d(arr, mask, sigma,\n patch_radius, block_radius,\n rician, num_threads).astype(arr.dtype)\n\n elif arr.ndim == 4:\n denoised_arr = np.zeros_like(arr)\n\n if isinstance(sigma, np.ndarray) and sigma.ndim == 3:\n sigma = (np.ones(arr.shape, dtype=np.float64) *\n sigma[..., np.newaxis])\n else:\n sigma = np.ones(arr.shape, dtype=np.float64) * sigma\n\n for i in range(arr.shape[-1]):\n denoised_arr[..., i] = nlmeans_3d(arr[..., i],\n mask,\n sigma[..., i],\n patch_radius,\n block_radius,\n rician,\n num_threads).astype(arr.dtype)\n\n return denoised_arr\n\n else:\n raise ValueError(\"Only 3D or 4D array are supported!\", arr.shape)\n"
] | [
[
"numpy.linspace",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"numpy.array"
],
[
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.zeros"
],
[
"numpy.testing.run_module_suite"
],
[
"numpy.random.seed",
"numpy.asarray",
"numpy.ndarray",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.size",
"numpy.testing.assert_raises",
"scipy.ndimage.filters.gaussian_filter",
"numpy.array",
"numpy.sum",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.zeros_like",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Prakhar-Bhartiya/SentimentAnalysis | [
"8fa2664a57b01e7303ef26d1226a81c0e25be4b7"
] | [
"preprocessing.py"
] | [
"\"\"\"\nDATA DESCRIPTION\n\nsentiment140 dataset. It contains 1,600,000 tweets extracted using the twitter api . The tweets have been annotated (0 = negative, 4 = positive) and they can be used to detect sentiment .\n\nIt contains the following 6 fields:\n\ntarget: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)\nids: The id of the tweet ( 2087)\ndate: the date of the tweet (Sat May 16 23:58:44 UTC 2009)\nflag: The query (lyx). If there is no query, then this value is NO_QUERY.\nuser: the user that tweeted (robotickilldozr)\ntext: the text of the tweet (Lyx is cool)\n\n\"\"\"\n\n#import libraries\nimport pandas as pd\n\ndata = pd.read_csv('training.1600000.processed.noemoticon.csv',encoding = 'latin', header=None, nrows=25)\n\n#Adding header to data\ndata = data.rename(columns={0: 'target', 1: 'id', 2: 'TimeStamp', 3: 'query', 4: 'username', 5: 'content'})\n\n#Dropping unncessary columns\ndata.drop(['id','TimeStamp','query'], axis=1, inplace=True)\n\n\nprint(data.to_string())\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
chrisjonesBSU/fresnel | [
"92e17346899a78b68af9bc8006a6bec95e3476cc"
] | [
"fresnel/__init__.py"
] | [
"# Copyright (c) 2016-2021 The Regents of the University of Michigan\n# Part of fresnel, released under the BSD 3-Clause License.\n\n\"\"\"The fresnel ray tracing package.\"\"\"\n\nimport os\nimport numpy\n\nfrom . import geometry # noqa: F401 - ignore unused import\nfrom . import tracer\nfrom . import camera\nfrom . import color # noqa: F401 - ignore unused import (users will use)\nfrom . import light\nfrom . import version # noqa: F401 - ignore unused import (users will use)\n\nfrom . import _common\nif _common.cpu_built():\n from . import _cpu\nif _common.gpu_built():\n from . import _gpu\n\n\nclass Device(object):\n \"\"\"Hardware device to use for ray tracing.\n\n Args:\n mode (str): Specify execution mode: Valid values are ``auto``, ``gpu``,\n and ``cpu``.\n n (int): Specify the number of cpu threads / GPUs this device will use.\n *None* will use all available threads / devices.\n\n `Device` defines hardware device to use for ray tracing. `Scene` and\n `Tracer` instances must be attached to a `Device`. You may attach any number\n of scenes and tracers to a single `Device`.\n\n See Also:\n Tutorials:\n\n - :doc:`examples/02-Advanced-topics/01-Devices`\n - :doc:`examples/02-Advanced-topics/02-Tracer-methods`\n\n When mode is ``auto``, the default, `Device` will select GPU rendering if\n available and fall back on CPU rendering if not. Set mode to ``gpu`` or\n ``cpu`` to force a specific mode.\n\n Important:\n By default (``n==None``), this device will use all available GPUs or CPU\n cores. Set *n* to the number of GPUs or CPU cores this device should\n use. When selecting *n* GPUs, the device selects the first *n* in the\n `available_gpus` list.\n\n Tip:\n Use only a single `Device` to reduce memory consumption.\n\n The static member `available_modes` lists which modes are available. For a\n mode to be available, the corresponding module must be enabled at compile\n time. Additionally, there must be at least one GPU present for the ``gpu``\n mode to be available.\n\n .. 
code-block:: python\n\n >>> fresnel.Device.available_modes\n ['gpu', 'cpu', 'auto']\n \"\"\"\n\n available_modes = []\n \"\"\"list[str]: Available execution modes.\"\"\"\n\n available_gpus = []\n \"\"\"list[str]: Available GPUS.\"\"\"\n\n def __init__(self, mode='auto', n=None):\n # determine the number of available GPUs\n num_gpus = 0\n if _common.gpu_built():\n num_gpus = _gpu.get_num_available_devices()\n\n # determine the selected mode\n selected_mode = ''\n\n if mode == 'auto':\n if num_gpus > 0:\n selected_mode = 'gpu'\n else:\n selected_mode = 'cpu'\n if not _common.cpu_built():\n raise RuntimeError(\"No GPUs available AND CPU \"\n \"implementation is not compiled\")\n\n if mode == 'gpu':\n if not _common.gpu_built():\n raise RuntimeError(\"GPU implementation is not compiled\")\n if num_gpus == 0:\n raise RuntimeError(\"No GPUs are available\")\n selected_mode = 'gpu'\n\n if mode == 'cpu':\n if not _common.cpu_built():\n raise RuntimeError(\"CPU implementation is not compiled\")\n selected_mode = 'cpu'\n\n if n is None:\n thread_limit = -1\n else:\n thread_limit = int(n)\n\n # initialize the device\n if selected_mode == 'gpu':\n self.module = _gpu\n self._device = _gpu.Device(\n os.path.dirname(os.path.realpath(__file__)), thread_limit)\n self._mode = 'gpu'\n elif selected_mode == 'cpu':\n self.module = _cpu\n self._device = _cpu.Device(thread_limit)\n self._mode = 'cpu'\n else:\n raise ValueError(\"Invalid mode\")\n\n @property\n def mode(self):\n \"\"\"str: The active mode.\"\"\"\n return self._mode\n\n def __str__(self):\n \"\"\"Human readable `Device` summary.\"\"\"\n return '<fresnel.Device: ' + self._device.describe() + '>'\n\n\n# determine available Device modes\nif _common.gpu_built():\n if _gpu.get_num_available_devices() > 0:\n Device.available_modes.append('gpu')\n\nif _common.cpu_built():\n Device.available_modes.append('cpu')\n\nif len(Device.available_modes) > 0:\n Device.available_modes.append('auto')\n\n# determine available Device GPUs\nif _common.gpu_built():\n gpus_str = _gpu.Device.getAllGPUs()\n gpus_list = gpus_str.split('\\n')\n if len(gpus_list) >= 2:\n Device.available_gpus = gpus_list[:-1]\n\n\nclass Scene(object):\n \"\"\"Content of the scene to ray trace.\n\n Args:\n device (Device): Device to use when rendering the scene.\n\n camera (camera.Camera): Camera to view the scene. When `None`,\n defaults to::\n\n camera.Orthographic(position=(0, 0, 100),\n look_at=(0, 0, 0),\n up=(0, 1, 0),\n height=100)\n\n lights (list[Light]): Lights to light the scene. When `None`, defaults\n to: ``light.rembrandt()``\n\n `Scene` defines the contents of the scene to be traced, including any number\n of `Geometry` objects, the `Camera`, the `background_color`,\n `background_alpha`, and `lights`.\n\n Every `Scene` must be associated with a `Device`. 
For convenience, `Scene`\n creates a default `Device` when *device* is ``None``.\n\n See Also:\n Tutorials:\n\n - :doc:`examples/00-Basic-tutorials/00-Introduction`\n - :doc:`examples/00-Basic-tutorials/04-Scene-properties`\n - :doc:`examples/00-Basic-tutorials/05-Lighting-setups`\n - :doc:`examples/02-Advanced-topics/01-Devices`\n \"\"\"\n\n def __init__(self, device=None, camera=None, lights=None):\n if device is None:\n device = Device()\n\n self._device = device\n self._scene = self.device.module.Scene(self.device._device)\n self.geometry = []\n if camera is None:\n self.camera = globals()['camera'].Orthographic(position=(0, 0, 100),\n look_at=(0, 0, 0),\n up=(0, 1, 0),\n height=100)\n else:\n self.camera = camera\n\n if lights is None:\n self.lights = light.rembrandt()\n else:\n self.lights = lights\n\n self._tracer = None\n\n def get_extents(self):\n \"\"\"Get the extents of the scene.\n\n Returns:\n (3,2) `numpy.ndarray` of ``numpy.float32``: The lower left and\\\n upper right corners of the scene.\n \"\"\"\n if len(self.geometry) == 0:\n return numpy.array([[0, 0, 0], [0, 0, 0]], dtype=numpy.float32)\n\n scene_extents = self.geometry[0].get_extents()\n for geom in self.geometry[1:]:\n extents = geom.get_extents()\n scene_extents[0, :] = numpy.min(\n [scene_extents[0, :], extents[0, :]], axis=0)\n scene_extents[1, :] = numpy.max(\n [scene_extents[1, :], extents[1, :]], axis=0)\n\n return scene_extents\n\n @property\n def device(self):\n \"\"\"Device: Device this `Scene` is attached to.\"\"\"\n return self._device\n\n @property\n def camera(self):\n \"\"\"camera.Camera: Camera view parameters.\"\"\"\n return camera._from_cpp(self._scene.getCamera())\n\n @camera.setter\n def camera(self, value):\n if isinstance(value, camera.Camera):\n self._scene.setCamera(value._camera)\n else:\n raise TypeError(f\"camera {value} is not a fresnel.camera.Camera\")\n\n @property\n def background_color(self):\n \"\"\"((3, ) `numpy.ndarray` of ``numpy.float32``): Background color \\\n linear RGB.\n\n Note:\n Use `fresnel.color.linear` to convert standard sRGB colors into the\n linear color space used by fresnel.\n \"\"\"\n c = self._scene.getBackgroundColor()\n return numpy.array([c.r, c.g, c.b], dtype=numpy.float32)\n\n @background_color.setter\n def background_color(self, value):\n self._scene.setBackgroundColor(_common.RGBf(*value))\n\n @property\n def background_alpha(self):\n \"\"\"float: Background alpha (opacity) in the range [0,1].\"\"\"\n return self._scene.getBackgroundAlpha()\n\n @background_alpha.setter\n def background_alpha(self, value):\n self._scene.setBackgroundAlpha(value)\n\n @property\n def lights(self):\n \"\"\"list[Light]: Lights in the scene.\n\n `lights` is a sequence of up to 4 directional lights that apply to the\n scene. 
Each light has a direction, color, and size.\n \"\"\"\n return light._LightListProxy(self._scene.getLights())\n\n @lights.setter\n def lights(self, values):\n tmp = light._LightListProxy()\n for v in values:\n tmp.append(v)\n\n self._scene.setLights(tmp._lights)\n\n\ndef preview(scene, w=600, h=370, anti_alias=True):\n \"\"\"Preview a scene.\n\n Args:\n scene (`Scene`): Scene to render.\n w (int): Output image width (in pixels).\n h (int): Output image height (in pixels).\n anti_alias (bool): Whether to perform anti-aliasing.\n\n :py:func:`preview` is a shortcut that renders output with `tracer.Preview`.\n \"\"\"\n t = tracer.Preview(scene.device, w=w, h=h, anti_alias=anti_alias)\n return t.render(scene)\n\n\ndef pathtrace(scene, w=600, h=370, samples=64, light_samples=1):\n \"\"\"Path trace a scene.\n\n Args:\n scene (`Scene`): Scene to render.\n w (int): Output image width (in pixels).\n h (int): Output image height (in pixels).\n samples (int): Number of times to sample the pixels of the scene.\n\n light_samples (int): Number of light samples to take for each pixel\n sample.\n\n :py:func:`pathtrace` is a shortcut that renders output with `tracer.Path`.\n \"\"\"\n t = tracer.Path(scene.device, w=w, h=h)\n t.sample(scene, samples=samples, light_samples=light_samples)\n return t.output\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
brokenegg/transformer | [
"c402ccffd6be1e01c589ad2b9064a5837d4464c7"
] | [
"brokenegg_transformer/modeling/tf_utils.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common TF utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nimport tensorflow as tf\n\nfrom tensorflow.python.util import deprecation\nfrom brokenegg_transformer.modeling import activations\n\n\[email protected](\n None,\n \"tf.keras.layers.Layer supports multiple positional args and kwargs as \"\n \"input tensors. pack/unpack inputs to override __call__ is no longer \"\n \"needed.\"\n)\ndef pack_inputs(inputs):\n \"\"\"Pack a list of `inputs` tensors to a tuple.\n\n Args:\n inputs: a list of tensors.\n\n Returns:\n a tuple of tensors. if any input is None, replace it with a special constant\n tensor.\n \"\"\"\n inputs = tf.nest.flatten(inputs)\n outputs = []\n for x in inputs:\n if x is None:\n outputs.append(tf.constant(0, shape=[], dtype=tf.int32))\n else:\n outputs.append(x)\n return tuple(outputs)\n\n\[email protected](\n None,\n \"tf.keras.layers.Layer supports multiple positional args and kwargs as \"\n \"input tensors. pack/unpack inputs to override __call__ is no longer \"\n \"needed.\"\n)\ndef unpack_inputs(inputs):\n \"\"\"unpack a tuple of `inputs` tensors to a tuple.\n\n Args:\n inputs: a list of tensors.\n\n Returns:\n a tuple of tensors. if any input is a special constant tensor, replace it\n with None.\n \"\"\"\n inputs = tf.nest.flatten(inputs)\n outputs = []\n for x in inputs:\n if is_special_none_tensor(x):\n outputs.append(None)\n else:\n outputs.append(x)\n x = tuple(outputs)\n\n # To trick the very pointless 'unbalanced-tuple-unpacking' pylint check\n # from triggering.\n if len(x) == 1:\n return x[0]\n return tuple(outputs)\n\n\ndef is_special_none_tensor(tensor):\n \"\"\"Checks if a tensor is a special None Tensor.\"\"\"\n return tensor.shape.ndims == 0 and tensor.dtype == tf.int32\n\n\n# TODO(hongkuny): consider moving custom string-map lookup to keras api.\ndef get_activation(identifier):\n \"\"\"Maps a identifier to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n It checks string first and if it is one of customized activation not in TF,\n the corresponding activation will be returned. 
For non-customized activation\n names and callable identifiers, always fallback to tf.keras.activations.get.\n\n Args:\n identifier: String name of the activation function or callable.\n\n Returns:\n A Python function corresponding to the activation function.\n \"\"\"\n if isinstance(identifier, six.string_types):\n name_to_fn = {\n \"gelu\": activations.gelu,\n \"simple_swish\": activations.simple_swish,\n \"hard_swish\": activations.hard_swish,\n \"identity\": activations.identity,\n }\n identifier = str(identifier).lower()\n if identifier in name_to_fn:\n return tf.keras.activations.get(name_to_fn[identifier])\n return tf.keras.activations.get(identifier)\n\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n raise ValueError(\n \"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not \"\n \"equal to the expected tensor rank `%s`\" %\n (name, actual_rank, str(tensor.shape), str(expected_rank)))\n"
] | [
[
"tensorflow.keras.activations.get",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.nest.flatten",
"tensorflow.python.util.deprecation.deprecated"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
TammoR/LogicalFactorisationMachines | [
"55bd94001f2852ea61f69cbb07a0cbdb41231028"
] | [
"lom/_numba/lom_outputs_fuzzy.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nOutput functions for logical operator machine products\n\"\"\"\n\nimport numpy as np\nfrom numba import jit, prange # int8, float64,\n\n# fuzzy output functions mapping from scalar vectors of probabilities to\n# to single data-point\n\n\n# OR-AND\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_AND_product_fuzzy(Z_n, U_d):\n \"\"\"\n Compute probability of emitting a zero for fuzzy vectors under OR-AND logic.\n \"\"\"\n out = 1\n for l in range(Z_n.shape[0]):\n out *= 1 - Z_n[l] * U_d[l]\n return 1 - out # map to [-1,1], this is a shortcut here. it's correct.\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_AND_product_fuzzy_3d(Z_n, U_d, V_m):\n \"\"\"\n Compute probability of emitting a zero for fuzzy vectors under OR-AND logic.\n \"\"\"\n out = np.float64(1.0)\n for l in range(Z_n.shape[0]):\n out *= 1 - ( Z_n[l] * U_d[l] * V_m[l] )\n return 1 - out\n\n\n# XOR-AND\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True, parallel=False)\ndef XOR_AND_product_fuzzy(Z_n, U_d):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= 1 - Z_n[l_prime] * U_d[l_prime]\n out += Z_n[l] * U_d[l] * temp\n return out\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_AND_product_fuzzy_3d(Z_n, U_d, V_m):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= 1 - Z_n[l_prime] * U_d[l_prime] * V_m[l_prime]\n out += Z_n[l] * U_d[l] * V_m[l] * temp\n return out\n\n\n# XOR-NAND\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_NAND_product_fuzzy(Z_n, U_d):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= Z_n[l_prime] * U_d[l_prime]\n out += (1 - Z_n[l] * U_d[l]) * temp\n return out\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_NAND_product_fuzzy_3d(Z_n, U_d, V_m):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= Z_n[l_prime] * U_d[l_prime] * V_m[l_prime]\n out += (1 - Z_n[l] * U_d[l] * V_m[l]) * temp\n return out\n\n\n# OR-XOR\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_XOR_product_fuzzy(Z_n, U_d):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= (Z_n[l] * U_d[l]) + (1 - Z_n[l]) * (1 - U_d[l])\n return 1 - temp\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_XOR_product_fuzzy_3d(Z_n, U_d, V_m):\n temp = np.float64(1)\n # this is hard to generalise to arbitrary D\n for l in range(Z_n.shape[0]):\n temp *= 1 - Z_n[l] * (1 - U_d[l]) * (1 - V_m[l]) +\\\n U_d[l] * (1 - V_m[l]) * (1 - Z_n[l]) +\\\n V_m[l] * (1 - Z_n[l]) * (1 - U_d[l])\n return 1 - temp\n\n\n# NAND-XOR\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef NAND_XOR_product_fuzzy(Z_n, U_d):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= Z_n[l] * (1 - U_d[l]) + U_d[l] * (1 - Z_n[l])\n return 1 - temp\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef NAND_XOR_product_fuzzy_3d(Z_n, U_d, V_m):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= Z_n[l] * (1 - U_d[l]) * (1 - V_m[l]) +\\\n V_m[l] * (1 - Z_n[l]) * (1 - 
U_d[l]) +\\\n U_d[l] * (1 - V_m[l]) * (1 - Z_n[l])\n\n return 1 - temp\n\n\n# XOR_XOR\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_XOR_product_fuzzy(Z_n, U_d):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= Z_n[l_prime] * U_d[l_prime] +\\\n (1 - Z_n[l_prime]) * (1 - U_d[l_prime])\n out += temp * ((1 - Z_n[l]) * U_d[l] + (1 - U_d[l]) * Z_n[l])\n return out\n\n\n@jit('float64(float64, float64, float64)', nogil=True, nopython=True)\ndef p_XOR_fuzzy_3d(z, u, v):\n \"\"\"\n Compute XOR probability given p(x), p(u), p(z)\n \"\"\"\n return 3 * z * u * v - 2 * (z * u + u * v + z * v) + z + u + v\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_XOR_product_fuzzy_3d(Z_n, U_d, V_m):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= 1 - p_XOR_fuzzy_3d(\n Z_n[l_prime], U_d[l_prime], V_m[l_prime])\n out += temp * p_XOR_fuzzy_3d(Z_n[l], U_d[l], V_m[l])\n return out\n\n\n# XOR_NXOR\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_NXOR_product_fuzzy(Z_n, U_d):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= Z_n[l_prime] * (1 - U_d[l_prime]) +\\\n (1 - Z_n[l_prime]) * U_d[l_prime]\n out += temp * ((Z_n[l] * U_d[l]) + (1 - U_d[l]) * (1 - Z_n[l]))\n return out\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef XOR_NXOR_product_fuzzy_3d(Z_n, U_d, V_m):\n out = np.float64(0.0)\n for l in prange(Z_n.shape[0]):\n temp = 1.0\n for l_prime in range(Z_n.shape[0]):\n if l != l_prime:\n temp *= p_XOR_fuzzy_3d(\n Z_n[l_prime], U_d[l_prime], V_m[l_prime])\n out += temp * (1 - p_XOR_fuzzy_3d(Z_n[l], U_d[l], V_m[l]))\n return out\n\n\n# OR_NAND\n@jit('float64(float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_NAND_product_fuzzy(Z_n, U_d):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= Z_n[l] * U_d[l]\n return 1 - temp\n\n\n@jit('float64(float64[:], float64[:], float64[:])', nogil=True, nopython=True)\ndef OR_NAND_product_fuzzy_3d(Z_n, U_d, V_m):\n temp = np.float64(1)\n for l in range(Z_n.shape[0]):\n temp *= Z_n[l] * U_d[l] * V_m[l]\n return 1 - temp\n\n\n# MAX_AND\n@jit('float64[:, :](float64[:, :], float64[:, :], float64[:])',\n nogil=True, nopython=False, parallel=True)\ndef MAX_AND_product_fuzzy(Z, U, lbdas):\n N = Z.shape[0]\n D = U.shape[0]\n L = Z.shape[1]\n out = np.zeros([N, D]) # , dtype=np.float)\n for n in prange(N):\n for d in range(D):\n acc = 0 # accumulator for sum\n for l1 in range(L):\n temp1 = Z[n, l1] * U[d, l1] * lbdas[l1]\n # check for explaining away\n prod = 1\n for l2 in range(L):\n if l1 == l2:\n continue\n temp2 = Z[n, l2] * U[d, l2]\n if temp2 * lbdas[l2] > temp1:\n prod *= 1 - temp2\n acc += temp1 * prod\n out[n, d] = acc\n return out \n\n"
] | [
[
"numpy.zeros",
"numpy.float64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ipmach/Thesis2021 | [
"91dbb0eebba64f1fa2c18562e2c9f35f532ef7c0",
"91dbb0eebba64f1fa2c18562e2c9f35f532ef7c0"
] | [
"src/python_code/Models/model_PAE_CNN.py",
"src/python_code/Models/model_DAE_CNN.py"
] | [
"from Models.PAE_models.Encoder_CNN import EncoderCNN\nfrom Models.PAE_models.Decoder_CNN import DecoderCNN\nfrom Models.PAE_models.Bijecter import RealNVP\nfrom sklearn.preprocessing import MinMaxScaler\nfrom Models.AE_CNN_interface import AE_CNN\nimport joblib\n\n\nclass PAECNN(AE_CNN):\n\n def __init__(self, filters, dim, switch=False, **kwargs):\n \"\"\"\n Wrapper for the Probabilistic AutoEncoder (PAE)\n :param dim: hyperparameters of the model [h_dim, z_dim, real_dim]\n \"\"\"\n # Define the hyperparameters\n super(PAECNN, self).__init__(filters, dim, **kwargs)\n # Initialize the models\n self.encoder_ = EncoderCNN(filters, self.z_dim)\n self.decoder_ = DecoderCNN(filters)\n self.b = RealNVP(num_coupling_layers=6, z_dim=self.z_dim)\n self.scaler = MinMaxScaler()\n # Use bijecter in incode or not\n self.switch = switch\n\n self.encoder_.build(input_shape=(None, self.img_shape[0],\n self.img_shape[1], self.img_shape[2]))\n self.decoder_.build(input_shape=(None, self.z_dim))\n self.b.build(input_shape=(None, self.z_dim))\n self.encode_ = self.encoder_\n\n def load_weights_model(self, list_path):\n \"\"\"\n Load the weights of the model\n :param path_encoder: path of the encoder weights (.h5)\n :param path_decoder: path of the decoder weights (.h5)\n :param path_discriminator: path of the discriminator weights (.h5)\n :param path_scaler: path scaler (.pkl)\n :return:\n \"\"\"\n [path_encoder, path_decoder, path_discriminator, path_scaler] = list_path\n self.encoder_.load_weights(path_encoder)\n self.decoder_.load_weights(path_decoder)\n self.b.load_weights(path_discriminator)\n self.scaler = joblib.load(path_scaler)\n\n def save_weights_model(self, list_path):\n \"\"\"\n Save the weights of the model\n \"\"\"\n [path_encoder, path_decoder, path_discriminator, path_scaler] = list_path\n self.encoder_.save_weights(path_encoder)\n self.decoder_.save_weights(path_decoder)\n self.b.save_weights(path_discriminator)\n joblib.dump(self.scaler, path_scaler + 'scaler.pkl')\n\n def encode(self, x):\n \"\"\"\n Encode input\n :param x: input\n :return: input in the latent space\n \"\"\"\n if self.switch:\n z = self.encoder_(x)\n return self.bijecter(z)\n else:\n return self.encoder_(x)\n\n def decode(self, z):\n \"\"\"\n Decode with activation function sigmoid\n :param z: latent space\n :return: output model\n \"\"\"\n if self.switch:\n x = self.bijecter(z, inverse=True)\n return self.decoder_(x)\n else:\n return self.decoder_(z)\n\n def bijecter(self, z, inverse=False):\n if inverse:\n b_data, _ = self.b.predict(z)\n return self.scaler.inverse_transform(b_data)\n else:\n b_data = self.scaler.transform(z)\n b_data, _ = self.b(b_data)\n return b_data\n\n def call_(self, inputs, training=None, mask=None, index=None):\n \"\"\"\n Function that works as __call__\n :param inputs: input data\n :param training: (Not use)\n :param mask: (Not use)\n :return\n \"\"\"\n return self.decoder_(self.encoder_(inputs))",
"from Models.AE_CNN_interface import AE_CNN\nimport tensorflow as tf\n\n\nclass DAECNN(AE_CNN):\n\n def __init__(self, filters, z_dim):\n super(DAECNN, self).__init__(filters, z_dim)\n self.fc1 = tf.keras.layers.Dense(self.z_dim)\n self.fc2 = tf.keras.layers.Dense(4 * 4 * 32)\n\n def encode(self, x):\n \"\"\"\n Encode input\n :param x: input\n :return: input in the latent space\n \"\"\"\n x = self.encoder_(x)\n z = self.fc1(x)\n return z\n\n def decode(self, z):\n \"\"\"\n Decode without activation function\n :param z: latent space\n :return: output model\n \"\"\"\n x = self.fc2(z)\n x = self.decoder_(x)\n return x\n\n def call_(self, inputs, training=None, mask=None, index=None):\n \"\"\"\n Function that works as __call__\n :param inputs: input data\n :param training: (Not use)\n :param mask: (Not use)\n :return: model output\n \"\"\"\n encoded = self.encode(inputs)\n reconstructed = self.decode(encoded)\n return reconstructed\n\n def load_weights_model(self, list_path):\n \"\"\"\n Load the weights of the model\n \"\"\"\n self.load_weights(list_path[0])\n\n def save_weights_model(self, list_path):\n \"\"\"\n Save the weights of the model\n \"\"\"\n self.save_weights(list_path[0])"
] | [
[
"sklearn.preprocessing.MinMaxScaler"
],
[
"tensorflow.keras.layers.Dense"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
crougeux/-a-i_v1.6.3_modif | [
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"d1296cdc26356443d6ec5869495eedb766ecbcf2",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"d1296cdc26356443d6ec5869495eedb766ecbcf2",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"d1296cdc26356443d6ec5869495eedb766ecbcf2",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab",
"b499a812e79f335d082d3f9b1070e0465ad67bab"
] | [
"build/numpy/numpy/distutils/misc_util.py",
"tests/build/scipy/scipy/odr/odrpack.py",
"tests/build/scipy/scipy/sparse/csgraph/_laplacian.py",
"tests/build/scipy/scipy/interpolate/tests/test_ndgriddata.py",
"build/numpy/numpy/core/__init__.py",
"tests/build/scipy/scipy/sparse/csgraph/_validation.py",
"build/numpy/numpy/fft/setup.py",
"tests/build/scipy/scipy/sparse/linalg/isolve/tests/test_lgmres.py",
"tests/build/scipy/scipy/sparse/linalg/matfuncs.py",
"tests/build/scipy/scipy/optimize/tests/test_nnls.py",
"tests/build/scipy/scipy/weave/tests/test_ext_tools.py",
"tests/build/scipy/scipy/linalg/matfuncs.py",
"tests/build/scipy/scipy/stats/tests/test_distributions.py",
"build/numpy/numpy/f2py/tests/test_return_character.py",
"tests/build/scipy/scipy/signal/waveforms.py",
"tests/build/scipy/scipy/ndimage/io.py",
"tests/build/scipy/scipy/sparse/linalg/benchmarks/bench_expm_multiply.py",
"tests/build/scipy/scipy/linalg/__init__.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport os\nimport re\nimport sys\nimport imp\nimport copy\nimport glob\nimport atexit\nimport tempfile\nimport subprocess\nimport shutil\n\nimport distutils\nfrom distutils.errors import DistutilsError\n\ntry:\n set\nexcept NameError:\n from sets import Set as set\n\nfrom numpy.distutils.compat import get_exception\n\n__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',\n 'dict_append', 'appendpath', 'generate_config_py',\n 'get_cmd', 'allpath', 'get_mathlibs',\n 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',\n 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',\n 'has_f_sources', 'has_cxx_sources', 'filter_sources',\n 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',\n 'get_script_files', 'get_lib_source_files', 'get_data_files',\n 'dot_join', 'get_frame', 'minrelpath', 'njoin',\n 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',\n 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info']\n\nclass InstallableLib(object):\n \"\"\"\n Container to hold information on an installable library.\n\n Parameters\n ----------\n name : str\n Name of the installed library.\n build_info : dict\n Dictionary holding build information.\n target_dir : str\n Absolute path specifying where to install the library.\n\n See Also\n --------\n Configuration.add_installed_library\n\n Notes\n -----\n The three parameters are stored as attributes with the same names.\n\n \"\"\"\n def __init__(self, name, build_info, target_dir):\n self.name = name\n self.build_info = build_info\n self.target_dir = target_dir\n\ndef quote_args(args):\n # don't used _nt_quote_args as it does not check if\n # args items already have quotes or not.\n args = list(args)\n for i in range(len(args)):\n a = args[i]\n if ' ' in a and a[0] not in '\"\\'':\n args[i] = '\"%s\"' % (a)\n return args\n\ndef allpath(name):\n \"Convert a /-separated pathname to one using the OS's path separator.\"\n splitted = name.split('/')\n return os.path.join(*splitted)\n\ndef rel_path(path, parent_path):\n \"\"\"Return path relative to parent_path.\n \"\"\"\n pd = os.path.abspath(parent_path)\n apath = os.path.abspath(path)\n if len(apath)<len(pd):\n return path\n if apath==pd:\n return ''\n if pd == apath[:len(pd)]:\n assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))\n path = apath[len(pd)+1:]\n return path\n\ndef get_path_from_frame(frame, parent_path=None):\n \"\"\"Return path of the module given a frame object from the call stack.\n\n Returned path is relative to parent_path when given,\n otherwise it is absolute path.\n \"\"\"\n\n # First, try to find if the file name is in the frame.\n try:\n caller_file = eval('__file__', frame.f_globals, frame.f_locals)\n d = os.path.dirname(os.path.abspath(caller_file))\n except NameError:\n # __file__ is not defined, so let's try __name__. 
We try this second\n # because setuptools spoofs __name__ to be '__main__' even though\n # sys.modules['__main__'] might be something else, like easy_install(1).\n caller_name = eval('__name__', frame.f_globals, frame.f_locals)\n __import__(caller_name)\n mod = sys.modules[caller_name]\n if hasattr(mod, '__file__'):\n d = os.path.dirname(os.path.abspath(mod.__file__))\n else:\n # we're probably running setup.py as execfile(\"setup.py\")\n # (likely we're building an egg)\n d = os.path.abspath('.')\n # hmm, should we use sys.argv[0] like in __builtin__ case?\n\n if parent_path is not None:\n d = rel_path(d, parent_path)\n\n return d or '.'\n\ndef njoin(*path):\n \"\"\"Join two or more pathname components +\n - convert a /-separated pathname to one using the OS's path separator.\n - resolve `..` and `.` from path.\n\n Either passing n arguments as in njoin('a','b'), or a sequence\n of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.\n \"\"\"\n paths = []\n for p in path:\n if is_sequence(p):\n # njoin(['a', 'b'], 'c')\n paths.append(njoin(*p))\n else:\n assert is_string(p)\n paths.append(p)\n path = paths\n if not path:\n # njoin()\n joined = ''\n else:\n # njoin('a', 'b')\n joined = os.path.join(*path)\n if os.path.sep != '/':\n joined = joined.replace('/', os.path.sep)\n return minrelpath(joined)\n\ndef get_mathlibs(path=None):\n \"\"\"Return the MATHLIB line from numpyconfig.h\n \"\"\"\n if path is not None:\n config_file = os.path.join(path, '_numpyconfig.h')\n else:\n # Look for the file in each of the numpy include directories.\n dirs = get_numpy_include_dirs()\n for path in dirs:\n fn = os.path.join(path, '_numpyconfig.h')\n if os.path.exists(fn):\n config_file = fn\n break\n else:\n raise DistutilsError('_numpyconfig.h not found in numpy include '\n 'dirs %r' % (dirs,))\n\n fid = open(config_file)\n mathlibs = []\n s = '#define MATHLIB'\n for line in fid:\n if line.startswith(s):\n value = line[len(s):].strip()\n if value:\n mathlibs.extend(value.split(','))\n fid.close()\n return mathlibs\n\ndef minrelpath(path):\n \"\"\"Resolve `..` and '.' from path.\n \"\"\"\n if not is_string(path):\n return path\n if '.' not in path:\n return path\n l = path.split(os.sep)\n while l:\n try:\n i = l.index('.', 1)\n except ValueError:\n break\n del l[i]\n j = 1\n while l:\n try:\n i = l.index('..', j)\n except ValueError:\n break\n if l[i-1]=='..':\n j += 1\n else:\n del l[i], l[i-1]\n j = 1\n if not l:\n return ''\n return os.sep.join(l)\n\ndef _fix_paths(paths, local_path, include_non_existing):\n assert is_sequence(paths), repr(type(paths))\n new_paths = []\n assert not is_string(paths), repr(paths)\n for n in paths:\n if is_string(n):\n if '*' in n or '?' 
in n:\n p = glob.glob(n)\n p2 = glob.glob(njoin(local_path, n))\n if p2:\n new_paths.extend(p2)\n elif p:\n new_paths.extend(p)\n else:\n if include_non_existing:\n new_paths.append(n)\n print('could not resolve pattern in %r: %r' %\n (local_path, n))\n else:\n n2 = njoin(local_path, n)\n if os.path.exists(n2):\n new_paths.append(n2)\n else:\n if os.path.exists(n):\n new_paths.append(n)\n elif include_non_existing:\n new_paths.append(n)\n if not os.path.exists(n):\n print('non-existing path in %r: %r' %\n (local_path, n))\n\n elif is_sequence(n):\n new_paths.extend(_fix_paths(n, local_path, include_non_existing))\n else:\n new_paths.append(n)\n return [minrelpath(p) for p in new_paths]\n\ndef gpaths(paths, local_path='', include_non_existing=True):\n \"\"\"Apply glob to paths and prepend local_path if needed.\n \"\"\"\n if is_string(paths):\n paths = (paths,)\n return _fix_paths(paths, local_path, include_non_existing)\n\n\n_temporary_directory = None\ndef clean_up_temporary_directory():\n global _temporary_directory\n if not _temporary_directory:\n return\n try:\n shutil.rmtree(_temporary_directory)\n except OSError:\n pass\n _temporary_directory = None\n\ndef make_temp_file(suffix='', prefix='', text=True):\n global _temporary_directory\n if not _temporary_directory:\n _temporary_directory = tempfile.mkdtemp()\n atexit.register(clean_up_temporary_directory)\n fid, name = tempfile.mkstemp(suffix=suffix,\n prefix=prefix,\n dir=_temporary_directory,\n text=text)\n fo = os.fdopen(fid, 'w')\n return fo, name\n\n# Hooks for colored terminal output.\n# See also http://www.livinglogic.de/Python/ansistyle\ndef terminal_has_colors():\n if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:\n # Avoid importing curses that causes illegal operation\n # with a message:\n # PYTHON2 caused an invalid page fault in\n # module CYGNURSES7.DLL as 015f:18bbfc28\n # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]\n # ssh to Win32 machine from debian\n # curses.version is 2.2\n # CYGWIN_98-4.10, release 1.5.7(0.109/3/2))\n return 0\n if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():\n try:\n import curses\n curses.setupterm()\n if (curses.tigetnum(\"colors\") >= 0\n and curses.tigetnum(\"pairs\") >= 0\n and ((curses.tigetstr(\"setf\") is not None\n and curses.tigetstr(\"setb\") is not None)\n or (curses.tigetstr(\"setaf\") is not None\n and curses.tigetstr(\"setab\") is not None)\n or curses.tigetstr(\"scp\") is not None)):\n return 1\n except Exception:\n pass\n return 0\n\nif terminal_has_colors():\n _colour_codes = dict(black=0, red=1, green=2, yellow=3,\n blue=4, magenta=5, cyan=6, white=7, default=9)\n def colour_text(s, fg=None, bg=None, bold=False):\n seq = []\n if bold:\n seq.append('1')\n if fg:\n fgcode = 30 + _colour_codes.get(fg.lower(), 0)\n seq.append(str(fgcode))\n if bg:\n bgcode = 40 + _colour_codes.get(fg.lower(), 7)\n seq.append(str(bgcode))\n if seq:\n return '\\x1b[%sm%s\\x1b[0m' % (';'.join(seq), s)\n else:\n return s\nelse:\n def colour_text(s, fg=None, bg=None):\n return s\n\ndef default_text(s):\n return colour_text(s, 'default')\ndef red_text(s):\n return colour_text(s, 'red')\ndef green_text(s):\n return colour_text(s, 'green')\ndef yellow_text(s):\n return colour_text(s, 'yellow')\ndef cyan_text(s):\n return colour_text(s, 'cyan')\ndef blue_text(s):\n return colour_text(s, 'blue')\n\n#########################\n\ndef cyg2win32(path):\n if sys.platform=='cygwin' and path.startswith('/cygdrive'):\n path = path[10] + ':' + os.path.normcase(path[11:])\n return 
path\n\ndef mingw32():\n \"\"\"Return true when using mingw32 environment.\n \"\"\"\n if sys.platform=='win32':\n if os.environ.get('OSTYPE', '')=='msys':\n return True\n if os.environ.get('MSYSTEM', '')=='MINGW32':\n return True\n return False\n\ndef msvc_runtime_library():\n \"Return name of MSVC runtime library if Python was built with MSVC >= 7\"\n msc_pos = sys.version.find('MSC v.')\n if msc_pos != -1:\n msc_ver = sys.version[msc_pos+6:msc_pos+10]\n lib = {'1300': 'msvcr70', # MSVC 7.0\n '1310': 'msvcr71', # MSVC 7.1\n '1400': 'msvcr80', # MSVC 8\n '1500': 'msvcr90', # MSVC 9 (VS 2008)\n '1600': 'msvcr100', # MSVC 10 (aka 2010)\n }.get(msc_ver, None)\n else:\n lib = None\n return lib\n\n\n#########################\n\n#XXX need support for .C that is also C++\ncxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\\Z', re.I).match\nfortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\\Z', re.I).match\nf90_ext_match = re.compile(r'.*[.](f90|f95)\\Z', re.I).match\nf90_module_name_match = re.compile(r'\\s*module\\s*(?P<name>[\\w_]+)', re.I).match\ndef _get_f90_modules(source):\n \"\"\"Return a list of Fortran f90 module names that\n given source file defines.\n \"\"\"\n if not f90_ext_match(source):\n return []\n modules = []\n f = open(source, 'r')\n for line in f:\n m = f90_module_name_match(line)\n if m:\n name = m.group('name')\n modules.append(name)\n # break # XXX can we assume that there is one module per file?\n f.close()\n return modules\n\ndef is_string(s):\n return isinstance(s, str)\n\ndef all_strings(lst):\n \"\"\"Return True if all items in lst are string objects. \"\"\"\n for item in lst:\n if not is_string(item):\n return False\n return True\n\ndef is_sequence(seq):\n if is_string(seq):\n return False\n try:\n len(seq)\n except:\n return False\n return True\n\ndef is_glob_pattern(s):\n return is_string(s) and ('*' in s or '?' 
is s)\n\ndef as_list(seq):\n if is_sequence(seq):\n return list(seq)\n else:\n return [seq]\n\ndef get_language(sources):\n # not used in numpy/scipy packages, use build_ext.detect_language instead\n \"\"\"Determine language value (c,f77,f90) from sources \"\"\"\n language = None\n for source in sources:\n if isinstance(source, str):\n if f90_ext_match(source):\n language = 'f90'\n break\n elif fortran_ext_match(source):\n language = 'f77'\n return language\n\ndef has_f_sources(sources):\n \"\"\"Return True if sources contains Fortran files \"\"\"\n for source in sources:\n if fortran_ext_match(source):\n return True\n return False\n\ndef has_cxx_sources(sources):\n \"\"\"Return True if sources contains C++ files \"\"\"\n for source in sources:\n if cxx_ext_match(source):\n return True\n return False\n\ndef filter_sources(sources):\n \"\"\"Return four lists of filenames containing\n C, C++, Fortran, and Fortran 90 module sources,\n respectively.\n \"\"\"\n c_sources = []\n cxx_sources = []\n f_sources = []\n fmodule_sources = []\n for source in sources:\n if fortran_ext_match(source):\n modules = _get_f90_modules(source)\n if modules:\n fmodule_sources.append(source)\n else:\n f_sources.append(source)\n elif cxx_ext_match(source):\n cxx_sources.append(source)\n else:\n c_sources.append(source)\n return c_sources, cxx_sources, f_sources, fmodule_sources\n\n\ndef _get_headers(directory_list):\n # get *.h files from list of directories\n headers = []\n for d in directory_list:\n head = glob.glob(os.path.join(d, \"*.h\")) #XXX: *.hpp files??\n headers.extend(head)\n return headers\n\ndef _get_directories(list_of_sources):\n # get unique directories from list of sources.\n direcs = []\n for f in list_of_sources:\n d = os.path.split(f)\n if d[0] != '' and not d[0] in direcs:\n direcs.append(d[0])\n return direcs\n\ndef get_dependencies(sources):\n #XXX scan sources for include statements\n return _get_headers(_get_directories(sources))\n\ndef is_local_src_dir(directory):\n \"\"\"Return true if directory is local directory.\n \"\"\"\n if not is_string(directory):\n return False\n abs_dir = os.path.abspath(directory)\n c = os.path.commonprefix([os.getcwd(), abs_dir])\n new_dir = abs_dir[len(c):].split(os.sep)\n if new_dir and not new_dir[0]:\n new_dir = new_dir[1:]\n if new_dir and new_dir[0]=='build':\n return False\n new_dir = os.sep.join(new_dir)\n return os.path.isdir(new_dir)\n\ndef general_source_files(top_path):\n pruned_directories = {'CVS':1, '.svn':1, 'build':1}\n prune_file_pat = re.compile(r'(?:[~#]|\\.py[co]|\\.o)$')\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n pruned = [ d for d in dirnames if d not in pruned_directories ]\n dirnames[:] = pruned\n for f in filenames:\n if not prune_file_pat.search(f):\n yield os.path.join(dirpath, f)\n\ndef general_source_directories_files(top_path):\n \"\"\"Return a directory name relative to top_path and\n files contained.\n \"\"\"\n pruned_directories = ['CVS', '.svn', 'build']\n prune_file_pat = re.compile(r'(?:[~#]|\\.py[co]|\\.o)$')\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n pruned = [ d for d in dirnames if d not in pruned_directories ]\n dirnames[:] = pruned\n for d in dirnames:\n dpath = os.path.join(dirpath, d)\n rpath = rel_path(dpath, top_path)\n files = []\n for f in os.listdir(dpath):\n fn = os.path.join(dpath, f)\n if os.path.isfile(fn) and not prune_file_pat.search(fn):\n files.append(fn)\n yield rpath, files\n dpath = top_path\n rpath = rel_path(dpath, top_path)\n filenames = 
[os.path.join(dpath, f) for f in os.listdir(dpath) \\\n if not prune_file_pat.search(f)]\n files = [f for f in filenames if os.path.isfile(f)]\n yield rpath, files\n\n\ndef get_ext_source_files(ext):\n # Get sources and any include files in the same directory.\n filenames = []\n sources = [_m for _m in ext.sources if is_string(_m)]\n filenames.extend(sources)\n filenames.extend(get_dependencies(sources))\n for d in ext.depends:\n if is_local_src_dir(d):\n filenames.extend(list(general_source_files(d)))\n elif os.path.isfile(d):\n filenames.append(d)\n return filenames\n\ndef get_script_files(scripts):\n scripts = [_m for _m in scripts if is_string(_m)]\n return scripts\n\ndef get_lib_source_files(lib):\n filenames = []\n sources = lib[1].get('sources', [])\n sources = [_m for _m in sources if is_string(_m)]\n filenames.extend(sources)\n filenames.extend(get_dependencies(sources))\n depends = lib[1].get('depends', [])\n for d in depends:\n if is_local_src_dir(d):\n filenames.extend(list(general_source_files(d)))\n elif os.path.isfile(d):\n filenames.append(d)\n return filenames\n\ndef get_shared_lib_extension(is_python_ext=False):\n \"\"\"Return the correct file extension for shared libraries.\n\n Parameters\n ----------\n is_python_ext : bool, optional\n Whether the shared library is a Python extension. Default is False.\n\n Returns\n -------\n so_ext : str\n The shared library extension.\n\n Notes\n -----\n For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,\n and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on\n POSIX systems according to PEP 3149. For Python 3.2 this is implemented on\n Linux, but not on OS X.\n\n \"\"\"\n confvars = distutils.sysconfig.get_config_vars()\n # SO is deprecated in 3.3.1, use EXT_SUFFIX instead\n so_ext = confvars.get('EXT_SUFFIX', None)\n if so_ext is None:\n so_ext = confvars.get('SO', '')\n\n if not is_python_ext:\n # hardcode known values, config vars (including SHLIB_SUFFIX) are\n # unreliable (see #3182)\n # darwin, windows and debug linux are wrong in 3.3.1 and older\n if (sys.platform.startswith('linux') or\n sys.platform.startswith('gnukfreebsd')):\n so_ext = '.so'\n elif sys.platform.startswith('darwin'):\n so_ext = '.dylib'\n elif sys.platform.startswith('win'):\n so_ext = '.dll'\n else:\n # fall back to config vars for unknown platforms\n # fix long extension for Python >=3.2, see PEP 3149.\n if 'SOABI' in confvars:\n # Does nothing unless SOABI config var exists\n so_ext = so_ext.replace('.' 
+ confvars.get('SOABI'), '', 1)\n\n return so_ext\n\ndef get_data_files(data):\n if is_string(data):\n return [data]\n sources = data[1]\n filenames = []\n for s in sources:\n if hasattr(s, '__call__'):\n continue\n if is_local_src_dir(s):\n filenames.extend(list(general_source_files(s)))\n elif is_string(s):\n if os.path.isfile(s):\n filenames.append(s)\n else:\n print('Not existing data file:', s)\n else:\n raise TypeError(repr(s))\n return filenames\n\ndef dot_join(*args):\n return '.'.join([a for a in args if a])\n\ndef get_frame(level=0):\n \"\"\"Return frame object from call stack with given level.\n \"\"\"\n try:\n return sys._getframe(level+1)\n except AttributeError:\n frame = sys.exc_info()[2].tb_frame\n for _ in range(level+1):\n frame = frame.f_back\n return frame\n\n\n######################\n\nclass Configuration(object):\n\n _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',\n 'libraries', 'headers', 'scripts', 'py_modules',\n 'installed_libraries', 'define_macros']\n _dict_keys = ['package_dir', 'installed_pkg_config']\n _extra_keys = ['name', 'version']\n\n numpy_include_dirs = []\n\n def __init__(self,\n package_name=None,\n parent_name=None,\n top_path=None,\n package_path=None,\n caller_level=1,\n setup_name='setup.py',\n **attrs):\n \"\"\"Construct configuration instance of a package.\n\n package_name -- name of the package\n Ex.: 'distutils'\n parent_name -- name of the parent package\n Ex.: 'numpy'\n top_path -- directory of the toplevel package\n Ex.: the directory where the numpy package source sits\n package_path -- directory of package. Will be computed by magic from the\n directory of the caller module if not specified\n Ex.: the directory where numpy.distutils is\n caller_level -- frame level to caller namespace, internal parameter.\n \"\"\"\n self.name = dot_join(parent_name, package_name)\n self.version = None\n\n caller_frame = get_frame(caller_level)\n self.local_path = get_path_from_frame(caller_frame, top_path)\n # local_path -- directory of a file (usually setup.py) that\n # defines a configuration() function.\n # local_path -- directory of a file (usually setup.py) that\n # defines a configuration() function.\n if top_path is None:\n top_path = self.local_path\n self.local_path = ''\n if package_path is None:\n package_path = self.local_path\n elif os.path.isdir(njoin(self.local_path, package_path)):\n package_path = njoin(self.local_path, package_path)\n if not os.path.isdir(package_path or '.'):\n raise ValueError(\"%r is not a directory\" % (package_path,))\n self.top_path = top_path\n self.package_path = package_path\n # this is the relative path in the installed package\n self.path_in_package = os.path.join(*self.name.split('.'))\n\n self.list_keys = self._list_keys[:]\n self.dict_keys = self._dict_keys[:]\n\n for n in self.list_keys:\n v = copy.copy(attrs.get(n, []))\n setattr(self, n, as_list(v))\n\n for n in self.dict_keys:\n v = copy.copy(attrs.get(n, {}))\n setattr(self, n, v)\n\n known_keys = self.list_keys + self.dict_keys\n self.extra_keys = self._extra_keys[:]\n for n in attrs.keys():\n if n in known_keys:\n continue\n a = attrs[n]\n setattr(self, n, a)\n if isinstance(a, list):\n self.list_keys.append(n)\n elif isinstance(a, dict):\n self.dict_keys.append(n)\n else:\n self.extra_keys.append(n)\n\n if os.path.exists(njoin(package_path, '__init__.py')):\n self.packages.append(self.name)\n self.package_dir[self.name] = package_path\n\n self.options = dict(\n ignore_setup_xxx_py = False,\n assume_default_configuration = 
False,\n delegate_options_to_subpackages = False,\n quiet = False,\n )\n\n caller_instance = None\n for i in range(1, 3):\n try:\n f = get_frame(i)\n except ValueError:\n break\n try:\n caller_instance = eval('self', f.f_globals, f.f_locals)\n break\n except NameError:\n pass\n if isinstance(caller_instance, self.__class__):\n if caller_instance.options['delegate_options_to_subpackages']:\n self.set_options(**caller_instance.options)\n\n self.setup_name = setup_name\n\n def todict(self):\n \"\"\"\n Return a dictionary compatible with the keyword arguments of distutils\n setup function.\n\n Examples\n --------\n >>> setup(**config.todict()) #doctest: +SKIP\n \"\"\"\n\n self._optimize_data_files()\n d = {}\n known_keys = self.list_keys + self.dict_keys + self.extra_keys\n for n in known_keys:\n a = getattr(self, n)\n if a:\n d[n] = a\n return d\n\n def info(self, message):\n if not self.options['quiet']:\n print(message)\n\n def warn(self, message):\n sys.stderr.write('Warning: %s' % (message,))\n\n def set_options(self, **options):\n \"\"\"\n Configure Configuration instance.\n\n The following options are available:\n - ignore_setup_xxx_py\n - assume_default_configuration\n - delegate_options_to_subpackages\n - quiet\n\n \"\"\"\n for key, value in options.items():\n if key in self.options:\n self.options[key] = value\n else:\n raise ValueError('Unknown option: '+key)\n\n def get_distribution(self):\n \"\"\"Return the distutils distribution object for self.\"\"\"\n from numpy.distutils.core import get_distribution\n return get_distribution()\n\n def _wildcard_get_subpackage(self, subpackage_name,\n parent_name,\n caller_level = 1):\n l = subpackage_name.split('.')\n subpackage_path = njoin([self.local_path]+l)\n dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]\n config_list = []\n for d in dirs:\n if not os.path.isfile(njoin(d, '__init__.py')):\n continue\n if 'build' in d.split(os.sep):\n continue\n n = '.'.join(d.split(os.sep)[-len(l):])\n c = self.get_subpackage(n,\n parent_name = parent_name,\n caller_level = caller_level+1)\n config_list.extend(c)\n return config_list\n\n def _get_configuration_from_setup_py(self, setup_py,\n subpackage_name,\n subpackage_path,\n parent_name,\n caller_level = 1):\n # In case setup_py imports local modules:\n sys.path.insert(0, os.path.dirname(setup_py))\n try:\n fo_setup_py = open(setup_py, 'U')\n setup_name = os.path.splitext(os.path.basename(setup_py))[0]\n n = dot_join(self.name, subpackage_name, setup_name)\n setup_module = imp.load_module('_'.join(n.split('.')),\n fo_setup_py,\n setup_py,\n ('.py', 'U', 1))\n fo_setup_py.close()\n if not hasattr(setup_module, 'configuration'):\n if not self.options['assume_default_configuration']:\n self.warn('Assuming default configuration '\\\n '(%s does not define configuration())'\\\n % (setup_module))\n config = Configuration(subpackage_name, parent_name,\n self.top_path, subpackage_path,\n caller_level = caller_level + 1)\n else:\n pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))\n args = (pn,)\n def fix_args_py2(args):\n if setup_module.configuration.__code__.co_argcount > 1:\n args = args + (self.top_path,)\n return args\n def fix_args_py3(args):\n if setup_module.configuration.__code__.co_argcount > 1:\n args = args + (self.top_path,)\n return args\n if sys.version_info[0] < 3:\n args = fix_args_py2(args)\n else:\n args = fix_args_py3(args)\n config = setup_module.configuration(*args)\n if config.name!=dot_join(parent_name, subpackage_name):\n self.warn('Subpackage %r 
configuration returned as %r' % \\\n (dot_join(parent_name, subpackage_name), config.name))\n finally:\n del sys.path[0]\n return config\n\n def get_subpackage(self,subpackage_name,\n subpackage_path=None,\n parent_name=None,\n caller_level = 1):\n \"\"\"Return list of subpackage configurations.\n\n Parameters\n ----------\n subpackage_name : str or None\n Name of the subpackage to get the configuration. '*' in\n subpackage_name is handled as a wildcard.\n subpackage_path : str\n If None, then the path is assumed to be the local path plus the\n subpackage_name. If a setup.py file is not found in the\n subpackage_path, then a default configuration is used.\n parent_name : str\n Parent name.\n \"\"\"\n if subpackage_name is None:\n if subpackage_path is None:\n raise ValueError(\n \"either subpackage_name or subpackage_path must be specified\")\n subpackage_name = os.path.basename(subpackage_path)\n\n # handle wildcards\n l = subpackage_name.split('.')\n if subpackage_path is None and '*' in subpackage_name:\n return self._wildcard_get_subpackage(subpackage_name,\n parent_name,\n caller_level = caller_level+1)\n assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))\n if subpackage_path is None:\n subpackage_path = njoin([self.local_path] + l)\n else:\n subpackage_path = njoin([subpackage_path] + l[:-1])\n subpackage_path = self.paths([subpackage_path])[0]\n setup_py = njoin(subpackage_path, self.setup_name)\n if not self.options['ignore_setup_xxx_py']:\n if not os.path.isfile(setup_py):\n setup_py = njoin(subpackage_path,\n 'setup_%s.py' % (subpackage_name))\n if not os.path.isfile(setup_py):\n if not self.options['assume_default_configuration']:\n self.warn('Assuming default configuration '\\\n '(%s/{setup_%s,setup}.py was not found)' \\\n % (os.path.dirname(setup_py), subpackage_name))\n config = Configuration(subpackage_name, parent_name,\n self.top_path, subpackage_path,\n caller_level = caller_level+1)\n else:\n config = self._get_configuration_from_setup_py(\n setup_py,\n subpackage_name,\n subpackage_path,\n parent_name,\n caller_level = caller_level + 1)\n if config:\n return [config]\n else:\n return []\n\n def add_subpackage(self,subpackage_name,\n subpackage_path=None,\n standalone = False):\n \"\"\"Add a sub-package to the current Configuration instance.\n\n This is useful in a setup.py script for adding sub-packages to a\n package.\n\n Parameters\n ----------\n subpackage_name : str\n name of the subpackage\n subpackage_path : str\n if given, the subpackage path such as the subpackage is in\n subpackage_path / subpackage_name. 
If None,the subpackage is\n assumed to be located in the local path / subpackage_name.\n standalone : bool\n \"\"\"\n\n if standalone:\n parent_name = None\n else:\n parent_name = self.name\n config_list = self.get_subpackage(subpackage_name, subpackage_path,\n parent_name = parent_name,\n caller_level = 2)\n if not config_list:\n self.warn('No configuration returned, assuming unavailable.')\n for config in config_list:\n d = config\n if isinstance(config, Configuration):\n d = config.todict()\n assert isinstance(d, dict), repr(type(d))\n\n self.info('Appending %s configuration to %s' \\\n % (d.get('name'), self.name))\n self.dict_append(**d)\n\n dist = self.get_distribution()\n if dist is not None:\n self.warn('distutils distribution has been initialized,'\\\n ' it may be too late to add a subpackage '+ subpackage_name)\n\n def add_data_dir(self, data_path):\n \"\"\"Recursively add files under data_path to data_files list.\n\n Recursively add files under data_path to the list of data_files to be\n installed (and distributed). The data_path can be either a relative\n path-name, or an absolute path-name, or a 2-tuple where the first\n argument shows where in the install directory the data directory\n should be installed to.\n\n Parameters\n ----------\n data_path : seq or str\n Argument can be either\n\n * 2-sequence (<datadir suffix>, <path to data directory>)\n * path to data directory where python datadir suffix defaults\n to package dir.\n\n Notes\n -----\n Rules for installation paths:\n foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar\n (gun, foo/bar) -> parent/gun\n foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b\n (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun\n (gun/*, foo/*) -> parent/gun/a, parent/gun/b\n /foo/bar -> (bar, /foo/bar) -> parent/bar\n (gun, /foo/bar) -> parent/gun\n (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar\n\n Examples\n --------\n For example suppose the source directory contains fun/foo.dat and\n fun/bar/car.dat::\n\n >>> self.add_data_dir('fun') #doctest: +SKIP\n >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP\n >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP\n\n Will install data-files to the locations::\n\n <package install directory>/\n fun/\n foo.dat\n bar/\n car.dat\n sun/\n foo.dat\n bar/\n car.dat\n gun/\n foo.dat\n car.dat\n \"\"\"\n if is_sequence(data_path):\n d, data_path = data_path\n else:\n d = None\n if is_sequence(data_path):\n [self.add_data_dir((d, p)) for p in data_path]\n return\n if not is_string(data_path):\n raise TypeError(\"not a string: %r\" % (data_path,))\n if d is None:\n if os.path.isabs(data_path):\n return self.add_data_dir((os.path.basename(data_path), data_path))\n return self.add_data_dir((data_path, data_path))\n paths = self.paths(data_path, include_non_existing=False)\n if is_glob_pattern(data_path):\n if is_glob_pattern(d):\n pattern_list = allpath(d).split(os.sep)\n pattern_list.reverse()\n # /a/*//b/ -> /a/*/b\n rl = list(range(len(pattern_list)-1)); rl.reverse()\n for i in rl:\n if not pattern_list[i]:\n del pattern_list[i]\n #\n for path in paths:\n if not os.path.isdir(path):\n print('Not a directory, skipping', path)\n continue\n rpath = rel_path(path, self.local_path)\n path_list = rpath.split(os.sep)\n path_list.reverse()\n target_list = []\n i = 0\n for s in pattern_list:\n if is_glob_pattern(s):\n if i>=len(path_list):\n raise ValueError('cannot fill pattern %r with %r' \\\n % (d, path))\n target_list.append(path_list[i])\n else:\n assert s==path_list[i], 
repr((s, path_list[i], data_path, d, path, rpath))\n target_list.append(s)\n i += 1\n if path_list[i:]:\n self.warn('mismatch of pattern_list=%s and path_list=%s'\\\n % (pattern_list, path_list))\n target_list.reverse()\n self.add_data_dir((os.sep.join(target_list), path))\n else:\n for path in paths:\n self.add_data_dir((d, path))\n return\n assert not is_glob_pattern(d), repr(d)\n\n dist = self.get_distribution()\n if dist is not None and dist.data_files is not None:\n data_files = dist.data_files\n else:\n data_files = self.data_files\n\n for path in paths:\n for d1, f in list(general_source_directories_files(path)):\n target_path = os.path.join(self.path_in_package, d, d1)\n data_files.append((target_path, f))\n\n def _optimize_data_files(self):\n data_dict = {}\n for p, files in self.data_files:\n if p not in data_dict:\n data_dict[p] = set()\n for f in files:\n data_dict[p].add(f)\n self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]\n\n def add_data_files(self,*files):\n \"\"\"Add data files to configuration data_files.\n\n Parameters\n ----------\n files : sequence\n Argument(s) can be either\n\n * 2-sequence (<datadir prefix>,<path to data file(s)>)\n * paths to data files where python datadir prefix defaults\n to package dir.\n\n Notes\n -----\n The form of each element of the files sequence is very flexible\n allowing many combinations of where to get the files from the package\n and where they should ultimately be installed on the system. The most\n basic usage is for an element of the files argument sequence to be a\n simple filename. This will cause that file from the local path to be\n installed to the installation path of the self.name package (package\n path). The file argument can also be a relative path in which case the\n entire relative path will be installed into the package directory.\n Finally, the file can be an absolute path name in which case the file\n will be found at the absolute path name but installed to the package\n path.\n\n This basic behavior can be augmented by passing a 2-tuple in as the\n file argument. The first element of the tuple should specify the\n relative path (under the package install directory) where the\n remaining sequence of files should be installed to (it has nothing to\n do with the file-names in the source distribution). The second element\n of the tuple is the sequence of files that should be installed. The\n files in this sequence can be filenames, relative paths, or absolute\n paths. For absolute paths the file will be installed in the top-level\n package installation directory (regardless of the first argument).\n Filenames and relative path names will be installed in the package\n install directory under the path name given as the first element of\n the tuple.\n\n Rules for installation paths:\n\n #. file.txt -> (., file.txt)-> parent/file.txt\n #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt\n #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt\n #. *.txt -> parent/a.txt, parent/b.txt\n #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt\n #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt\n #. (sun, file.txt) -> parent/sun/file.txt\n #. (sun, bar/file.txt) -> parent/sun/file.txt\n #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt\n #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt\n #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt\n #. 
(sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt\n\n An additional feature is that the path to a data-file can actually be\n a function that takes no arguments and returns the actual path(s) to\n the data-files. This is useful when the data files are generated while\n building the package.\n\n Examples\n --------\n Add files to the list of data_files to be included with the package.\n\n >>> self.add_data_files('foo.dat',\n ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),\n ... 'bar/cat.dat',\n ... '/full/path/to/can.dat') #doctest: +SKIP\n\n will install these data files to::\n\n <package install directory>/\n foo.dat\n fun/\n gun.dat\n nun/\n pun.dat\n sun.dat\n bar/\n car.dat\n can.dat\n\n where <package install directory> is the package (or sub-package)\n directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:\n \\\\Python2.4 \\\\Lib \\\\site-packages \\\\mypackage') or\n '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:\n \\\\Python2.4 \\\\Lib \\\\site-packages \\\\mypackage \\\\mysubpackage').\n \"\"\"\n\n if len(files)>1:\n for f in files:\n self.add_data_files(f)\n return\n assert len(files)==1\n if is_sequence(files[0]):\n d, files = files[0]\n else:\n d = None\n if is_string(files):\n filepat = files\n elif is_sequence(files):\n if len(files)==1:\n filepat = files[0]\n else:\n for f in files:\n self.add_data_files((d, f))\n return\n else:\n raise TypeError(repr(type(files)))\n\n if d is None:\n if hasattr(filepat, '__call__'):\n d = ''\n elif os.path.isabs(filepat):\n d = ''\n else:\n d = os.path.dirname(filepat)\n self.add_data_files((d, files))\n return\n\n paths = self.paths(filepat, include_non_existing=False)\n if is_glob_pattern(filepat):\n if is_glob_pattern(d):\n pattern_list = d.split(os.sep)\n pattern_list.reverse()\n for path in paths:\n path_list = path.split(os.sep)\n path_list.reverse()\n path_list.pop() # filename\n target_list = []\n i = 0\n for s in pattern_list:\n if is_glob_pattern(s):\n target_list.append(path_list[i])\n i += 1\n else:\n target_list.append(s)\n target_list.reverse()\n self.add_data_files((os.sep.join(target_list), path))\n else:\n self.add_data_files((d, paths))\n return\n assert not is_glob_pattern(d), repr((d, filepat))\n\n dist = self.get_distribution()\n if dist is not None and dist.data_files is not None:\n data_files = dist.data_files\n else:\n data_files = self.data_files\n\n data_files.append((os.path.join(self.path_in_package, d), paths))\n\n ### XXX Implement add_py_modules\n\n def add_define_macros(self, macros):\n \"\"\"Add define macros to configuration\n\n Add the given sequence of macro name and value duples to the beginning\n of the define_macros list This list will be visible to all extension\n modules of the current package.\n \"\"\"\n dist = self.get_distribution()\n if dist is not None:\n if not hasattr(dist, 'define_macros'):\n dist.define_macros = []\n dist.define_macros.extend(macros)\n else:\n self.define_macros.extend(macros)\n\n\n def add_include_dirs(self,*paths):\n \"\"\"Add paths to configuration include directories.\n\n Add the given sequence of paths to the beginning of the include_dirs\n list. 
This list will be visible to all extension modules of the\n current package.\n \"\"\"\n include_dirs = self.paths(paths)\n dist = self.get_distribution()\n if dist is not None:\n if dist.include_dirs is None:\n dist.include_dirs = []\n dist.include_dirs.extend(include_dirs)\n else:\n self.include_dirs.extend(include_dirs)\n\n def add_numarray_include_dirs(self):\n import numpy.numarray.util as nnu\n self.add_include_dirs(*nnu.get_numarray_include_dirs())\n\n def add_headers(self,*files):\n \"\"\"Add installable headers to configuration.\n\n Add the given sequence of files to the beginning of the headers list.\n By default, headers will be installed under <python-\n include>/<self.name.replace('.','/')>/ directory. If an item of files\n is a tuple, then its first argument specifies the actual installation\n location relative to the <python-include> path.\n\n Parameters\n ----------\n files : str or seq\n Argument(s) can be either:\n\n * 2-sequence (<includedir suffix>,<path to header file(s)>)\n * path(s) to header file(s) where python includedir suffix will\n default to package name.\n \"\"\"\n headers = []\n for path in files:\n if is_string(path):\n [headers.append((self.name, p)) for p in self.paths(path)]\n else:\n if not isinstance(path, (tuple, list)) or len(path) != 2:\n raise TypeError(repr(path))\n [headers.append((path[0], p)) for p in self.paths(path[1])]\n dist = self.get_distribution()\n if dist is not None:\n if dist.headers is None:\n dist.headers = []\n dist.headers.extend(headers)\n else:\n self.headers.extend(headers)\n\n def paths(self,*paths,**kws):\n \"\"\"Apply glob to paths and prepend local_path if needed.\n\n Applies glob.glob(...) to each path in the sequence (if needed) and\n pre-pends the local_path if needed. Because this is called on all\n source lists, this allows wildcard characters to be specified in lists\n of sources for extension modules and libraries and scripts and allows\n path-names be relative to the source directory.\n\n \"\"\"\n include_non_existing = kws.get('include_non_existing', True)\n return gpaths(paths,\n local_path = self.local_path,\n include_non_existing=include_non_existing)\n\n def _fix_paths_dict(self, kw):\n for k in kw.keys():\n v = kw[k]\n if k in ['sources', 'depends', 'include_dirs', 'library_dirs',\n 'module_dirs', 'extra_objects']:\n new_v = self.paths(v)\n kw[k] = new_v\n\n def add_extension(self,name,sources,**kw):\n \"\"\"Add extension to configuration.\n\n Create and add an Extension instance to the ext_modules list. This\n method also takes the following optional keyword arguments that are\n passed on to the Extension constructor.\n\n Parameters\n ----------\n name : str\n name of the extension\n sources : seq\n list of the sources. The list of sources may contain functions\n (called source generators) which must take an extension instance\n and a build directory as inputs and return a source file or list of\n source files or None. If None is returned then no sources are\n generated. If the Extension instance has no sources after\n processing all source generators, then no extension module is\n built.\n include_dirs :\n define_macros :\n undef_macros :\n library_dirs :\n libraries :\n runtime_library_dirs :\n extra_objects :\n extra_compile_args :\n extra_link_args :\n extra_f77_compile_args :\n extra_f90_compile_args :\n export_symbols :\n swig_opts :\n depends :\n The depends list contains paths to files or directories that the\n sources of the extension module depend on. 
If any path in the\n depends list is newer than the extension module, then the module\n will be rebuilt.\n language :\n f2py_options :\n module_dirs :\n extra_info : dict or list\n dict or list of dict of keywords to be appended to keywords.\n\n Notes\n -----\n The self.paths(...) method is applied to all lists that may contain\n paths.\n \"\"\"\n ext_args = copy.copy(kw)\n ext_args['name'] = dot_join(self.name, name)\n ext_args['sources'] = sources\n\n if 'extra_info' in ext_args:\n extra_info = ext_args['extra_info']\n del ext_args['extra_info']\n if isinstance(extra_info, dict):\n extra_info = [extra_info]\n for info in extra_info:\n assert isinstance(info, dict), repr(info)\n dict_append(ext_args,**info)\n\n self._fix_paths_dict(ext_args)\n\n # Resolve out-of-tree dependencies\n libraries = ext_args.get('libraries', [])\n libnames = []\n ext_args['libraries'] = []\n for libname in libraries:\n if isinstance(libname, tuple):\n self._fix_paths_dict(libname[1])\n\n # Handle library names of the form libname@relative/path/to/library\n if '@' in libname:\n lname, lpath = libname.split('@', 1)\n lpath = os.path.abspath(njoin(self.local_path, lpath))\n if os.path.isdir(lpath):\n c = self.get_subpackage(None, lpath,\n caller_level = 2)\n if isinstance(c, Configuration):\n c = c.todict()\n for l in [l[0] for l in c.get('libraries', [])]:\n llname = l.split('__OF__', 1)[0]\n if llname == lname:\n c.pop('name', None)\n dict_append(ext_args,**c)\n break\n continue\n libnames.append(libname)\n\n ext_args['libraries'] = libnames + ext_args['libraries']\n ext_args['define_macros'] = \\\n self.define_macros + ext_args.get('define_macros', [])\n\n from numpy.distutils.core import Extension\n ext = Extension(**ext_args)\n self.ext_modules.append(ext)\n\n dist = self.get_distribution()\n if dist is not None:\n self.warn('distutils distribution has been initialized,'\\\n ' it may be too late to add an extension '+name)\n return ext\n\n def add_library(self,name,sources,**build_info):\n \"\"\"\n Add library to configuration.\n\n Parameters\n ----------\n name : str\n Name of the extension.\n sources : sequence\n List of the sources. The list of sources may contain functions\n (called source generators) which must take an extension instance\n and a build directory as inputs and return a source file or list of\n source files or None. If None is returned then no sources are\n generated. If the Extension instance has no sources after\n processing all source generators, then no extension module is\n built.\n build_info : dict, optional\n The following keys are allowed:\n\n * depends\n * macros\n * include_dirs\n * extra_compiler_args\n * extra_f77_compiler_args\n * extra_f90_compiler_args\n * f2py_options\n * language\n\n \"\"\"\n self._add_library(name, sources, None, build_info)\n\n dist = self.get_distribution()\n if dist is not None:\n self.warn('distutils distribution has been initialized,'\\\n ' it may be too late to add a library '+ name)\n\n def _add_library(self, name, sources, install_dir, build_info):\n \"\"\"Common implementation for add_library and add_installed_library. 
Do\n not use directly\"\"\"\n build_info = copy.copy(build_info)\n name = name #+ '__OF__' + self.name\n build_info['sources'] = sources\n\n # Sometimes, depends is not set up to an empty list by default, and if\n # depends is not given to add_library, distutils barfs (#1134)\n if not 'depends' in build_info:\n build_info['depends'] = []\n\n self._fix_paths_dict(build_info)\n\n # Add to libraries list so that it is build with build_clib\n self.libraries.append((name, build_info))\n\n def add_installed_library(self, name, sources, install_dir, build_info=None):\n \"\"\"\n Similar to add_library, but the specified library is installed.\n\n Most C libraries used with `distutils` are only used to build python\n extensions, but libraries built through this method will be installed\n so that they can be reused by third-party packages.\n\n Parameters\n ----------\n name : str\n Name of the installed library.\n sources : sequence\n List of the library's source files. See `add_library` for details.\n install_dir : str\n Path to install the library, relative to the current sub-package.\n build_info : dict, optional\n The following keys are allowed:\n\n * depends\n * macros\n * include_dirs\n * extra_compiler_args\n * extra_f77_compiler_args\n * extra_f90_compiler_args\n * f2py_options\n * language\n\n Returns\n -------\n None\n\n See Also\n --------\n add_library, add_npy_pkg_config, get_info\n\n Notes\n -----\n The best way to encode the options required to link against the specified\n C libraries is to use a \"libname.ini\" file, and use `get_info` to\n retrieve the required options (see `add_npy_pkg_config` for more\n information).\n\n \"\"\"\n if not build_info:\n build_info = {}\n\n install_dir = os.path.join(self.package_path, install_dir)\n self._add_library(name, sources, install_dir, build_info)\n self.installed_libraries.append(InstallableLib(name, build_info, install_dir))\n\n def add_npy_pkg_config(self, template, install_dir, subst_dict=None):\n \"\"\"\n Generate and install a npy-pkg config file from a template.\n\n The config file generated from `template` is installed in the\n given install directory, using `subst_dict` for variable substitution.\n\n Parameters\n ----------\n template : str\n The path of the template, relatively to the current package path.\n install_dir : str\n Where to install the npy-pkg config file, relatively to the current\n package path.\n subst_dict : dict, optional\n If given, any string of the form ``@key@`` will be replaced by\n ``subst_dict[key]`` in the template file when installed. The install\n prefix is always available through the variable ``@prefix@``, since the\n install prefix is not easy to get reliably from setup.py.\n\n See also\n --------\n add_installed_library, get_info\n\n Notes\n -----\n This works for both standard installs and in-place builds, i.e. 
the\n ``@prefix@`` refer to the source directory for in-place builds.\n\n Examples\n --------\n ::\n\n config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})\n\n Assuming the foo.ini.in file has the following content::\n\n [meta]\n Name=@foo@\n Version=1.0\n Description=dummy description\n\n [default]\n Cflags=-I@prefix@/include\n Libs=\n\n The generated file will have the following content::\n\n [meta]\n Name=bar\n Version=1.0\n Description=dummy description\n\n [default]\n Cflags=-Iprefix_dir/include\n Libs=\n\n and will be installed as foo.ini in the 'lib' subpath.\n\n \"\"\"\n if subst_dict is None:\n subst_dict = {}\n basename = os.path.splitext(template)[0]\n template = os.path.join(self.package_path, template)\n\n if self.name in self.installed_pkg_config:\n self.installed_pkg_config[self.name].append((template, install_dir,\n subst_dict))\n else:\n self.installed_pkg_config[self.name] = [(template, install_dir,\n subst_dict)]\n\n\n def add_scripts(self,*files):\n \"\"\"Add scripts to configuration.\n\n Add the sequence of files to the beginning of the scripts list.\n Scripts will be installed under the <prefix>/bin/ directory.\n\n \"\"\"\n scripts = self.paths(files)\n dist = self.get_distribution()\n if dist is not None:\n if dist.scripts is None:\n dist.scripts = []\n dist.scripts.extend(scripts)\n else:\n self.scripts.extend(scripts)\n\n def dict_append(self,**dict):\n for key in self.list_keys:\n a = getattr(self, key)\n a.extend(dict.get(key, []))\n for key in self.dict_keys:\n a = getattr(self, key)\n a.update(dict.get(key, {}))\n known_keys = self.list_keys + self.dict_keys + self.extra_keys\n for key in dict.keys():\n if key not in known_keys:\n a = getattr(self, key, None)\n if a and a==dict[key]: continue\n self.warn('Inheriting attribute %r=%r from %r' \\\n % (key, dict[key], dict.get('name', '?')))\n setattr(self, key, dict[key])\n self.extra_keys.append(key)\n elif key in self.extra_keys:\n self.info('Ignoring attempt to set %r (from %r to %r)' \\\n % (key, getattr(self, key), dict[key]))\n elif key in known_keys:\n # key is already processed above\n pass\n else:\n raise ValueError(\"Don't know about key=%r\" % (key))\n\n def __str__(self):\n from pprint import pformat\n known_keys = self.list_keys + self.dict_keys + self.extra_keys\n s = '<'+5*'-' + '\\n'\n s += 'Configuration of '+self.name+':\\n'\n known_keys.sort()\n for k in known_keys:\n a = getattr(self, k, None)\n if a:\n s += '%s = %s\\n' % (k, pformat(a))\n s += 5*'-' + '>'\n return s\n\n def get_config_cmd(self):\n \"\"\"\n Returns the numpy.distutils config command instance.\n \"\"\"\n cmd = get_cmd('config')\n cmd.ensure_finalized()\n cmd.dump_source = 0\n cmd.noisy = 0\n old_path = os.environ.get('PATH')\n if old_path:\n path = os.pathsep.join(['.', old_path])\n os.environ['PATH'] = path\n return cmd\n\n def get_build_temp_dir(self):\n \"\"\"\n Return a path to a temporary directory where temporary files should be\n placed.\n \"\"\"\n cmd = get_cmd('build')\n cmd.ensure_finalized()\n return cmd.build_temp\n\n def have_f77c(self):\n \"\"\"Check for availability of Fortran 77 compiler.\n\n Use it inside source generating function to ensure that\n setup distribution instance has been initialized.\n\n Notes\n -----\n True if a Fortran 77 compiler is available (because a simple Fortran 77\n code was able to be compiled successfully).\n \"\"\"\n simple_fortran_subroutine = '''\n subroutine simple\n end\n '''\n config_cmd = self.get_config_cmd()\n flag = config_cmd.try_compile(simple_fortran_subroutine, 
lang='f77')\n return flag\n\n def have_f90c(self):\n \"\"\"Check for availability of Fortran 90 compiler.\n\n Use it inside source generating function to ensure that\n setup distribution instance has been initialized.\n\n Notes\n -----\n True if a Fortran 90 compiler is available (because a simple Fortran\n 90 code was able to be compiled successfully)\n \"\"\"\n simple_fortran_subroutine = '''\n subroutine simple\n end\n '''\n config_cmd = self.get_config_cmd()\n flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')\n return flag\n\n def append_to(self, extlib):\n \"\"\"Append libraries, include_dirs to extension or library item.\n \"\"\"\n if is_sequence(extlib):\n lib_name, build_info = extlib\n dict_append(build_info,\n libraries=self.libraries,\n include_dirs=self.include_dirs)\n else:\n from numpy.distutils.core import Extension\n assert isinstance(extlib, Extension), repr(extlib)\n extlib.libraries.extend(self.libraries)\n extlib.include_dirs.extend(self.include_dirs)\n\n def _get_svn_revision(self, path):\n \"\"\"Return path's SVN revision number.\n \"\"\"\n revision = None\n m = None\n cwd = os.getcwd()\n try:\n os.chdir(path or '.')\n p = subprocess.Popen(['svnversion'], shell=True,\n stdout=subprocess.PIPE, stderr=None,\n close_fds=True)\n sout = p.stdout\n m = re.match(r'(?P<revision>\\d+)', sout.read())\n except:\n pass\n os.chdir(cwd)\n if m:\n revision = int(m.group('revision'))\n return revision\n if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):\n entries = njoin(path, '_svn', 'entries')\n else:\n entries = njoin(path, '.svn', 'entries')\n if os.path.isfile(entries):\n f = open(entries)\n fstr = f.read()\n f.close()\n if fstr[:5] == '<?xml': # pre 1.4\n m = re.search(r'revision=\"(?P<revision>\\d+)\"', fstr)\n if m:\n revision = int(m.group('revision'))\n else: # non-xml entries file --- check to be sure that\n m = re.search(r'dir[\\n\\r]+(?P<revision>\\d+)', fstr)\n if m:\n revision = int(m.group('revision'))\n return revision\n\n def _get_hg_revision(self, path):\n \"\"\"Return path's Mercurial revision number.\n \"\"\"\n revision = None\n m = None\n cwd = os.getcwd()\n try:\n os.chdir(path or '.')\n p = subprocess.Popen(['hg identify --num'], shell=True,\n stdout=subprocess.PIPE, stderr=None,\n close_fds=True)\n sout = p.stdout\n m = re.match(r'(?P<revision>\\d+)', sout.read())\n except:\n pass\n os.chdir(cwd)\n if m:\n revision = int(m.group('revision'))\n return revision\n branch_fn = njoin(path, '.hg', 'branch')\n branch_cache_fn = njoin(path, '.hg', 'branch.cache')\n\n if os.path.isfile(branch_fn):\n branch0 = None\n f = open(branch_fn)\n revision0 = f.read().strip()\n f.close()\n\n branch_map = {}\n for line in file(branch_cache_fn, 'r'):\n branch1, revision1 = line.split()[:2]\n if revision1==revision0:\n branch0 = branch1\n try:\n revision1 = int(revision1)\n except ValueError:\n continue\n branch_map[branch1] = revision1\n\n revision = branch_map.get(branch0)\n return revision\n\n\n def get_version(self, version_file=None, version_variable=None):\n \"\"\"Try to get version string of a package.\n\n Return a version string of the current package or None if the version\n information could not be detected.\n\n Notes\n -----\n This method scans files named\n __version__.py, <packagename>_version.py, version.py, and\n __svn_version__.py for string variables version, __version\\__, and\n <packagename>_version, until a version number is found.\n \"\"\"\n version = getattr(self, 'version', None)\n if version is not None:\n return 
version\n\n # Get version from version file.\n if version_file is None:\n files = ['__version__.py',\n self.name.split('.')[-1]+'_version.py',\n 'version.py',\n '__svn_version__.py',\n '__hg_version__.py']\n else:\n files = [version_file]\n if version_variable is None:\n version_vars = ['version',\n '__version__',\n self.name.split('.')[-1]+'_version']\n else:\n version_vars = [version_variable]\n for f in files:\n fn = njoin(self.local_path, f)\n if os.path.isfile(fn):\n info = (open(fn), fn, ('.py', 'U', 1))\n name = os.path.splitext(os.path.basename(fn))[0]\n n = dot_join(self.name, name)\n try:\n version_module = imp.load_module('_'.join(n.split('.')),*info)\n except ImportError:\n msg = get_exception()\n self.warn(str(msg))\n version_module = None\n if version_module is None:\n continue\n\n for a in version_vars:\n version = getattr(version_module, a, None)\n if version is not None:\n break\n if version is not None:\n break\n\n if version is not None:\n self.version = version\n return version\n\n # Get version as SVN or Mercurial revision number\n revision = self._get_svn_revision(self.local_path)\n if revision is None:\n revision = self._get_hg_revision(self.local_path)\n\n if revision is not None:\n version = str(revision)\n self.version = version\n\n return version\n\n def make_svn_version_py(self, delete=True):\n \"\"\"Appends a data function to the data_files list that will generate\n __svn_version__.py file to the current package directory.\n\n Generate package __svn_version__.py file from SVN revision number,\n it will be removed after python exits but will be available\n when sdist, etc commands are executed.\n\n Notes\n -----\n If __svn_version__.py existed before, nothing is done.\n\n This is\n intended for working with source directories that are in an SVN\n repository.\n \"\"\"\n target = njoin(self.local_path, '__svn_version__.py')\n revision = self._get_svn_revision(self.local_path)\n if os.path.isfile(target) or revision is None:\n return\n else:\n def generate_svn_version_py():\n if not os.path.isfile(target):\n version = str(revision)\n self.info('Creating %s (version=%r)' % (target, version))\n f = open(target, 'w')\n f.write('version = %r\\n' % (version))\n f.close()\n\n import atexit\n def rm_file(f=target,p=self.info):\n if delete:\n try: os.remove(f); p('removed '+f)\n except OSError: pass\n try: os.remove(f+'c'); p('removed '+f+'c')\n except OSError: pass\n\n atexit.register(rm_file)\n\n return target\n\n self.add_data_files(('', generate_svn_version_py()))\n\n def make_hg_version_py(self, delete=True):\n \"\"\"Appends a data function to the data_files list that will generate\n __hg_version__.py file to the current package directory.\n\n Generate package __hg_version__.py file from Mercurial revision,\n it will be removed after python exits but will be available\n when sdist, etc commands are executed.\n\n Notes\n -----\n If __hg_version__.py existed before, nothing is done.\n\n This is intended for working with source directories that are\n in an Mercurial repository.\n \"\"\"\n target = njoin(self.local_path, '__hg_version__.py')\n revision = self._get_hg_revision(self.local_path)\n if os.path.isfile(target) or revision is None:\n return\n else:\n def generate_hg_version_py():\n if not os.path.isfile(target):\n version = str(revision)\n self.info('Creating %s (version=%r)' % (target, version))\n f = open(target, 'w')\n f.write('version = %r\\n' % (version))\n f.close()\n\n import atexit\n def rm_file(f=target,p=self.info):\n if delete:\n try: os.remove(f); 
p('removed '+f)\n except OSError: pass\n try: os.remove(f+'c'); p('removed '+f+'c')\n except OSError: pass\n\n atexit.register(rm_file)\n\n return target\n\n self.add_data_files(('', generate_hg_version_py()))\n\n def make_config_py(self,name='__config__'):\n \"\"\"Generate package __config__.py file containing system_info\n information used during building the package.\n\n This file is installed to the\n package installation directory.\n\n \"\"\"\n self.py_modules.append((self.name, name, generate_config_py))\n\n\n def get_info(self,*names):\n \"\"\"Get resources information.\n\n Return information (from system_info.get_info) for all of the names in\n the argument list in a single dictionary.\n \"\"\"\n from .system_info import get_info, dict_append\n info_dict = {}\n for a in names:\n dict_append(info_dict,**get_info(a))\n return info_dict\n\n\ndef get_cmd(cmdname, _cache={}):\n if cmdname not in _cache:\n import distutils.core\n dist = distutils.core._setup_distribution\n if dist is None:\n from distutils.errors import DistutilsInternalError\n raise DistutilsInternalError(\n 'setup distribution instance not initialized')\n cmd = dist.get_command_obj(cmdname)\n _cache[cmdname] = cmd\n return _cache[cmdname]\n\ndef get_numpy_include_dirs():\n # numpy_include_dirs are set by numpy/core/setup.py, otherwise []\n include_dirs = Configuration.numpy_include_dirs[:]\n if not include_dirs:\n import numpy\n include_dirs = [ numpy.get_include() ]\n # else running numpy/core/setup.py\n return include_dirs\n\ndef get_npy_pkg_dir():\n \"\"\"Return the path where to find the npy-pkg-config directory.\"\"\"\n # XXX: import here for bootstrapping reasons\n import numpy\n d = os.path.join(os.path.dirname(numpy.__file__),\n 'core', 'lib', 'npy-pkg-config')\n return d\n\ndef get_pkg_info(pkgname, dirs=None):\n \"\"\"\n Return library info for the given package.\n\n Parameters\n ----------\n pkgname : str\n Name of the package (should match the name of the .ini file, without\n the extension, e.g. foo for the file foo.ini).\n dirs : sequence, optional\n If given, should be a sequence of additional directories where to look\n for npy-pkg-config files. Those directories are searched prior to the\n NumPy directory.\n\n Returns\n -------\n pkginfo : class instance\n The `LibraryInfo` instance containing the build information.\n\n Raises\n ------\n PkgNotFound\n If the package is not found.\n\n See Also\n --------\n Configuration.add_npy_pkg_config, Configuration.add_installed_library,\n get_info\n\n \"\"\"\n from numpy.distutils.npy_pkg_config import read_config\n\n if dirs:\n dirs.append(get_npy_pkg_dir())\n else:\n dirs = [get_npy_pkg_dir()]\n return read_config(pkgname, dirs)\n\ndef get_info(pkgname, dirs=None):\n \"\"\"\n Return an info dict for a given C library.\n\n The info dict contains the necessary options to use the C library.\n\n Parameters\n ----------\n pkgname : str\n Name of the package (should match the name of the .ini file, without\n the extension, e.g. foo for the file foo.ini).\n dirs : sequence, optional\n If given, should be a sequence of additional directories where to look\n for npy-pkg-config files. 
Those directories are searched prior to the\n NumPy directory.\n\n Returns\n -------\n info : dict\n The dictionary with build information.\n\n Raises\n ------\n PkgNotFound\n If the package is not found.\n\n See Also\n --------\n Configuration.add_npy_pkg_config, Configuration.add_installed_library,\n get_pkg_info\n\n Examples\n --------\n To get the necessary information for the npymath library from NumPy:\n\n >>> npymath_info = np.distutils.misc_util.get_info('npymath')\n >>> npymath_info #doctest: +SKIP\n {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':\n ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}\n\n This info dict can then be used as input to a `Configuration` instance::\n\n config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)\n\n \"\"\"\n from numpy.distutils.npy_pkg_config import parse_flags\n pkg_info = get_pkg_info(pkgname, dirs)\n\n # Translate LibraryInfo instance into a build_info dict\n info = parse_flags(pkg_info.cflags())\n for k, v in parse_flags(pkg_info.libs()).items():\n info[k].extend(v)\n\n # add_extension extra_info argument is ANAL\n info['define_macros'] = info['macros']\n del info['macros']\n del info['ignored']\n\n return info\n\ndef is_bootstrapping():\n if sys.version_info[0] >= 3:\n import builtins\n else:\n import __builtin__ as builtins\n\n try:\n builtins.__NUMPY_SETUP__\n return True\n except AttributeError:\n return False\n __NUMPY_SETUP__ = False\n\n\n#########################\n\ndef default_config_dict(name = None, parent_name = None, local_path=None):\n \"\"\"Return a configuration dictionary for usage in\n configuration() function defined in file setup_<name>.py.\n \"\"\"\n import warnings\n warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\\\n 'deprecated default_config_dict(%r,%r,%r)'\n % (name, parent_name, local_path,\n name, parent_name, local_path,\n ))\n c = Configuration(name, parent_name, local_path)\n return c.todict()\n\n\ndef dict_append(d, **kws):\n for k, v in kws.items():\n if k in d:\n ov = d[k]\n if isinstance(ov, str):\n d[k] = v\n else:\n d[k].extend(v)\n else:\n d[k] = v\n\ndef appendpath(prefix, path):\n if os.path.sep != '/':\n prefix = prefix.replace('/', os.path.sep)\n path = path.replace('/', os.path.sep)\n drive = ''\n if os.path.isabs(path):\n drive = os.path.splitdrive(prefix)[0]\n absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]\n pathdrive, path = os.path.splitdrive(path)\n d = os.path.commonprefix([absprefix, path])\n if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \\\n or os.path.join(path[:len(d)], path[len(d):]) != path:\n # Handle invalid paths\n d = os.path.dirname(d)\n subpath = path[len(d):]\n if os.path.isabs(subpath):\n subpath = subpath[1:]\n else:\n subpath = path\n return os.path.normpath(njoin(drive + prefix, subpath))\n\ndef generate_config_py(target):\n \"\"\"Generate config.py file containing system_info information\n used during building the package.\n\n Usage:\n config['py_modules'].append((packagename, '__config__',generate_config_py))\n \"\"\"\n from numpy.distutils.system_info import system_info\n from distutils.dir_util import mkpath\n mkpath(os.path.dirname(target))\n f = open(target, 'w')\n f.write('# This file is generated by %s\\n' % (os.path.abspath(sys.argv[0])))\n f.write('# It contains system_info results at the time of building this package.\\n')\n f.write('__all__ = [\"get_info\",\"show\"]\\n\\n')\n for k, i in system_info.saved_results.items():\n f.write('%s=%r\\n' % (k, i))\n 
f.write(r'''\ndef get_info(name):\n g = globals()\n return g.get(name, g.get(name + \"_info\", {}))\n\ndef show():\n for name,info_dict in globals().items():\n if name[0] == \"_\" or type(info_dict) is not type({}): continue\n print(name + \":\")\n if not info_dict:\n print(\" NOT AVAILABLE\")\n for k,v in info_dict.items():\n v = str(v)\n if k == \"sources\" and len(v) > 200:\n v = v[:60] + \" ...\\n... \" + v[-60:]\n print(\" %s = %s\" % (k,v))\n ''')\n\n f.close()\n return target\n\ndef msvc_version(compiler):\n \"\"\"Return version major and minor of compiler instance if it is\n MSVC, raise an exception otherwise.\"\"\"\n if not compiler.compiler_type == \"msvc\":\n raise ValueError(\"Compiler instance is not msvc (%s)\"\\\n % compiler.compiler_type)\n return compiler._MSVCCompiler__version\n\nif sys.version[:3] >= '2.5':\n def get_build_architecture():\n from distutils.msvccompiler import get_build_architecture\n return get_build_architecture()\nelse:\n #copied from python 2.5.1 distutils/msvccompiler.py\n def get_build_architecture():\n \"\"\"Return the processor architecture.\n\n Possible results are \"Intel\", \"Itanium\", or \"AMD64\".\n \"\"\"\n prefix = \" bit (\"\n i = sys.version.find(prefix)\n if i == -1:\n return \"Intel\"\n j = sys.version.find(\")\", i)\n return sys.version[i+len(prefix):j]\n",
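The entry above dumps numpy.distutils.misc_util, whose Configuration class drives numpy-style setup.py scripts. As an illustrative sketch only (not part of the dumped sources; the package name 'mypkg', the 'sub' subpackage, and the file paths are hypothetical), a setup.py built on the APIs documented above typically looks like this:

def configuration(parent_package='', top_path=None):
    # Configuration and get_info come from the misc_util module dumped above.
    from numpy.distutils.misc_util import Configuration, get_info
    config = Configuration('mypkg', parent_package, top_path)
    # Recurse into mypkg/sub; picks up sub/setup.py if present, otherwise
    # falls back to a default configuration as described in get_subpackage().
    config.add_subpackage('sub')
    # Install non-Python files next to the package (see the add_data_files rules).
    config.add_data_files('data/table.csv')
    # Build a C extension; extra_info merges the build flags returned by the
    # npy-pkg-config machinery (here npymath, as in get_info's own docstring).
    config.add_extension('_fast',
                         sources=['src/_fastmodule.c'],
                         extra_info=get_info('npymath'))
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())

The todict() call mirrors the usage shown in Configuration.todict's docstring: it converts the accumulated configuration into keyword arguments for the distutils setup function.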
"\"\"\"\nPython wrappers for Orthogonal Distance Regression (ODRPACK).\n\nNotes\n=====\n\n* Array formats -- FORTRAN stores its arrays in memory column first, i.e. an\n array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,\n NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For\n efficiency and convenience, the input and output arrays of the fitting\n function (and its Jacobians) are passed to FORTRAN without transposition.\n Therefore, where the ODRPACK documentation says that the X array is of shape\n (N, M), it will be passed to the Python function as an array of shape (M, N).\n If M==1, the one-dimensional case, then nothing matters; if M>1, then your\n Python functions will be dealing with arrays that are indexed in reverse of\n the ODRPACK documentation. No real biggie, but watch out for your indexing of\n the Jacobians: the i,j'th elements (@f_i/@x_j) evaluated at the n'th\n observation will be returned as jacd[j, i, n]. Except for the Jacobians, it\n really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,\n you can always use the transpose() function from scipy explicitly.\n\n* Examples -- See the accompanying file test/test.py for examples of how to set\n up fits of your own. Some are taken from the User's Guide; some are from\n other sources.\n\n* Models -- Some common models are instantiated in the accompanying module\n models.py . Contributions are welcome.\n\nCredits\n=======\n\n* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.\n\nRobert Kern\[email protected]\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy\nfrom scipy.odr import __odrpack\n\n__all__ = ['odr', 'odr_error', 'odr_stop', 'Data', 'RealData', 'Model',\n 'Output', 'ODR']\n\nodr = __odrpack.odr\n\n\nclass odr_error(Exception):\n \"\"\"\n Exception indicating an error in fitting.\n\n This is raised by `scipy.odr` if an error occurs during fitting.\n \"\"\"\n pass\n\n\nclass odr_stop(Exception):\n \"\"\"\n Exception stopping fitting.\n\n You can raise this exception in your objective function to tell\n `scipy.odr` to stop fitting.\n \"\"\"\n pass\n\n__odrpack._set_exceptions(odr_error, odr_stop)\n\n\ndef _conv(obj, dtype=None):\n \"\"\" Convert an object to the preferred form for input to the odr routine.\n \"\"\"\n\n if obj is None:\n return obj\n else:\n if dtype is None:\n obj = numpy.asarray(obj)\n else:\n obj = numpy.asarray(obj, dtype)\n if obj.shape == ():\n # Scalar.\n return obj.dtype.type(obj)\n else:\n return obj\n\n\ndef _report_error(info):\n \"\"\" Interprets the return code of the odr routine.\n\n Parameters\n ----------\n info : int\n The return code of the odr routine.\n\n Returns\n -------\n problems : list(str)\n A list of messages about why the odr() routine stopped.\n \"\"\"\n\n stopreason = ('Blank',\n 'Sum of squares convergence',\n 'Parameter convergence',\n 'Both sum of squares and parameter convergence',\n 'Iteration limit reached')[info % 5]\n\n if info >= 5:\n # questionable results or fatal error\n\n I = (info//10000 % 10,\n info//1000 % 10,\n info//100 % 10,\n info//10 % 10,\n info % 10)\n problems = []\n\n if I[0] == 0:\n if I[1] != 0:\n problems.append('Derivatives possibly not correct')\n if I[2] != 0:\n problems.append('Error occurred in callback')\n if I[3] != 0:\n problems.append('Problem is not full rank at solution')\n problems.append(stopreason)\n elif I[0] == 1:\n if I[1] != 0:\n problems.append('N < 1')\n if I[2] != 0:\n problems.append('M 
< 1')\n if I[3] != 0:\n problems.append('NP < 1 or NP > N')\n if I[4] != 0:\n problems.append('NQ < 1')\n elif I[0] == 2:\n if I[1] != 0:\n problems.append('LDY and/or LDX incorrect')\n if I[2] != 0:\n problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')\n if I[3] != 0:\n problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')\n if I[4] != 0:\n problems.append('LWORK and/or LIWORK too small')\n elif I[0] == 3:\n if I[1] != 0:\n problems.append('STPB and/or STPD incorrect')\n if I[2] != 0:\n problems.append('SCLB and/or SCLD incorrect')\n if I[3] != 0:\n problems.append('WE incorrect')\n if I[4] != 0:\n problems.append('WD incorrect')\n elif I[0] == 4:\n problems.append('Error in derivatives')\n elif I[0] == 5:\n problems.append('Error occurred in callback')\n elif I[0] == 6:\n problems.append('Numerical error detected')\n\n return problems\n\n else:\n return [stopreason]\n\n\nclass Data(object):\n \"\"\"\n The data to fit.\n\n Parameters\n ----------\n x : array_like\n Input data for regression.\n y : array_like, optional\n Input data for regression.\n we : array_like, optional\n If `we` is a scalar, then that value is used for all data points (and\n all dimensions of the response variable).\n If `we` is a rank-1 array of length q (the dimensionality of the\n response variable), then this vector is the diagonal of the covariant\n weighting matrix for all data points.\n If `we` is a rank-1 array of length n (the number of data points), then\n the i'th element is the weight for the i'th response variable\n observation (single-dimensional only).\n If `we` is a rank-2 array of shape (q, q), then this is the full\n covariant weighting matrix broadcast to each observation.\n If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the\n diagonal of the covariant weighting matrix for the i'th observation.\n If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the\n full specification of the covariant weighting matrix for each\n observation.\n If the fit is implicit, then only a positive scalar value is used.\n wd : array_like, optional\n If `wd` is a scalar, then that value is used for all data points\n (and all dimensions of the input variable). If `wd` = 0, then the\n covariant weighting matrix for each observation is set to the identity\n matrix (so each dimension of each observation has the same weight).\n If `wd` is a rank-1 array of length m (the dimensionality of the input\n variable), then this vector is the diagonal of the covariant weighting\n matrix for all data points.\n If `wd` is a rank-1 array of length n (the number of data points), then\n the i'th element is the weight for the i'th input variable observation\n (single-dimensional only).\n If `wd` is a rank-2 array of shape (m, m), then this is the full\n covariant weighting matrix broadcast to each observation.\n If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the\n diagonal of the covariant weighting matrix for the i'th observation.\n If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the\n full specification of the covariant weighting matrix for each\n observation.\n fix : array_like of ints, optional\n The `fix` argument is the same as ifixx in the class ODR. It is an\n array of integers with the same shape as data.x that determines which\n input observations are treated as fixed. One can use a sequence of\n length m (the dimensionality of the input observations) to fix some\n dimensions for all observations. 
A value of 0 fixes the observation,\n a value > 0 makes it free.\n meta : dict, optional\n Free-form dictionary for metadata.\n\n Notes\n -----\n Each argument is attached to the member of the instance of the same name.\n The structures of `x` and `y` are described in the Model class docstring.\n If `y` is an integer, then the Data instance can only be used to fit with\n implicit models where the dimensionality of the response is equal to the\n specified value of `y`.\n\n The `we` argument weights the effect a deviation in the response variable\n has on the fit. The `wd` argument weights the effect a deviation in the\n input variable has on the fit. To handle multidimensional inputs and\n responses easily, the structure of these arguments has the n'th\n dimensional axis first. These arguments heavily use the structured\n arguments feature of ODRPACK to conveniently and flexibly support all\n options. See the ODRPACK User's Guide for a full explanation of how these\n weights are used in the algorithm. Basically, a higher value of the weight\n for a particular data point makes a deviation at that point more\n detrimental to the fit.\n\n \"\"\"\n\n def __init__(self, x, y=None, we=None, wd=None, fix=None, meta={}):\n self.x = _conv(x)\n self.y = _conv(y)\n self.we = _conv(we)\n self.wd = _conv(wd)\n self.fix = _conv(fix)\n self.meta = meta\n\n def set_meta(self, **kwds):\n \"\"\" Update the metadata dictionary with the keywords and data provided\n by keywords.\n\n Examples\n --------\n >>> data.set_meta(lab=\"Ph 7; Lab 26\", title=\"Ag110 + Ag108 Decay\")\n \"\"\"\n\n self.meta.update(kwds)\n\n def __getattr__(self, attr):\n \"\"\" Dispatch attribute access to the metadata dictionary.\n \"\"\"\n if attr in self.meta:\n return self.meta[attr]\n else:\n raise AttributeError(\"'%s' not in metadata\" % attr)\n\n\nclass RealData(Data):\n \"\"\"\n The data, with weightings as actual standard deviations and/or\n covariances.\n\n Parameters\n ----------\n x : array_like\n x\n y : array_like, optional\n y\n sx, sy : array_like, optional\n Standard deviations of `x`.\n `sx` are standard deviations of `x` and are converted to weights by\n dividing 1.0 by their squares.\n sy : array_like, optional\n Standard deviations of `y`.\n `sy` are standard deviations of `y` and are converted to weights by\n dividing 1.0 by their squares.\n covx : array_like, optional\n Covariance of `x`\n `covx` is an array of covariance matrices of `x` and are converted to\n weights by performing a matrix inversion on each observation's\n covariance matrix.\n covy : array_like, optional\n Covariance of `y`\n `covy` is an array of covariance matrices and are converted to\n weights by performing a matrix inversion on each observation's\n covariance matrix.\n fix : array_like, optional\n The argument and member fix is the same as Data.fix and ODR.ifixx:\n It is an array of integers with the same shape as `x` that\n determines which input observations are treated as fixed. One can\n use a sequence of length m (the dimensionality of the input\n observations) to fix some dimensions for all observations. 
A value\n of 0 fixes the observation, a value > 0 makes it free.\n meta : dict, optional\n Free-form dictionary for metadata.\n\n Notes\n -----\n The weights `wd` and `we` are computed from provided values as follows:\n\n `sx` and `sy` are converted to weights by dividing 1.0 by their squares.\n For example, ``wd = 1./numpy.power(`sx`, 2)``.\n\n `covx` and `covy` are arrays of covariance matrices and are converted to\n weights by performing a matrix inversion on each observation's covariance\n matrix. For example, ``we[i] = numpy.linalg.inv(covy[i])``.\n\n These arguments follow the same structured argument conventions as wd and\n we only restricted by their natures: `sx` and `sy` can't be rank-3, but\n `covx` and `covy` can be.\n\n Only set *either* `sx` or `covx` (not both). Setting both will raise an\n exception. Same with `sy` and `covy`.\n\n \"\"\"\n\n def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None,\n fix=None, meta={}):\n if (sx is not None) and (covx is not None):\n raise ValueError(\"cannot set both sx and covx\")\n if (sy is not None) and (covy is not None):\n raise ValueError(\"cannot set both sy and covy\")\n\n # Set flags for __getattr__\n self._ga_flags = {}\n if sx is not None:\n self._ga_flags['wd'] = 'sx'\n else:\n self._ga_flags['wd'] = 'covx'\n if sy is not None:\n self._ga_flags['we'] = 'sy'\n else:\n self._ga_flags['we'] = 'covy'\n\n self.x = _conv(x)\n self.y = _conv(y)\n self.sx = _conv(sx)\n self.sy = _conv(sy)\n self.covx = _conv(covx)\n self.covy = _conv(covy)\n self.fix = _conv(fix)\n self.meta = meta\n\n def _sd2wt(self, sd):\n \"\"\" Convert standard deviation to weights.\n \"\"\"\n\n return 1./numpy.power(sd, 2)\n\n def _cov2wt(self, cov):\n \"\"\" Convert covariance matrix(-ices) to weights.\n \"\"\"\n\n from numpy.dual import inv\n\n if len(cov.shape) == 2:\n return inv(cov)\n else:\n weights = numpy.zeros(cov.shape, float)\n\n for i in range(cov.shape[-1]): # n\n weights[:,:,i] = inv(cov[:,:,i])\n\n return weights\n\n def __getattr__(self, attr):\n lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx),\n ('wd', 'covx'): (self._cov2wt, self.covx),\n ('we', 'sy'): (self._sd2wt, self.sy),\n ('we', 'covy'): (self._cov2wt, self.covy)}\n\n if attr not in ('wd', 'we'):\n if attr in self.meta:\n return self.meta[attr]\n else:\n raise AttributeError(\"'%s' not in metadata\" % attr)\n else:\n func, arg = lookup_tbl[(attr, self._ga_flags[attr])]\n\n if arg is not None:\n return func(*(arg,))\n else:\n return None\n\n\nclass Model(object):\n \"\"\"\n The Model class stores information about the function you wish to fit.\n\n It stores the function itself, at the least, and optionally stores\n functions which compute the Jacobians used during fitting. Also, one\n can provide a function that will provide reasonable starting values\n for the fit parameters possibly given the set of data.\n\n Parameters\n ----------\n fcn : function\n fcn(beta, x) --> y\n fjacb : function\n Jacobian of fcn wrt the fit parameters beta.\n\n fjacb(beta, x) --> @f_i(x,B)/@B_j\n fjacd : function\n Jacobian of fcn wrt the (possibly multidimensional) input\n variable.\n\n fjacd(beta, x) --> @f_i(x,B)/@x_j\n extra_args : tuple, optional\n If specified, `extra_args` should be a tuple of extra\n arguments to pass to `fcn`, `fjacb`, and `fjacd`. 
Each will be called\n by `apply(fcn, (beta, x) + extra_args)`\n estimate : array_like of rank-1\n Provides estimates of the fit parameters from the data\n\n estimate(data) --> estbeta\n implicit : boolean\n If TRUE, specifies that the model\n is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit\n against\n meta : dict, optional\n freeform dictionary of metadata for the model\n\n Notes\n -----\n Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and\n return a NumPy array. The `estimate` object takes an instance of the\n Data class.\n\n Here are the rules for the shapes of the argument and return\n arrays of the callback functions:\n\n `x`\n if the input data is single-dimensional, then `x` is rank-1\n array; i.e. ``x = array([1, 2, 3, ...]); x.shape = (n,)``\n If the input data is multi-dimensional, then `x` is a rank-2 array;\n i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``.\n In all cases, it has the same shape as the input data array passed to\n `odr`. `m` is the dimensionality of the input data, `n` is the number\n of observations.\n `y`\n if the response variable is single-dimensional, then `y` is a\n rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``.\n If the response variable is multi-dimensional, then `y` is a rank-2\n array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape =\n (q, n)`` where `q` is the dimensionality of the response variable.\n `beta`\n rank-1 array of length `p` where `p` is the number of parameters;\n i.e. ``beta = array([B_1, B_2, ..., B_p])``\n `fjacb`\n if the response variable is multi-dimensional, then the\n return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] =\n d f_l(X,B)/d B_k`` evaluated at the i'th data point. If `q == 1`, then\n the return array is only rank-2 and with shape `(p, n)`.\n `fjacd`\n as with fjacb, only the return array's shape is `(q, m, n)`\n such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the i'th data\n point. If `q == 1`, then the return array's shape is `(m, n)`. If\n `m == 1`, the shape is (q, n). 
If `m == q == 1`, the shape is `(n,)`.\n\n \"\"\"\n\n def __init__(self, fcn, fjacb=None, fjacd=None,\n extra_args=None, estimate=None, implicit=0, meta=None):\n\n self.fcn = fcn\n self.fjacb = fjacb\n self.fjacd = fjacd\n\n if extra_args is not None:\n extra_args = tuple(extra_args)\n\n self.extra_args = extra_args\n self.estimate = estimate\n self.implicit = implicit\n self.meta = meta\n\n def set_meta(self, **kwds):\n \"\"\" Update the metadata dictionary with the keywords and data provided\n here.\n\n Examples\n --------\n set_meta(name=\"Exponential\", equation=\"y = a exp(b x) + c\")\n \"\"\"\n\n self.meta.update(kwds)\n\n def __getattr__(self, attr):\n \"\"\" Dispatch attribute access to the metadata.\n \"\"\"\n\n if attr in self.meta:\n return self.meta[attr]\n else:\n raise AttributeError(\"'%s' not in metadata\" % attr)\n\n\nclass Output(object):\n \"\"\"\n The Output class stores the output of an ODR run.\n\n Attributes\n ----------\n beta : ndarray\n Estimated parameter values, of shape (q,).\n sd_beta : ndarray\n Standard errors of the estimated parameters, of shape (p,).\n cov_beta : ndarray\n Covariance matrix of the estimated parameters, of shape (p,p).\n delta : ndarray, optional\n Array of estimated errors in input variables, of same shape as `x`.\n eps : ndarray, optional\n Array of estimated errors in response variables, of same shape as `y`.\n xplus : ndarray, optional\n Array of ``x + delta``.\n y : ndarray, optional\n Array ``y = fcn(x + delta)``.\n res_var : float, optional\n Residual variance.\n sum_sqare : float, optional\n Sum of squares error.\n sum_square_delta : float, optional\n Sum of squares of delta error.\n sum_square_eps : float, optional\n Sum of squares of eps error.\n inv_condnum : float, optional\n Inverse condition number (cf. ODRPACK UG p. 77).\n rel_error : float, optional\n Relative error in function values computed within fcn.\n work : ndarray, optional\n Final work array.\n work_ind : dict, optional\n Indices into work for drawing out values (cf. ODRPACK UG p. 83).\n info : int, optional\n Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38).\n stopreason : list of str, optional\n `info` interpreted into English.\n\n Notes\n -----\n Takes one argument for initialization, the return value from the\n function `odr`. 
The attributes listed as \"optional\" above are only\n present if `odr` was run with ``full_output=1``.\n\n \"\"\"\n\n def __init__(self, output):\n self.beta = output[0]\n self.sd_beta = output[1]\n self.cov_beta = output[2]\n\n if len(output) == 4:\n # full output\n self.__dict__.update(output[3])\n self.stopreason = _report_error(self.info)\n\n def pprint(self):\n \"\"\" Pretty-print important results.\n \"\"\"\n\n print('Beta:', self.beta)\n print('Beta Std Error:', self.sd_beta)\n print('Beta Covariance:', self.cov_beta)\n if hasattr(self, 'info'):\n print('Residual Variance:',self.res_var)\n print('Inverse Condition #:', self.inv_condnum)\n print('Reason(s) for Halting:')\n for r in self.stopreason:\n print(' %s' % r)\n\n\nclass ODR(object):\n \"\"\"\n The ODR class gathers all information and coordinates the running of the\n main fitting routine.\n\n Members of instances of the ODR class have the same names as the arguments\n to the initialization routine.\n\n Parameters\n ----------\n data : Data class instance\n instance of the Data class\n model : Model class instance\n instance of the Model class\n\n Other Parameters\n ----------------\n beta0 : array_like of rank-1\n a rank-1 sequence of initial parameter values. Optional if\n model provides an \"estimate\" function to estimate these values.\n delta0 : array_like of floats of rank-1, optional\n a (double-precision) float array to hold the initial values of\n the errors in the input variables. Must be same shape as data.x\n ifixb : array_like of ints of rank-1, optional\n sequence of integers with the same length as beta0 that determines\n which parameters are held fixed. A value of 0 fixes the parameter,\n a value > 0 makes the parameter free.\n ifixx : array_like of ints with same shape as data.x, optional\n an array of integers with the same shape as data.x that determines\n which input observations are treated as fixed. One can use a sequence\n of length m (the dimensionality of the input observations) to fix some\n dimensions for all observations. A value of 0 fixes the observation,\n a value > 0 makes it free.\n job : int, optional\n an integer telling ODRPACK what tasks to perform. See p. 31 of the\n ODRPACK User's Guide if you absolutely must set the value here. Use the\n method set_job post-initialization for a more readable interface.\n iprint : int, optional\n an integer telling ODRPACK what to print. See pp. 33-34 of the\n ODRPACK User's Guide if you absolutely must set the value here. Use the\n method set_iprint post-initialization for a more readable interface.\n errfile : str, optional\n string with the filename to print ODRPACK errors to. *Do Not Open\n This File Yourself!*\n rptfile : str, optional\n string with the filename to print ODRPACK summaries to. *Do Not\n Open This File Yourself!*\n ndigit : int, optional\n integer specifying the number of reliable digits in the computation\n of the function.\n taufac : float, optional\n float specifying the initial trust region. The default value is 1.\n The initial trust region is equal to taufac times the length of the\n first computed Gauss-Newton step. taufac must be less than 1.\n sstol : float, optional\n float specifying the tolerance for convergence based on the relative\n change in the sum-of-squares. The default value is eps**(1/2) where eps\n is the smallest value such that 1 + eps > 1 for double precision\n computation on the machine. 
sstol must be less than 1.\n partol : float, optional\n float specifying the tolerance for convergence based on the relative\n change in the estimated parameters. The default value is eps**(2/3) for\n explicit models and ``eps**(1/3)`` for implicit models. partol must be less\n than 1.\n maxit : int, optional\n integer specifying the maximum number of iterations to perform. For\n first runs, maxit is the total number of iterations performed and\n defaults to 50. For restarts, maxit is the number of additional\n iterations to perform and defaults to 10.\n stpb : array_like, optional\n sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute\n finite difference derivatives wrt the parameters.\n stpd : optional\n array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative\n step sizes to compute finite difference derivatives wrt the input\n variable errors. If stpd is a rank-1 array with length m (the\n dimensionality of the input variable), then the values are broadcast to\n all observations.\n sclb : array_like, optional\n sequence (``len(stpb) == len(beta0)``) of scaling factors for the\n parameters. The purpose of these scaling factors are to scale all of\n the parameters to around unity. Normally appropriate scaling factors\n are computed if this argument is not specified. Specify them yourself\n if the automatic procedure goes awry.\n scld : array_like, optional\n array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling\n factors for the *errors* in the input variables. Again, these factors\n are automatically computed if you do not provide them. If scld.shape ==\n (m,), then the scaling factors are broadcast to all observations.\n work : ndarray, optional\n array to hold the double-valued working data for ODRPACK. When\n restarting, takes the value of self.output.work.\n iwork : ndarray, optional\n array to hold the integer-valued working data for ODRPACK. 
When\n restarting, takes the value of self.output.iwork.\n\n Attributes\n ----------\n data : Data\n The data for this fit\n model : Model\n The model used in fit\n output : Output\n An instance if the Output class containing all of the returned\n data from an invocation of ODR.run() or ODR.restart()\n\n \"\"\"\n\n def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,\n ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,\n ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,\n stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None):\n\n self.data = data\n self.model = model\n\n if beta0 is None:\n if self.model.estimate is not None:\n self.beta0 = _conv(self.model.estimate(self.data))\n else:\n raise ValueError(\n \"must specify beta0 or provide an estimater with the model\"\n )\n else:\n self.beta0 = _conv(beta0)\n\n self.delta0 = _conv(delta0)\n # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit\n # platforms.\n # XXX: some other FORTRAN compilers may not agree.\n self.ifixx = _conv(ifixx, dtype=numpy.int32)\n self.ifixb = _conv(ifixb, dtype=numpy.int32)\n self.job = job\n self.iprint = iprint\n self.errfile = errfile\n self.rptfile = rptfile\n self.ndigit = ndigit\n self.taufac = taufac\n self.sstol = sstol\n self.partol = partol\n self.maxit = maxit\n self.stpb = _conv(stpb)\n self.stpd = _conv(stpd)\n self.sclb = _conv(sclb)\n self.scld = _conv(scld)\n self.work = _conv(work)\n self.iwork = _conv(iwork)\n\n self.output = None\n\n self._check()\n\n def _check(self):\n \"\"\" Check the inputs for consistency, but don't bother checking things\n that the builtin function odr will check.\n \"\"\"\n\n x_s = list(self.data.x.shape)\n\n if isinstance(self.data.y, numpy.ndarray):\n y_s = list(self.data.y.shape)\n if self.model.implicit:\n raise odr_error(\"an implicit model cannot use response data\")\n else:\n # implicit model with q == self.data.y\n y_s = [self.data.y, x_s[-1]]\n if not self.model.implicit:\n raise odr_error(\"an explicit model needs response data\")\n self.set_job(fit_type=1)\n\n if x_s[-1] != y_s[-1]:\n raise odr_error(\"number of observations do not match\")\n\n n = x_s[-1]\n\n if len(x_s) == 2:\n m = x_s[0]\n else:\n m = 1\n if len(y_s) == 2:\n q = y_s[0]\n else:\n q = 1\n\n p = len(self.beta0)\n\n # permissible output array shapes\n\n fcn_perms = [(q, n)]\n fjacd_perms = [(q, m, n)]\n fjacb_perms = [(q, p, n)]\n\n if q == 1:\n fcn_perms.append((n,))\n fjacd_perms.append((m, n))\n fjacb_perms.append((p, n))\n if m == 1:\n fjacd_perms.append((q, n))\n if p == 1:\n fjacb_perms.append((q, n))\n if m == q == 1:\n fjacd_perms.append((n,))\n if p == q == 1:\n fjacb_perms.append((n,))\n\n # try evaluating the supplied functions to make sure they provide\n # sensible outputs\n\n arglist = (self.beta0, self.data.x)\n if self.model.extra_args is not None:\n arglist = arglist + self.model.extra_args\n res = self.model.fcn(*arglist)\n\n if res.shape not in fcn_perms:\n print(res.shape)\n print(fcn_perms)\n raise odr_error(\"fcn does not output %s-shaped array\" % y_s)\n\n if self.model.fjacd is not None:\n res = self.model.fjacd(*arglist)\n if res.shape not in fjacd_perms:\n raise odr_error(\n \"fjacd does not output %s-shaped array\" % (q, m, n))\n if self.model.fjacb is not None:\n res = self.model.fjacb(*arglist)\n if res.shape not in fjacb_perms:\n raise odr_error(\n \"fjacb does not output %s-shaped array\" % (q, p, n))\n\n # check shape of delta0\n\n if self.delta0 is not None and self.delta0.shape != 
self.data.x.shape:\n raise odr_error(\n \"delta0 is not a %s-shaped array\" % self.data.x.shape)\n\n def _gen_work(self):\n \"\"\" Generate a suitable work array if one does not already exist.\n \"\"\"\n\n n = self.data.x.shape[-1]\n p = self.beta0.shape[0]\n\n if len(self.data.x.shape) == 2:\n m = self.data.x.shape[0]\n else:\n m = 1\n\n if self.model.implicit:\n q = self.data.y\n elif len(self.data.y.shape) == 2:\n q = self.data.y.shape[0]\n else:\n q = 1\n\n if self.data.we is None:\n ldwe = ld2we = 1\n elif len(self.data.we.shape) == 3:\n ld2we, ldwe = self.data.we.shape[1:]\n else:\n # Okay, this isn't precisely right, but for this calculation,\n # it's fine\n ldwe = 1\n ld2we = self.data.we.shape[1]\n\n if self.job % 10 < 2:\n # ODR not OLS\n lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p +\n 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q)\n else:\n # OLS not ODR\n lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p +\n 5*q + q*(p+m) + ldwe*ld2we*q)\n\n if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\\\n and self.work.dtype.str.endswith('f8'):\n # the existing array is fine\n return\n else:\n self.work = numpy.zeros((lwork,), float)\n\n def set_job(self, fit_type=None, deriv=None, var_calc=None,\n del_init=None, restart=None):\n \"\"\"\n Sets the \"job\" parameter is a hopefully comprehensible way.\n\n If an argument is not specified, then the value is left as is. The\n default value from class initialization is for all of these options set\n to 0.\n\n Parameters\n ----------\n fit_type : {0, 1, 2} int\n 0 -> explicit ODR\n\n 1 -> implicit ODR\n\n 2 -> ordinary least-squares\n deriv : {0, 1, 2, 3} int\n 0 -> forward finite differences\n\n 1 -> central finite differences\n\n 2 -> user-supplied derivatives (Jacobians) with results\n checked by ODRPACK\n\n 3 -> user-supplied derivatives, no checking\n var_calc : {0, 1, 2} int\n 0 -> calculate asymptotic covariance matrix and fit\n parameter uncertainties (V_B, s_B) using derivatives\n recomputed at the final solution\n\n 1 -> calculate V_B and s_B using derivatives from last iteration\n\n 2 -> do not calculate V_B and s_B\n del_init : {0, 1} int\n 0 -> initial input variable offsets set to 0\n\n 1 -> initial offsets provided by user in variable \"work\"\n restart : {0, 1} int\n 0 -> fit is not a restart\n\n 1 -> fit is a restart\n\n Notes\n -----\n The permissible values are different from those given on pg. 31 of the\n ODRPACK User's Guide only in that one cannot specify numbers greater than\n the last value for each variable.\n\n If one does not supply functions to compute the Jacobians, the fitting\n procedure will change deriv to 0, finite differences, as a default. 
To\n initialize the input variable offsets by yourself, set del_init to 1 and\n put the offsets into the \"work\" variable correctly.\n\n \"\"\"\n\n if self.job is None:\n job_l = [0, 0, 0, 0, 0]\n else:\n job_l = [self.job // 10000 % 10,\n self.job // 1000 % 10,\n self.job // 100 % 10,\n self.job // 10 % 10,\n self.job % 10]\n\n if fit_type in (0, 1, 2):\n job_l[4] = fit_type\n if deriv in (0, 1, 2, 3):\n job_l[3] = deriv\n if var_calc in (0, 1, 2):\n job_l[2] = var_calc\n if del_init in (0, 1):\n job_l[1] = del_init\n if restart in (0, 1):\n job_l[0] = restart\n\n self.job = (job_l[0]*10000 + job_l[1]*1000 +\n job_l[2]*100 + job_l[3]*10 + job_l[4])\n\n def set_iprint(self, init=None, so_init=None,\n iter=None, so_iter=None, iter_step=None, final=None, so_final=None):\n \"\"\" Set the iprint parameter for the printing of computation reports.\n\n If any of the arguments are specified here, then they are set in the\n iprint member. If iprint is not set manually or with this method, then\n ODRPACK defaults to no printing. If no filename is specified with the\n member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to\n print to stdout in addition to the specified filename by setting the\n so_* arguments to this function, but one cannot specify to print to\n stdout but not a file since one can do that by not specifying a rptfile\n filename.\n\n There are three reports: initialization, iteration, and final reports.\n They are represented by the arguments init, iter, and final\n respectively. The permissible values are 0, 1, and 2 representing \"no\n report\", \"short report\", and \"long report\" respectively.\n\n The argument iter_step (0 <= iter_step <= 9) specifies how often to make\n the iteration report; the report will be made for every iter_step'th\n iteration starting with iteration one. 
If iter_step == 0, then no\n iteration report is made, regardless of the other arguments.\n\n If the rptfile is None, then any so_* arguments supplied will raise an\n exception.\n \"\"\"\n if self.iprint is None:\n self.iprint = 0\n\n ip = [self.iprint // 1000 % 10,\n self.iprint // 100 % 10,\n self.iprint // 10 % 10,\n self.iprint % 10]\n\n # make a list to convert iprint digits to/from argument inputs\n # rptfile, stdout\n ip2arg = [[0, 0], # none, none\n [1, 0], # short, none\n [2, 0], # long, none\n [1, 1], # short, short\n [2, 1], # long, short\n [1, 2], # short, long\n [2, 2]] # long, long\n\n if (self.rptfile is None and\n (so_init is not None or\n so_iter is not None or\n so_final is not None)):\n raise odr_error(\n \"no rptfile specified, cannot output to stdout twice\")\n\n iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]]\n\n if init is not None:\n iprint_l[0] = init\n if so_init is not None:\n iprint_l[1] = so_init\n if iter is not None:\n iprint_l[2] = iter\n if so_iter is not None:\n iprint_l[3] = so_iter\n if final is not None:\n iprint_l[4] = final\n if so_final is not None:\n iprint_l[5] = so_final\n\n if iter_step in range(10):\n # 0..9\n ip[2] = iter_step\n\n ip[0] = ip2arg.index(iprint_l[0:2])\n ip[1] = ip2arg.index(iprint_l[2:4])\n ip[3] = ip2arg.index(iprint_l[4:6])\n\n self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3]\n\n def run(self):\n \"\"\" Run the fitting routine with all of the information given.\n\n Returns\n -------\n output : Output instance\n This object is also assigned to the attribute .output .\n \"\"\"\n\n args = (self.model.fcn, self.beta0, self.data.y, self.data.x)\n kwds = {'full_output': 1}\n kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile',\n 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb',\n 'stpd', 'sclb', 'scld', 'work', 'iwork']\n\n if self.delta0 is not None and self.job % 1000 // 10 == 1:\n # delta0 provided and fit is not a restart\n self._gen_work()\n\n d0 = numpy.ravel(self.delta0)\n\n self.work[:len(d0)] = d0\n\n # set the kwds from other objects explicitly\n if self.model.fjacb is not None:\n kwds['fjacb'] = self.model.fjacb\n if self.model.fjacd is not None:\n kwds['fjacd'] = self.model.fjacd\n if self.data.we is not None:\n kwds['we'] = self.data.we\n if self.data.wd is not None:\n kwds['wd'] = self.data.wd\n if self.model.extra_args is not None:\n kwds['extra_args'] = self.model.extra_args\n\n # implicitly set kwds from self's members\n for attr in kwd_l:\n obj = getattr(self, attr)\n if obj is not None:\n kwds[attr] = obj\n\n self.output = Output(odr(*args, **kwds))\n\n return self.output\n\n def restart(self, iter=None):\n \"\"\" Restarts the run with iter more iterations.\n\n Parameters\n ----------\n iter : int, optional\n ODRPACK's default for the number of new iterations is 10.\n\n Returns\n -------\n output : Output instance\n This object is also assigned to the attribute .output .\n \"\"\"\n\n if self.output is None:\n raise odr_error(\"cannot restart: run() has not been called before\")\n\n self.set_job(restart=1)\n self.work = self.output.work\n self.iwork = self.output.iwork\n\n self.maxit = iter\n\n return self.run()\n",
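A minimal usage sketch for the Model/Data/ODR classes defined above. The linear model function, the synthetic data, and the starting values beta0 are illustrative assumptions, not part of the module itself; beta0 is required here because the Model carries no estimate() callable.

import numpy as np
from scipy.odr import Data, Model, ODR

def linear_fcn(beta, x):
    # beta is the rank-1 parameter vector, x the input data (see the shape rules above)
    return beta[0] * x + beta[1]

x = np.linspace(0.0, 10.0, 25)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.shape)

model = Model(linear_fcn)
data = Data(x, y)
fit = ODR(data, model, beta0=[1.0, 0.0])   # beta0 supplied explicitly (no estimate())
out = fit.run()
out.pprint()                               # prints beta, sd_beta, cov_beta, halt reason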
"\"\"\"\nLaplacian of a compressed-sparse graph\n\"\"\"\n\n# Authors: Aric Hagberg <[email protected]>\n# Gael Varoquaux <[email protected]>\n# Jake Vanderplas <[email protected]>\n# License: BSD\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom scipy.sparse import isspmatrix, coo_matrix\n\n\n###############################################################################\n# Graph laplacian\ndef laplacian(csgraph, normed=False, return_diag=False):\n \"\"\" Return the Laplacian matrix of a directed graph.\n\n For non-symmetric graphs the out-degree is used in the computation.\n\n Parameters\n ----------\n csgraph : array_like or sparse matrix, 2 dimensions\n compressed-sparse graph, with shape (N, N).\n normed : bool, optional\n If True, then compute normalized Laplacian.\n return_diag : bool, optional\n If True, then return diagonal as well as laplacian.\n\n Returns\n -------\n lap : ndarray\n The N x N laplacian matrix of graph.\n diag : ndarray\n The length-N diagonal of the laplacian matrix.\n diag is returned only if return_diag is True.\n\n Notes\n -----\n The Laplacian matrix of a graph is sometimes referred to as the\n \"Kirchoff matrix\" or the \"admittance matrix\", and is useful in many\n parts of spectral graph theory. In particular, the eigen-decomposition\n of the laplacian matrix can give insight into many properties of the graph.\n\n For non-symmetric directed graphs, the laplacian is computed using the\n out-degree of each node.\n\n Examples\n --------\n >>> from scipy.sparse import csgraph\n >>> G = np.arange(5) * np.arange(5)[:, np.newaxis]\n >>> G\n array([[ 0, 0, 0, 0, 0],\n [ 0, 1, 2, 3, 4],\n [ 0, 2, 4, 6, 8],\n [ 0, 3, 6, 9, 12],\n [ 0, 4, 8, 12, 16]])\n >>> csgraph.laplacian(G, normed=False)\n array([[ 0, 0, 0, 0, 0],\n [ 0, 9, -2, -3, -4],\n [ 0, -2, 16, -6, -8],\n [ 0, -3, -6, 21, -12],\n [ 0, -4, -8, -12, 24]])\n \"\"\"\n if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:\n raise ValueError('csgraph must be a square matrix or array')\n\n if normed and (np.issubdtype(csgraph.dtype, np.int)\n or np.issubdtype(csgraph.dtype, np.uint)):\n csgraph = csgraph.astype(np.float)\n\n if isspmatrix(csgraph):\n return _laplacian_sparse(csgraph, normed=normed,\n return_diag=return_diag)\n else:\n return _laplacian_dense(csgraph, normed=normed,\n return_diag=return_diag)\n\n\ndef _laplacian_sparse(graph, normed=False, return_diag=False):\n n_nodes = graph.shape[0]\n if not graph.format == 'coo':\n lap = (-graph).tocoo()\n else:\n lap = -graph.copy()\n diag_mask = (lap.row == lap.col)\n if not diag_mask.sum() == n_nodes:\n # The sparsity pattern of the matrix has holes on the diagonal,\n # we need to fix that\n diag_idx = lap.row[diag_mask]\n diagonal_holes = list(set(range(n_nodes)).difference(\n diag_idx))\n new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])\n new_row = np.concatenate([lap.row, diagonal_holes])\n new_col = np.concatenate([lap.col, diagonal_holes])\n lap = coo_matrix((new_data, (new_row, new_col)), shape=lap.shape)\n diag_mask = (lap.row == lap.col)\n\n lap.data[diag_mask] = 0\n w = -np.asarray(lap.sum(axis=1)).squeeze()\n if normed:\n w = np.sqrt(w)\n w_zeros = (w == 0)\n w[w_zeros] = 1\n lap.data /= w[lap.row]\n lap.data /= w[lap.col]\n lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(lap.data.dtype)\n else:\n lap.data[diag_mask] = w[lap.row[diag_mask]]\n\n if return_diag:\n return lap, w\n return lap\n\n\ndef _laplacian_dense(graph, normed=False, return_diag=False):\n 
n_nodes = graph.shape[0]\n lap = -np.asarray(graph) # minus sign leads to a copy\n\n # set diagonal to zero\n lap.flat[::n_nodes + 1] = 0\n w = -lap.sum(axis=0)\n if normed:\n w = np.sqrt(w)\n w_zeros = (w == 0)\n w[w_zeros] = 1\n lap /= w\n lap /= w[:, np.newaxis]\n lap.flat[::n_nodes + 1] = 1 - w_zeros\n else:\n lap.flat[::n_nodes + 1] = w\n\n if return_diag:\n return lap, w\n return lap\n",
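A short sketch of calling the laplacian routine above on a sparse input with normalization; the example graph reuses the one from the docstring, and the sparse conversion is an illustrative assumption.

import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import laplacian

G = np.arange(5) * np.arange(5)[:, np.newaxis]
lap, diag = laplacian(coo_matrix(G), normed=True, return_diag=True)
# lap comes back sparse because the input was sparse;
# diag is the length-N diagonal returned because return_diag=True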
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom numpy.testing import assert_equal, assert_array_equal, assert_allclose, \\\n run_module_suite\n\nfrom scipy.interpolate import griddata\n\n\nclass TestGriddata(object):\n def test_fill_value(self):\n x = [(0,0), (0,1), (1,0)]\n y = [1, 2, 3]\n\n yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)\n assert_array_equal(yi, [-1., -1, 1])\n\n yi = griddata(x, y, [(1,1), (1,2), (0,0)])\n assert_array_equal(yi, [np.nan, np.nan, 1])\n\n def test_alternative_call(self):\n x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = (np.arange(x.shape[0], dtype=np.double)[:,None]\n + np.array([0,1])[None,:])\n\n for method in ('nearest', 'linear', 'cubic'):\n yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method)\n assert_allclose(y, yi, atol=1e-14, err_msg=method)\n\n def test_multivalue_2d(self):\n x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = (np.arange(x.shape[0], dtype=np.double)[:,None]\n + np.array([0,1])[None,:])\n\n for method in ('nearest', 'linear', 'cubic'):\n yi = griddata(x, y, x, method=method)\n assert_allclose(y, yi, atol=1e-14, err_msg=method)\n\n def test_multipoint_2d(self):\n x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n\n xi = x[:,None,:] + np.array([0,0,0])[None,:,None]\n\n for method in ('nearest', 'linear', 'cubic'):\n yi = griddata(x, y, xi, method=method)\n\n assert_equal(yi.shape, (5, 3), err_msg=method)\n assert_allclose(yi, np.tile(y[:,None], (1, 3)),\n atol=1e-14, err_msg=method)\n\n def test_complex_2d(self):\n x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],\n dtype=np.double)\n y = np.arange(x.shape[0], dtype=np.double)\n y = y - 2j*y[::-1]\n\n xi = x[:,None,:] + np.array([0,0,0])[None,:,None]\n\n for method in ('nearest', 'linear', 'cubic'):\n yi = griddata(x, y, xi, method=method)\n\n assert_equal(yi.shape, (5, 3), err_msg=method)\n assert_allclose(yi, np.tile(y[:,None], (1, 3)),\n atol=1e-14, err_msg=method)\n\n def test_1d(self):\n x = np.array([1, 2.5, 3, 4.5, 5, 6])\n y = np.array([1, 2, 0, 3.9, 2, 1])\n\n for method in ('nearest', 'linear', 'cubic'):\n assert_allclose(griddata(x, y, x, method=method), y,\n err_msg=method, atol=1e-14)\n assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,\n err_msg=method, atol=1e-14)\n assert_allclose(griddata((x,), y, (x,), method=method), y,\n err_msg=method, atol=1e-14)\n\n def test_1d_unsorted(self):\n x = np.array([2.5, 1, 4.5, 5, 6, 3])\n y = np.array([1, 2, 0, 3.9, 2, 1])\n\n for method in ('nearest', 'linear', 'cubic'):\n assert_allclose(griddata(x, y, x, method=method), y,\n err_msg=method, atol=1e-10)\n assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,\n err_msg=method, atol=1e-10)\n assert_allclose(griddata((x,), y, (x,), method=method), y,\n err_msg=method, atol=1e-10)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"from __future__ import division, absolute_import, print_function\n\nfrom .info import __doc__\nfrom numpy.version import version as __version__\n\nfrom . import multiarray\nfrom . import umath\nfrom . import _internal # for freeze programs\nfrom . import numerictypes as nt\nmultiarray.set_typeDict(nt.sctypeDict)\nfrom . import numeric\nfrom .numeric import *\nfrom . import fromnumeric\nfrom .fromnumeric import *\nfrom . import defchararray as char\nfrom . import records as rec\nfrom .records import *\nfrom .memmap import *\nfrom .defchararray import chararray\nfrom . import scalarmath\nfrom . import function_base\nfrom .function_base import *\nfrom . import machar\nfrom .machar import *\nfrom . import getlimits\nfrom .getlimits import *\nfrom . import shape_base\nfrom .shape_base import *\ndel nt\n\nfrom .fromnumeric import amax as max, amin as min, \\\n round_ as round\nfrom .numeric import absolute as abs\n\n__all__ = ['char', 'rec', 'memmap']\n__all__ += numeric.__all__\n__all__ += fromnumeric.__all__\n__all__ += rec.__all__\n__all__ += ['chararray']\n__all__ += function_base.__all__\n__all__ += machar.__all__\n__all__ += getlimits.__all__\n__all__ += shape_base.__all__\n\n\nfrom numpy.testing import Tester\ntest = Tester().test\nbench = Tester().bench\n\n# Make it possible so that ufuncs can be pickled\n# Here are the loading and unloading functions\n# The name numpy.core._ufunc_reconstruct must be\n# available for unpickling to work.\ndef _ufunc_reconstruct(module, name):\n mod = __import__(module)\n return getattr(mod, name)\n\ndef _ufunc_reduce(func):\n from pickle import whichmodule\n name = func.__name__\n return _ufunc_reconstruct, (whichmodule(func, name), name)\n\n\nimport sys\nif sys.version_info[0] >= 3:\n import copyreg\nelse:\n import copy_reg as copyreg\n\ncopyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)\n# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)\ndel copyreg\ndel sys\ndel _ufunc_reduce\n",
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc, isspmatrix_csr\nfrom ._tools import csgraph_to_dense, csgraph_from_dense,\\\n csgraph_masked_from_dense, csgraph_from_masked\n\nDTYPE = np.float64\n\n\ndef validate_graph(csgraph, directed, dtype=DTYPE,\n csr_output=True, dense_output=True,\n copy_if_dense=False, copy_if_sparse=False,\n null_value_in=0, null_value_out=np.inf,\n infinity_null=True, nan_null=True):\n \"\"\"Routine for validation and conversion of csgraph inputs\"\"\"\n if not (csr_output or dense_output):\n raise ValueError(\"Internal: dense or csr output must be true\")\n\n # if undirected and csc storage, then transposing in-place\n # is quicker than later converting to csr.\n if (not directed) and isspmatrix_csc(csgraph):\n csgraph = csgraph.T\n\n if isspmatrix(csgraph):\n if csr_output:\n csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)\n else:\n csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)\n elif np.ma.is_masked(csgraph):\n if dense_output:\n mask = csgraph.mask\n csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)\n csgraph[mask] = null_value_out\n else:\n csgraph = csgraph_from_masked(csgraph)\n else:\n if dense_output:\n csgraph = csgraph_masked_from_dense(csgraph,\n copy=copy_if_dense,\n null_value=null_value_in,\n nan_null=nan_null,\n infinity_null=infinity_null)\n mask = csgraph.mask\n csgraph = np.asarray(csgraph.data, dtype=DTYPE)\n csgraph[mask] = null_value_out\n else:\n csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,\n infinity_null=infinity_null,\n nan_null=nan_null)\n\n if csgraph.ndim != 2:\n raise ValueError(\"compressed-sparse graph must be two dimensional\")\n\n if csgraph.shape[0] != csgraph.shape[1]:\n raise ValueError(\"compressed-sparse graph must be shape (N, N)\")\n\n return csgraph\n",
"from __future__ import division, print_function\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('fft', parent_package, top_path)\n\n config.add_data_dir('tests')\n\n # Configure fftpack_lite\n config.add_extension('fftpack_lite',\n sources=['fftpack_litemodule.c', 'fftpack.c']\n )\n\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n",
"#!/usr/bin/env python\n\"\"\"Tests for the linalg.isolve.lgmres module\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom numpy.testing import TestCase, assert_\n\nfrom numpy import zeros, array, allclose\nfrom scipy.linalg import norm\nfrom scipy.sparse import csr_matrix\n\nfrom scipy.sparse.linalg.interface import LinearOperator\nfrom scipy.sparse.linalg import splu\nfrom scipy.sparse.linalg.isolve import lgmres\n\nAm = csr_matrix(array([[-2,1,0,0,0,9],\n [1,-2,1,0,5,0],\n [0,1,-2,1,0,0],\n [0,0,1,-2,1,0],\n [0,3,0,1,-2,1],\n [1,0,0,0,1,-2]]))\nb = array([1,2,3,4,5,6])\ncount = [0]\n\n\ndef matvec(v):\n count[0] += 1\n return Am*v\nA = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)\n\n\ndef do_solve(**kw):\n count[0] = 0\n x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=6, tol=1e-14, **kw)\n count_0 = count[0]\n assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))\n return x0, count_0\n\n\nclass TestLGMRES(TestCase):\n def test_preconditioner(self):\n # Check that preconditioning works\n pc = splu(Am.tocsc())\n M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)\n\n x0, count_0 = do_solve()\n x1, count_1 = do_solve(M=M)\n\n assert_(count_1 == 3)\n assert_(count_1 < count_0/2)\n assert_(allclose(x1, x0, rtol=1e-14))\n\n def test_outer_v(self):\n # Check that the augmentation vectors behave as expected\n\n outer_v = []\n x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)\n assert_(len(outer_v) > 0)\n assert_(len(outer_v) <= 6)\n\n x1, count_1 = do_solve(outer_k=6, outer_v=outer_v)\n assert_(count_1 == 2, count_1)\n assert_(count_1 < count_0/2)\n assert_(allclose(x1, x0, rtol=1e-14))\n\n # ---\n\n outer_v = []\n x0, count_0 = do_solve(outer_k=6, outer_v=outer_v, store_outer_Av=False)\n assert_(array([v[1] is None for v in outer_v]).all())\n assert_(len(outer_v) > 0)\n assert_(len(outer_v) <= 6)\n\n x1, count_1 = do_solve(outer_k=6, outer_v=outer_v)\n assert_(count_1 == 3, count_1)\n assert_(count_1 < count_0/2)\n assert_(allclose(x1, x0, rtol=1e-14))\n\nif __name__ == \"__main__\":\n import nose\n nose.run(argv=['', __file__])\n",
"\"\"\"\nSparse matrix functions\n\"\"\"\n\n#\n# Authors: Travis Oliphant, March 2002\n# Anthony Scopatz, August 2012 (Sparse Updates)\n# Jake Vanderplas, August 2012 (Sparse Updates)\n#\n\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = ['expm', 'inv']\n\nimport math\n\nfrom numpy import asarray, dot, eye, ceil, log2\nfrom numpy import matrix as mat\nimport numpy as np\n\nimport scipy.misc\nfrom scipy.linalg.misc import norm\nfrom scipy.linalg.basic import solve, solve_triangular, inv\n\nfrom scipy.sparse.base import isspmatrix\nfrom scipy.sparse.construct import eye as speye\nfrom scipy.sparse.linalg import spsolve\n\nimport scipy.sparse\nimport scipy.sparse.linalg\nfrom scipy.sparse.linalg.interface import LinearOperator\n\nUPPER_TRIANGULAR = 'upper_triangular'\n\n\ndef inv(A):\n \"\"\"\n Compute the inverse of a sparse matrix\n\n .. versionadded:: 0.12.0\n\n Parameters\n ----------\n A : (M,M) ndarray or sparse matrix\n square matrix to be inverted\n\n Returns\n -------\n Ainv : (M,M) ndarray or sparse matrix\n inverse of `A`\n\n Notes\n -----\n This computes the sparse inverse of `A`. If the inverse of `A` is expected\n to be non-sparse, it will likely be faster to convert `A` to dense and use\n scipy.linalg.inv.\n\n \"\"\"\n I = speye(A.shape[0], A.shape[1], dtype=A.dtype, format=A.format)\n Ainv = spsolve(A, I)\n return Ainv\n\n\ndef _exact_1_norm(A):\n # A compatibility function which should eventually disappear.\n # This is copypasted from expm_action.\n if scipy.sparse.isspmatrix(A):\n return max(abs(A).sum(axis=0).flat)\n else:\n return np.linalg.norm(A, 1)\n\n\ndef _ident_like(A):\n # A compatibility function which should eventually disappear.\n # This is copypasted from expm_action.\n if scipy.sparse.isspmatrix(A):\n return scipy.sparse.construct.eye(A.shape[0], A.shape[1],\n dtype=A.dtype, format=A.format)\n else:\n return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)\n\n\ndef _count_nonzero(A):\n # A compatibility function which should eventually disappear.\n #XXX There should be a better way to do this when A is sparse\n # in the traditional sense.\n if isspmatrix(A):\n return np.sum(A.toarray() != 0)\n else:\n return np.sum(A != 0)\n\n\ndef _is_upper_triangular(A):\n # This function could possibly be of wider interest.\n if isspmatrix(A):\n lower_part = scipy.sparse.tril(A, -1)\n if lower_part.nnz == 0:\n # structural upper triangularity\n return True\n else:\n # coincidental upper triangularity\n return _count_nonzero(lower_part) == 0\n else:\n return _count_nonzero(np.tril(A, -1)) == 0\n\n\nclass MatrixPowerOperator(LinearOperator):\n\n def __init__(self, A, p):\n if A.ndim != 2 or A.shape[0] != A.shape[1]:\n raise ValueError('expected A to be like a square matrix')\n if p < 0:\n raise ValueError('expected p to be a non-negative integer')\n self._A = A\n self._p = p\n self.ndim = A.ndim\n self.shape = A.shape\n\n def matvec(self, x):\n for i in range(self._p):\n x = self._A.dot(x)\n return x\n\n def rmatvec(self, x):\n for i in range(self._p):\n x = x.dot(self._A)\n return x\n\n def matmat(self, X):\n for i in range(self._p):\n X = self._A.dot(X)\n return X\n\n @property\n def T(self):\n return MatrixPowerOperator(self._A.T, self._p)\n\n\nclass ProductOperator(LinearOperator):\n \"\"\"\n For now, this is limited to products of multiple square matrices.\n \"\"\"\n\n def __init__(self, *args):\n for A in args:\n if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n raise ValueError(\n 'For now, the ProductOperator implementation is '\n 'limited 
to the product of multiple square matrices.')\n if args:\n n = args[0].shape[0]\n for A in args:\n for d in A.shape:\n if d != n:\n raise ValueError(\n 'The square matrices of the ProductOperator '\n 'must all have the same shape.')\n self.shape = (n, n)\n self.ndim = len(self.shape)\n self._operator_sequence = args\n\n def matvec(self, x):\n for A in reversed(self._operator_sequence):\n x = A.dot(x)\n return x\n\n def rmatvec(self, x):\n for A in self._operator_sequence:\n x = x.dot(A)\n return x\n\n def matmat(self, X):\n for A in reversed(self._operator_sequence):\n X = A.dot(X)\n return X\n\n @property\n def T(self):\n T_args = [A.T for A in reversed(self._operator_sequence)]\n return ProductOperator(*T_args)\n\n\ndef _onenormest_matrix_power(A, p,\n t=2, itmax=5, compute_v=False, compute_w=False):\n \"\"\"\n Efficiently estimate the 1-norm of A^p.\n\n Parameters\n ----------\n A : ndarray\n Matrix whose 1-norm of a power is to be computed.\n p : int\n Non-negative integer power.\n t : int, optional\n A positive parameter controlling the tradeoff between\n accuracy versus time and memory usage.\n Larger values take longer and use more memory\n but give more accurate output.\n itmax : int, optional\n Use at most this many iterations.\n compute_v : bool, optional\n Request a norm-maximizing linear operator input vector if True.\n compute_w : bool, optional\n Request a norm-maximizing linear operator output vector if True.\n\n Returns\n -------\n est : float\n An underestimate of the 1-norm of the sparse matrix.\n v : ndarray, optional\n The vector such that ||Av||_1 == est*||v||_1.\n It can be thought of as an input to the linear operator\n that gives an output with particularly large norm.\n w : ndarray, optional\n The vector Av which has relatively large 1-norm.\n It can be thought of as an output of the linear operator\n that is relatively large in norm compared to the input.\n\n \"\"\"\n #XXX Eventually turn this into an API function in the _onenormest module,\n #XXX and remove its underscore,\n #XXX but wait until expm_action and expm_2009 go into scipy.\n return scipy.sparse.linalg.onenormest(MatrixPowerOperator(A, p))\n\n\ndef _onenormest_product(operator_seq,\n t=2, itmax=5, compute_v=False, compute_w=False):\n \"\"\"\n Efficiently estimate the 1-norm of the matrix product of the args.\n\n Parameters\n ----------\n operator_seq : linear operator sequence\n Matrices whose 1-norm of product is to be computed.\n t : int, optional\n A positive parameter controlling the tradeoff between\n accuracy versus time and memory usage.\n Larger values take longer and use more memory\n but give more accurate output.\n itmax : int, optional\n Use at most this many iterations.\n compute_v : bool, optional\n Request a norm-maximizing linear operator input vector if True.\n compute_w : bool, optional\n Request a norm-maximizing linear operator output vector if True.\n\n Returns\n -------\n est : float\n An underestimate of the 1-norm of the sparse matrix.\n v : ndarray, optional\n The vector such that ||Av||_1 == est*||v||_1.\n It can be thought of as an input to the linear operator\n that gives an output with particularly large norm.\n w : ndarray, optional\n The vector Av which has relatively large 1-norm.\n It can be thought of as an output of the linear operator\n that is relatively large in norm compared to the input.\n\n \"\"\"\n #XXX Eventually turn this into an API function in the _onenormest module,\n #XXX and remove its underscore,\n #XXX but wait until expm_2009 goes into scipy.\n return 
scipy.sparse.linalg.onenormest(ProductOperator(*operator_seq))\n\n\ndef expm(A):\n \"\"\"\n Compute the matrix exponential using Pade approximation.\n\n .. versionadded:: 0.12.0\n\n Parameters\n ----------\n A : (M,M) array or sparse matrix\n 2D Array or Matrix (sparse or dense) to be exponentiated\n\n Returns\n -------\n expA : (M,M) ndarray\n Matrix exponential of `A`\n\n Notes\n -----\n This is algorithm (6.1) which is a simplification of algorithm (5.1).\n\n References\n ----------\n .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)\n \"A New Scaling and Squaring Algorithm for the Matrix Exponential.\"\n SIAM Journal on Matrix Analysis and Applications.\n 31 (3). pp. 970-989. ISSN 1095-7162\n\n \"\"\"\n # Detect upper triangularity.\n structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None\n\n # Define the identity matrix depending on sparsity.\n ident = _ident_like(A)\n\n # Try Pade order 3.\n A2 = A.dot(A)\n d6 = _onenormest_matrix_power(A2, 3)**(1/6.)\n eta_1 = max(_onenormest_matrix_power(A2, 2)**(1/4.), d6)\n if eta_1 < 1.495585217958292e-002 and _ell(A, 3) == 0:\n U, V = _pade3(A, ident, A2)\n return _solve_P_Q(U, V, structure=structure)\n\n # Try Pade order 5.\n A4 = A2.dot(A2)\n d4 = _exact_1_norm(A4)**(1/4.)\n eta_2 = max(d4, d6)\n if eta_2 < 2.539398330063230e-001 and _ell(A, 5) == 0:\n U, V = _pade5(A, ident, A2, A4)\n return _solve_P_Q(U, V, structure=structure)\n\n # Try Pade orders 7 and 9.\n A6 = A2.dot(A4)\n d6 = _exact_1_norm(A6)**(1/6.)\n d8 = _onenormest_matrix_power(A4, 2)**(1/8.)\n eta_3 = max(d6, d8)\n if eta_3 < 9.504178996162932e-001 and _ell(A, 7) == 0:\n U, V = _pade7(A, ident, A2, A4, A6)\n return _solve_P_Q(U, V, structure=structure)\n if eta_3 < 2.097847961257068e+000 and _ell(A, 9) == 0:\n U, V = _pade9(A, ident, A2, A4, A6)\n return _solve_P_Q(U, V, structure=structure)\n\n # Use Pade order 13.\n d10 = _onenormest_product((A4, A6))**(1/10.)\n eta_4 = max(d8, d10)\n eta_5 = min(eta_3, eta_4)\n theta_13 = 4.25\n s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)\n s = s + _ell(2**-s * A, 13)\n B = A * 2**-s\n B2 = A2 * 2**(-2*s)\n B4 = A4 * 2**(-4*s)\n B6 = A6 * 2**(-6*s)\n U, V = _pade13(B, ident, B2, B4, B6)\n X = _solve_P_Q(U, V, structure=structure)\n if structure == UPPER_TRIANGULAR:\n # Invoke Code Fragment 2.1.\n X = _fragment_2_1(X, A, s)\n else:\n # X = r_13(A)^(2^s) by repeated squaring.\n for i in range(s):\n X = X.dot(X)\n return X\n\n\ndef _solve_P_Q(U, V, structure=None):\n \"\"\"\n A helper function for expm_2009.\n\n Parameters\n ----------\n U : ndarray\n Pade numerator.\n V : ndarray\n Pade denominator.\n structure : str, optional\n A string describing the structure of both matrices `U` and `V`.\n Only `upper_triangular` is currently supported.\n\n Notes\n -----\n The `structure` argument is inspired by similar args\n for theano and cvxopt functions.\n\n \"\"\"\n P = U + V\n Q = -U + V\n if isspmatrix(U):\n return spsolve(Q, P)\n elif structure is None:\n return solve(Q, P)\n elif structure == UPPER_TRIANGULAR:\n return solve_triangular(Q, P)\n else:\n raise ValueError('unsupported matrix structure: ' + str(structure))\n\n\ndef _sinch(x):\n \"\"\"\n Stably evaluate sinch.\n\n Notes\n -----\n The strategy of falling back to a sixth order Taylor expansion\n was suggested by the Spallation Neutron Source docs\n which was found on the internet by google search.\n http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html\n The details of the cutoff point and the Horner-like evaluation\n was picked 
without reference to anything in particular.\n\n Note that sinch is not currently implemented in scipy.special,\n whereas the \"engineer's\" definition of sinc is implemented.\n The implementation of sinc involves a scaling factor of pi\n that distinguishes it from the \"mathematician's\" version of sinc.\n\n \"\"\"\n\n # If x is small then use sixth order Taylor expansion.\n # How small is small? I am using the point where the relative error\n # of the approximation is less than 1e-14.\n # If x is large then directly evaluate sinh(x) / x.\n x2 = x*x\n if abs(x) < 0.0135:\n return 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))\n else:\n return np.sinh(x) / x\n\n\ndef _eq_10_42(lam_1, lam_2, t_12):\n \"\"\"\n Equation (10.42) of Functions of Matrices: Theory and Computation.\n\n Notes\n -----\n This is a helper function for _fragment_2_1 of expm_2009.\n Equation (10.42) is on page 251 in the section on Schur algorithms.\n In particular, section 10.4.3 explains the Schur-Parlett algorithm.\n expm([[lam_1, t_12], [0, lam_1])\n =\n [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],\n [0, exp(lam_2)]\n \"\"\"\n\n # The plain formula t_12 * (exp(lam_2) - exp(lam_2)) / (lam_2 - lam_1)\n # apparently suffers from cancellation, according to Higham's textbook.\n # A nice implementation of sinch, defined as sinh(x)/x,\n # will apparently work around the cancellation.\n a = 0.5 * (lam_1 + lam_2)\n b = 0.5 * (lam_1 - lam_2)\n return t_12 * np.exp(a) * _sinch(b)\n\n\ndef _fragment_2_1(X, T, s):\n \"\"\"\n A helper function for expm_2009.\n\n Notes\n -----\n The argument X is modified in-place, but this modification is not the same\n as the returned value of the function.\n This function also takes pains to do things in ways that are compatible\n with sparse matrices, for example by avoiding fancy indexing\n and by using methods of the matrices whenever possible instead of\n using functions of the numpy or scipy libraries themselves.\n\n \"\"\"\n # Form X = r_m(2^-s T)\n # Replace diag(X) by exp(2^-s diag(T)).\n n = X.shape[0]\n diag_T = T.diagonal().copy()\n\n # Replace diag(X) by exp(2^-s diag(T)).\n scale = 2 ** -s\n exp_diag = np.exp(scale * diag_T)\n for k in range(n):\n X[k, k] = exp_diag[k]\n\n for i in range(s-1, -1, -1):\n X = X.dot(X)\n\n # Replace diag(X) by exp(2^-i diag(T)).\n scale = 2 ** -i\n exp_diag = np.exp(scale * diag_T)\n for k in range(n):\n X[k, k] = exp_diag[k]\n\n # Replace (first) superdiagonal of X by explicit formula\n # for superdiagonal of exp(2^-i T) from Eq (10.42) of\n # the author's 2008 textbook\n # Functions of Matrices: Theory and Computation.\n for k in range(n-1):\n lam_1 = scale * diag_T[k]\n lam_2 = scale * diag_T[k+1]\n t_12 = scale * T[k, k+1]\n value = _eq_10_42(lam_1, lam_2, t_12)\n X[k, k+1] = value\n\n # Return the updated X matrix.\n return X\n\n\ndef _ell(A, m):\n \"\"\"\n A helper function for expm_2009.\n\n Parameters\n ----------\n A : linear operator\n A linear operator whose norm of power we care about.\n m : int\n The power of the linear operator\n\n Returns\n -------\n value : int\n A value related to a bound.\n\n \"\"\"\n p = 2*m + 1\n\n # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.\n # They are coefficients of terms of a generating function series expansion.\n abs_c_recip = scipy.misc.comb(2*p, p, exact=True) * math.factorial(2*p + 1)\n\n # This is explained after Eq. 
(1.2) of the 2009 expm paper.\n # It is the \"unit roundoff\" of IEEE double precision arithmetic.\n u = 2**-53\n\n # Estimate the 1-norm of the matrix power.\n est = _onenormest_matrix_power(abs(A), p)\n\n # Treat zero norm as a special case.\n if not est:\n return 0\n\n alpha = est / (_exact_1_norm(A) * abs_c_recip)\n log2_alpha_div_u = np.log2(alpha/u)\n value = int(np.ceil(log2_alpha_div_u / (2 * m)))\n return max(value, 0)\n\n\n# Implementation of Pade approximations of various degree\n# using the algorithm presented in [Higham 2005].\n# These should apply to both dense and sparse matricies.\n# ident is the identity matrix, which matches A in being sparse or dense.\n\ndef _pade3(A, ident, A2=None):\n b = (120., 60., 12., 1.)\n if A2 is None:\n A2 = A.dot(A)\n U = A.dot(b[3]*A2 + b[1]*ident)\n V = b[2]*A2 + b[0]*ident\n return U,V\n\n\ndef _pade5(A, ident, A2=None, A4=None):\n b = (30240., 15120., 3360., 420., 30., 1.)\n if A2 is None:\n A2 = A.dot(A)\n if A4 is None:\n A4 = A2.dot(A2)\n U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[4]*A4 + b[2]*A2 + b[0]*ident\n return U,V\n\n\ndef _pade7(A, ident, A2=None, A4=None, A6=None):\n b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)\n if A2 is None:\n A2 = A.dot(A)\n if A4 is None:\n A4 = A2.dot(A2)\n if A6 is None:\n A6 = A4.dot(A2)\n U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n return U,V\n\n\ndef _pade9(A, ident, A2=None, A4=None, A6=None):\n b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,\n 2162160., 110880., 3960., 90., 1.)\n if A2 is None:\n A2 = A.dot(A)\n if A4 is None:\n A4 = A2.dot(A2)\n if A6 is None:\n A6 = A4.dot(A2)\n A8 = A6.dot(A2)\n U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n return U,V\n\n\ndef _pade13(A, ident, A2=None, A4=None, A6=None):\n b = (64764752532480000., 32382376266240000., 7771770303897600.,\n 1187353796428800., 129060195264000., 10559470521600., 670442572800.,\n 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.)\n if A2 is None:\n A2 = A.dot(A)\n if A4 is None:\n A4 = A2.dot(A2)\n if A6 is None:\n A6 = A4.dot(A2)\n U = A.dot(A6.dot(b[13]*A6 + b[11]*A4 + b[9]*A2) + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = A6.dot(b[12]*A6 + b[10]*A4 + b[8]*A2) + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n return U,V\n",
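A brief sketch of the two public entry points defined above, expm and inv, on a small sparse diagonal matrix chosen so the expected results are obvious; the matrix itself is an illustrative assumption.

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import expm, inv

A = csc_matrix(np.diag([1.0, 2.0, 3.0]))
expA = expm(A)                          # Pade approximation with scaling and squaring, as above
Ainv = inv(A)                           # spsolve against the sparse identity

assert np.allclose(expA.toarray(), np.diag(np.exp([1.0, 2.0, 3.0])))
assert np.allclose(Ainv.toarray(), np.diag([1.0, 0.5, 1.0 / 3.0]))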
"\"\"\" Unit tests for nonnegative least squares\nAuthor: Uwe Schmitt\nSep 2008\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom numpy.testing import assert_, TestCase, run_module_suite\n\nfrom scipy.optimize import nnls\nfrom numpy import arange, dot\nfrom numpy.linalg import norm\n\n\nclass TestNNLS(TestCase):\n\n def test_nnls(self):\n a = arange(25.0).reshape(-1,5)\n x = arange(5.0)\n y = dot(a,x)\n x, res = nnls(a,y)\n assert_(res < 1e-7)\n assert_(norm(dot(a,x)-y) < 1e-7)\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"from __future__ import absolute_import, print_function\n\nfrom numpy.testing import TestCase, dec, assert_equal, assert_\n\nfrom scipy.weave import ext_tools, c_spec\ntry:\n from scipy.weave.standard_array_spec import array_converter\nexcept ImportError:\n pass # requires numpy.numerix\n\nfrom weave_test_utils import empty_temp_dir\n\nbuild_dir = empty_temp_dir()\n\n\nclass TestExtModule(TestCase):\n\n # should really do some testing of where modules end up\n\n @dec.slow\n def test_simple(self):\n \"\"\" Simplest possible module \"\"\"\n mod = ext_tools.ext_module('simple_ext_module')\n mod.compile(location=build_dir)\n import simple_ext_module\n\n @dec.slow\n def test_multi_functions(self):\n mod = ext_tools.ext_module('module_multi_function')\n var_specs = []\n code = \"\"\n test = ext_tools.ext_function_from_specs('test',code,var_specs)\n mod.add_function(test)\n test2 = ext_tools.ext_function_from_specs('test2',code,var_specs)\n mod.add_function(test2)\n mod.compile(location=build_dir)\n import module_multi_function\n module_multi_function.test()\n module_multi_function.test2()\n\n @dec.slow\n def test_with_include(self):\n # decalaring variables\n a = 2.\n\n # declare module\n mod = ext_tools.ext_module('ext_module_with_include')\n mod.customize.add_header('<iostream>')\n\n # function 2 --> a little more complex expression\n var_specs = ext_tools.assign_variable_types(['a'],locals(),globals())\n code = \"\"\"\n std::cout.clear(std::ios_base::badbit);\n std::cout << std::endl;\n std::cout << \"test printing a value:\" << a << std::endl;\n std::cout.clear(std::ios_base::goodbit);\n \"\"\"\n test = ext_tools.ext_function_from_specs('test',code,var_specs)\n mod.add_function(test)\n # build module\n mod.compile(location=build_dir)\n import ext_module_with_include\n ext_module_with_include.test(a)\n\n @dec.slow\n def test_string_and_int(self):\n # decalaring variables\n a = 2\n b = 'string'\n # declare module\n mod = ext_tools.ext_module('ext_string_and_int')\n code = \"\"\"\n a=b.length();\n return_val = PyInt_FromLong(a);\n \"\"\"\n test = ext_tools.ext_function('test',code,['a','b'])\n mod.add_function(test)\n mod.compile(location=build_dir)\n import ext_string_and_int\n c = ext_string_and_int.test(a,b)\n assert_(c == len(b))\n\n @dec.slow\n def test_return_tuple(self):\n # decalaring variables\n a = 2\n # declare module\n mod = ext_tools.ext_module('ext_return_tuple')\n var_specs = ext_tools.assign_variable_types(['a'],locals())\n code = \"\"\"\n int b;\n b = a + 1;\n py::tuple returned(2);\n returned[0] = a;\n returned[1] = b;\n return_val = returned;\n \"\"\"\n test = ext_tools.ext_function('test',code,['a'])\n mod.add_function(test)\n mod.compile(location=build_dir)\n import ext_return_tuple\n c,d = ext_return_tuple.test(a)\n assert_(c == a and d == a+1)\n\n\nclass TestExtFunction(TestCase):\n\n # should really do some testing of where modules end up\n\n @dec.slow\n def test_simple(self):\n \"\"\" Simplest possible function \"\"\"\n mod = ext_tools.ext_module('simple_ext_function')\n var_specs = []\n code = \"\"\n test = ext_tools.ext_function_from_specs('test',code,var_specs)\n mod.add_function(test)\n mod.compile(location=build_dir)\n import simple_ext_function\n simple_ext_function.test()\n\n\nclass TestAssignVariableTypes(TestCase):\n\n def test_assign_variable_types(self):\n try:\n from numpy.numerix import arange, Float32, Float64\n except:\n # skip this test if numpy.numerix not installed\n return\n\n import types\n a = arange(10,typecode=Float32)\n b = 
arange(5,typecode=Float64)\n c = 5\n arg_list = ['a','b','c']\n actual = ext_tools.assign_variable_types(arg_list,locals())\n # desired = {'a':(Float32,1),'b':(Float32,1),'i':(Int32,0)}\n\n ad = array_converter()\n ad.name, ad.var_type, ad.dims = 'a', Float32, 1\n bd = array_converter()\n bd.name, bd.var_type, bd.dims = 'b', Float64, 1\n\n cd = c_spec.int_converter()\n cd.name, cd.var_type = 'c', types.IntType\n desired = [ad,bd,cd]\n assert_equal(actual,desired)\n\n\nif __name__ == \"__main__\":\n import nose\n nose.run(argv=['', __file__])\n",
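scipy.weave only exists for Python 2 and was later removed from scipy, so the pattern in the tests above can only be sketched here, not verified; this mirrors test_string_and_int with an assumed (hypothetical) module name.

from scipy.weave import ext_tools       # Python 2 only; weave is no longer part of scipy

a = 2
b = 'string'
mod = ext_tools.ext_module('sketch_string_and_int')   # hypothetical extension module name
code = """
       a = b.length();
       return_val = PyInt_FromLong(a);
       """
mod.add_function(ext_tools.ext_function('test', code, ['a', 'b']))
mod.compile()                            # builds and places the extension module, as in the tests
import sketch_string_and_int
assert sketch_string_and_int.test(a, b) == len(b)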
"#\n# Author: Travis Oliphant, March 2002\n#\n\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = ['expm','expm2','expm3','cosm','sinm','tanm','coshm','sinhm',\n 'tanhm','logm','funm','signm','sqrtm',\n 'expm_frechet', 'fractional_matrix_power']\n\nfrom numpy import asarray, Inf, dot, eye, diag, exp, \\\n product, logical_not, ravel, transpose, conjugate, \\\n cast, log, ogrid, imag, real, absolute, amax, sign, \\\n isfinite, sqrt, single\nfrom numpy import matrix as mat\nimport numpy as np\n\n\n# Local imports\nfrom .misc import norm\nfrom .basic import solve, inv\nfrom .lapack import ztrsyl\nfrom .special_matrices import triu, all_mat\nfrom .decomp import eig\nfrom .decomp_svd import orth, svd\nfrom .decomp_schur import schur, rsf2csf\nfrom ._expm_frechet import expm_frechet\nfrom ._matfuncs_sqrtm import sqrtm\nimport warnings\n\neps = np.finfo(float).eps\nfeps = np.finfo(single).eps\n\n\ndef fractional_matrix_power(A, t):\n # This fixes some issue with imports;\n # this function calls onenormest which is in scipy.sparse.\n import scipy.linalg._matfuncs_inv_ssq\n return scipy.linalg._matfuncs_inv_ssq.fractional_matrix_power(A, t)\n\n\ndef logm(A, disp=True):\n \"\"\"\n Compute matrix logarithm.\n\n The matrix logarithm is the inverse of\n expm: expm(logm(`A`)) == `A`\n\n Parameters\n ----------\n A : (N, N) array_like\n Matrix whose logarithm to evaluate\n disp : bool, optional\n Print warning if error in the result is estimated large\n instead of returning estimated error. (Default: True)\n\n Returns\n -------\n logm : (N, N) ndarray\n Matrix logarithm of `A`\n errest : float\n (if disp == False)\n\n 1-norm of the estimated error, ||err||_1 / ||A||_1\n\n \"\"\"\n import scipy.linalg._matfuncs_inv_ssq\n A = mat(asarray(A))\n F = scipy.linalg._matfuncs_inv_ssq.logm(A)\n errtol = 1000*eps\n #TODO use a better error approximation\n errest = norm(expm(F)-A,1) / norm(A,1)\n if disp:\n if not isfinite(errest) or errest >= errtol:\n print(\"logm result may be inaccurate, approximate err =\", errest)\n return F\n else:\n return F, errest\n\n\ndef expm(A, q=None):\n \"\"\"\n Compute the matrix exponential using Pade approximation.\n\n Parameters\n ----------\n A : (N, N) array_like\n Matrix to be exponentiated\n\n Returns\n -------\n expm : (N, N) ndarray\n Matrix exponential of `A`\n\n References\n ----------\n N. J. Higham,\n \"The Scaling and Squaring Method for the Matrix Exponential Revisited\",\n SIAM. J. Matrix Anal. & Appl. 26, 1179 (2005).\n\n \"\"\"\n if q:\n warnings.warn(\"argument q=... 
in scipy.linalg.expm is deprecated.\")\n import scipy.sparse.linalg\n return scipy.sparse.linalg.expm(A)\n\n\n# deprecated, but probably should be left there in the long term\[email protected](new_name=\"expm\")\ndef expm2(A):\n \"\"\"\n Compute the matrix exponential using eigenvalue decomposition.\n\n Parameters\n ----------\n A : (N, N) array_like\n Matrix to be exponentiated\n\n Returns\n -------\n expm2 : (N, N) ndarray\n Matrix exponential of `A`\n\n \"\"\"\n A = asarray(A)\n t = A.dtype.char\n if t not in ['f','F','d','D']:\n A = A.astype('d')\n t = 'd'\n s,vr = eig(A)\n vri = inv(vr)\n r = dot(dot(vr,diag(exp(s))),vri)\n if t in ['f', 'd']:\n return r.real.astype(t)\n else:\n return r.astype(t)\n\n\n# deprecated, but probably should be left there in the long term\[email protected](new_name=\"expm\")\ndef expm3(A, q=20):\n \"\"\"\n Compute the matrix exponential using Taylor series.\n\n Parameters\n ----------\n A : (N, N) array_like\n Matrix to be exponentiated\n q : int\n Order of the Taylor series used is `q-1`\n\n Returns\n -------\n expm3 : (N, N) ndarray\n Matrix exponential of `A`\n\n \"\"\"\n A = asarray(A)\n t = A.dtype.char\n if t not in ['f','F','d','D']:\n A = A.astype('d')\n t = 'd'\n A = mat(A)\n eA = eye(*A.shape,**{'dtype':t})\n trm = mat(eA, copy=True)\n castfunc = cast[t]\n for k in range(1,q):\n trm *= A / castfunc(k)\n eA += trm\n return eA\n\n_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}\n\n\ndef toreal(arr, tol=None):\n \"\"\"Return as real array if imaginary part is small.\n\n Parameters\n ----------\n arr : array\n tol : float\n Absolute tolerance\n\n Returns\n -------\n arr : double or complex array\n \"\"\"\n if tol is None:\n tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[arr.dtype.char]]\n if (arr.dtype.char in ['F', 'D','G']) and \\\n np.allclose(arr.imag, 0.0, atol=tol):\n arr = arr.real\n return arr\n\n\ndef cosm(A):\n \"\"\"\n Compute the matrix cosine.\n\n This routine uses expm to compute the matrix exponentials.\n\n Parameters\n ----------\n A : (N, N) array_like\n Input array\n\n Returns\n -------\n cosm : (N, N) ndarray\n Matrix cosine of A\n\n \"\"\"\n A = asarray(A)\n if A.dtype.char not in ['F','D','G']:\n return expm(1j*A).real\n else:\n return 0.5*(expm(1j*A) + expm(-1j*A))\n\n\ndef sinm(A):\n \"\"\"\n Compute the matrix sine.\n\n This routine uses expm to compute the matrix exponentials.\n\n Parameters\n ----------\n A : (N, N) array_like\n Input array.\n\n Returns\n -------\n sinm : (N, N) ndarray\n Matrix cosine of `A`\n\n \"\"\"\n A = asarray(A)\n if A.dtype.char not in ['F','D','G']:\n return expm(1j*A).imag\n else:\n return -0.5j*(expm(1j*A) - expm(-1j*A))\n\n\ndef tanm(A):\n \"\"\"\n Compute the matrix tangent.\n\n This routine uses expm to compute the matrix exponentials.\n\n Parameters\n ----------\n A : (N, N) array_like\n Input array.\n\n Returns\n -------\n tanm : (N, N) ndarray\n Matrix tangent of `A`\n\n \"\"\"\n A = asarray(A)\n if A.dtype.char not in ['F','D','G']:\n return toreal(solve(cosm(A), sinm(A)))\n else:\n return solve(cosm(A), sinm(A))\n\n\ndef coshm(A):\n \"\"\"\n Compute the hyperbolic matrix cosine.\n\n This routine uses expm to compute the matrix exponentials.\n\n Parameters\n ----------\n A : (N, N) array_like\n Input array.\n\n Returns\n -------\n coshm : (N, N) ndarray\n Hyperbolic matrix cosine of `A`\n\n \"\"\"\n A = asarray(A)\n if A.dtype.char not in ['F','D','G']:\n return toreal(0.5*(expm(A) + expm(-A)))\n else:\n return 0.5*(expm(A) + expm(-A))\n\n\ndef sinhm(A):\n \"\"\"\n 
Compute the hyperbolic matrix sine.\n\n This routine uses expm to compute the matrix exponentials.\n\n Parameters\n ----------\n A : (N, N) array_like\n Input array.\n\n Returns\n -------\n sinhm : (N, N) ndarray\n Hyperbolic matrix sine of `A`\n\n \"\"\"\n A = asarray(A)\n if A.dtype.char not in ['F','D']:\n return toreal(0.5*(expm(A) - expm(-A)))\n else:\n return 0.5*(expm(A) - expm(-A))\n\n\ndef tanhm(A):\n \"\"\"\n Compute the hyperbolic matrix tangent.\n\n This routine uses expm to compute the matrix exponentials.\n\n Parameters\n ----------\n A : (N, N) array_like\n Input array\n\n Returns\n -------\n tanhm : (N, N) ndarray\n Hyperbolic matrix tangent of `A`\n\n \"\"\"\n A = asarray(A)\n if A.dtype.char not in ['F','D']:\n return toreal(solve(coshm(A), sinhm(A)))\n else:\n return solve(coshm(A), sinhm(A))\n\n\ndef funm(A, func, disp=True):\n \"\"\"\n Evaluate a matrix function specified by a callable.\n\n Returns the value of matrix-valued function ``f`` at `A`. The\n function ``f`` is an extension of the scalar-valued function `func`\n to matrices.\n\n Parameters\n ----------\n A : (N, N) array_like\n Matrix at which to evaluate the function\n func : callable\n Callable object that evaluates a scalar function f.\n Must be vectorized (eg. using vectorize).\n disp : bool, optional\n Print warning if error in the result is estimated large\n instead of returning estimated error. (Default: True)\n\n Returns\n -------\n funm : (N, N) ndarray\n Value of the matrix function specified by func evaluated at `A`\n errest : float\n (if disp == False)\n\n 1-norm of the estimated error, ||err||_1 / ||A||_1\n\n \"\"\"\n # Perform Shur decomposition (lapack ?gees)\n A = asarray(A)\n if len(A.shape) != 2:\n raise ValueError(\"Non-matrix input to matrix function.\")\n if A.dtype.char in ['F', 'D', 'G']:\n cmplx_type = 1\n else:\n cmplx_type = 0\n T, Z = schur(A)\n T, Z = rsf2csf(T,Z)\n n,n = T.shape\n F = diag(func(diag(T))) # apply function to diagonal elements\n F = F.astype(T.dtype.char) # e.g. when F is real but T is complex\n\n minden = abs(T[0,0])\n\n # implement Algorithm 11.1.1 from Golub and Van Loan\n # \"matrix Computations.\"\n for p in range(1,n):\n for i in range(1,n-p+1):\n j = i + p\n s = T[i-1,j-1] * (F[j-1,j-1] - F[i-1,i-1])\n ksl = slice(i,j-1)\n val = dot(T[i-1,ksl],F[ksl,j-1]) - dot(F[i-1,ksl],T[ksl,j-1])\n s = s + val\n den = T[j-1,j-1] - T[i-1,i-1]\n if den != 0.0:\n s = s / den\n F[i-1,j-1] = s\n minden = min(minden,abs(den))\n\n F = dot(dot(Z, F),transpose(conjugate(Z)))\n if not cmplx_type:\n F = toreal(F)\n\n tol = {0:feps, 1:eps}[_array_precision[F.dtype.char]]\n if minden == 0.0:\n minden = tol\n err = min(1, max(tol,(tol/minden)*norm(triu(T,1),1)))\n if product(ravel(logical_not(isfinite(F))),axis=0):\n err = Inf\n if disp:\n if err > 1000*tol:\n print(\"funm result may be inaccurate, approximate err =\", err)\n return F\n else:\n return F, err\n\n\ndef signm(a, disp=True):\n \"\"\"\n Matrix sign function.\n\n Extension of the scalar sign(x) to matrices.\n\n Parameters\n ----------\n A : (N, N) array_like\n Matrix at which to evaluate the sign function\n disp : bool, optional\n Print warning if error in the result is estimated large\n instead of returning estimated error. 
(Default: True)\n\n Returns\n -------\n signm : (N, N) ndarray\n Value of the sign function at `A`\n errest : float\n (if disp == False)\n\n 1-norm of the estimated error, ||err||_1 / ||A||_1\n\n Examples\n --------\n >>> from scipy.linalg import signm, eigvals\n >>> a = [[1,2,3], [1,2,1], [1,1,1]]\n >>> eigvals(a)\n array([ 4.12488542+0.j, -0.76155718+0.j, 0.63667176+0.j])\n >>> eigvals(signm(a))\n array([-1.+0.j, 1.+0.j, 1.+0.j])\n\n \"\"\"\n def rounded_sign(x):\n rx = real(x)\n if rx.dtype.char == 'f':\n c = 1e3*feps*amax(x)\n else:\n c = 1e3*eps*amax(x)\n return sign((absolute(rx) > c) * rx)\n result,errest = funm(a, rounded_sign, disp=0)\n errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]]\n if errest < errtol:\n return result\n\n # Handle signm of defective matrices:\n\n # See \"E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,\n # 8:237-250,1981\" for how to improve the following (currently a\n # rather naive) iteration process:\n\n a = asarray(a)\n # a = result # sometimes iteration converges faster but where??\n\n # Shifting to avoid zero eigenvalues. How to ensure that shifting does\n # not change the spectrum too much?\n vals = svd(a,compute_uv=0)\n max_sv = np.amax(vals)\n # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]\n # c = 0.5/min_nonzero_sv\n c = 0.5/max_sv\n S0 = a + c*np.identity(a.shape[0])\n prev_errest = errest\n for i in range(100):\n iS0 = inv(S0)\n S0 = 0.5*(S0 + iS0)\n Pp = 0.5*(dot(S0,S0)+S0)\n errest = norm(dot(Pp,Pp)-Pp,1)\n if errest < errtol or prev_errest == errest:\n break\n prev_errest = errest\n if disp:\n if not isfinite(errest) or errest >= errtol:\n print(\"signm result may be inaccurate, approximate err =\", errest)\n return S0\n else:\n return S0, errest\n",
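The scipy.linalg entry above defines the public matrix-function API (expm, logm, funm, signm, sqrtm). The following is a minimal usage sketch, assuming only NumPy and the SciPy build captured in that entry; the 2x2 test matrix A is illustrative and not part of the original data.

import numpy as np
from scipy.linalg import expm, logm, funm, signm, sqrtm

A = np.array([[1.0, 1.0],
              [0.0, 2.0]])   # real, positive eigenvalues (1 and 2), so logm/sqrtm stay real

# expm is the inverse of logm (up to rounding), as the logm docstring states.
assert np.allclose(expm(logm(A)), A)

# funm evaluates a scalar function on the matrix via the Schur form;
# with np.exp it should agree with the dedicated expm routine.
assert np.allclose(funm(A, np.exp), expm(A))

# sqrtm squares back to A; signm of a matrix with positive eigenvalues is the identity.
assert np.allclose(np.dot(sqrtm(A), sqrtm(A)), A)
assert np.allclose(signm(A), np.eye(2))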
"\"\"\" Test functions for stats module\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\nimport re\nimport sys\n\nfrom numpy.testing import (TestCase, run_module_suite, assert_equal,\n assert_array_equal, assert_almost_equal, assert_array_almost_equal,\n assert_allclose, assert_, assert_raises, rand, dec)\nfrom numpy.testing.utils import WarningManager\nfrom nose import SkipTest\n\nimport numpy\nimport numpy as np\nfrom numpy import typecodes, array\nfrom scipy import special\nimport scipy.stats as stats\nfrom scipy.stats.distributions import argsreduce\nfrom scipy.special import xlogy\n\n\n# python -OO strips docstrings\nDOCSTRINGS_STRIPPED = sys.flags.optimize > 1\n\n\ndef kolmogorov_check(diststr, args=(), N=20, significance=0.01):\n qtest = stats.ksoneisf(significance, N)\n cdf = eval('stats.'+diststr+'.cdf')\n dist = eval('stats.'+diststr)\n # Get random numbers\n kwds = {'size':N}\n vals = numpy.sort(dist.rvs(*args, **kwds))\n cdfvals = cdf(vals, *args)\n q = max(abs(cdfvals - np.arange(1.0, N+1)/N))\n assert_(q < qtest, msg=\"Failed q=%f, bound=%f, alpha=%f\" % (q, qtest, significance))\n return\n\n\n# generate test cases to test cdf and distribution consistency\ndists = ['uniform','norm','lognorm','expon','beta',\n 'powerlaw','bradford','burr','fisk','cauchy','halfcauchy',\n 'foldcauchy','gamma','gengamma','loggamma',\n 'alpha','anglit','arcsine','betaprime',\n 'dgamma','exponweib','exponpow','frechet_l','frechet_r',\n 'gilbrat','f','ncf','chi2','chi','nakagami','genpareto',\n 'genextreme','genhalflogistic','pareto','lomax','halfnorm',\n 'halflogistic','fatiguelife','foldnorm','ncx2','t','nct',\n 'weibull_min','weibull_max','dweibull','maxwell','rayleigh',\n 'genlogistic', 'logistic','gumbel_l','gumbel_r','gompertz',\n 'hypsecant', 'laplace', 'reciprocal','triang','tukeylambda',\n 'vonmises', 'pearson3']\n\n# check function for test generator\n\n\ndef check_distribution(dist, args, alpha):\n D,pval = stats.kstest(dist,'', args=args, N=1000)\n if (pval < alpha):\n D,pval = stats.kstest(dist,'',args=args, N=1000)\n # if (pval < alpha):\n # D,pval = stats.kstest(dist,'',args=args, N=1000)\n assert_(pval > alpha, msg=\"D = \" + str(D) + \"; pval = \" + str(pval) +\n \"; alpha = \" + str(alpha) + \"\\nargs = \" + str(args))\n\n# nose test generator\n\n\ndef test_all_distributions():\n for dist in dists:\n distfunc = getattr(stats, dist)\n nargs = distfunc.numargs\n alpha = 0.01\n if dist == 'fatiguelife':\n alpha = 0.001\n\n if dist == 'frechet':\n args = tuple(2*rand(1))+(0,)+tuple(2*rand(2))\n elif dist == 'triang':\n args = tuple(rand(nargs))\n elif dist == 'reciprocal':\n vals = rand(nargs)\n vals[1] = vals[0] + 1.0\n args = tuple(vals)\n elif dist == 'vonmises':\n yield check_distribution, dist, (10,), alpha\n yield check_distribution, dist, (101,), alpha\n args = tuple(1.0+rand(nargs))\n else:\n args = tuple(1.0+rand(nargs))\n\n yield check_distribution, dist, args, alpha\n\n\ndef check_vonmises_pdf_periodic(k,l,s,x):\n vm = stats.vonmises(k,loc=l,scale=s)\n assert_almost_equal(vm.pdf(x),vm.pdf(x % (2*numpy.pi*s)))\n\n\ndef check_vonmises_cdf_periodic(k,l,s,x):\n vm = stats.vonmises(k,loc=l,scale=s)\n assert_almost_equal(vm.cdf(x) % 1,vm.cdf(x % (2*numpy.pi*s)) % 1)\n\n\ndef test_vonmises_pdf_periodic():\n for k in [0.1, 1, 101]:\n for x in [0,1,numpy.pi,10,100]:\n yield check_vonmises_pdf_periodic, k, 0, 1, x\n yield check_vonmises_pdf_periodic, k, 1, 1, x\n yield check_vonmises_pdf_periodic, k, 0, 10, x\n\n yield 
check_vonmises_cdf_periodic, k, 0, 1, x\n yield check_vonmises_cdf_periodic, k, 1, 1, x\n yield check_vonmises_cdf_periodic, k, 0, 10, x\n\n\nclass TestRandInt(TestCase):\n def test_rvs(self):\n vals = stats.randint.rvs(5,30,size=100)\n assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))\n assert_(len(vals) == 100)\n vals = stats.randint.rvs(5,30,size=(2,50))\n assert_(numpy.shape(vals) == (2,50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.randint.rvs(15,46)\n assert_((val >= 15) & (val < 46))\n assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))\n val = stats.randint(15,46).rvs(3)\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pdf(self):\n k = numpy.r_[0:36]\n out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)\n vals = stats.randint.pmf(k,5,30)\n assert_array_almost_equal(vals,out)\n\n def test_cdf(self):\n x = numpy.r_[0:36:100j]\n k = numpy.floor(x)\n out = numpy.select([k >= 30,k >= 5],[1.0,(k-5.0+1)/(30-5.0)],0)\n vals = stats.randint.cdf(x,5,30)\n assert_array_almost_equal(vals, out, decimal=12)\n\n\nclass TestBinom(TestCase):\n def test_rvs(self):\n vals = stats.binom.rvs(10, 0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.binom.rvs(10, 0.75)\n assert_(isinstance(val, int))\n val = stats.binom(10, 0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n # regression test for Ticket #1842\n vals1 = stats.binom.pmf(100, 100,1)\n vals2 = stats.binom.pmf(0, 100,0)\n assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)\n assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)\n\n def test_entropy(self):\n # Basic entropy tests.\n b = stats.binom(2, 0.5)\n expected_p = np.array([0.25, 0.5, 0.25])\n expected_h = -sum(xlogy(expected_p, expected_p))\n h = b.entropy()\n assert_allclose(h, expected_h)\n\n b = stats.binom(2, 0.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n b = stats.binom(2, 1.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n\nclass TestBernoulli(TestCase):\n def test_rvs(self):\n vals = stats.bernoulli.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.bernoulli.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.bernoulli(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_entropy(self):\n # Simple tests of entropy.\n b = stats.bernoulli(0.25)\n expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)\n h = b.entropy()\n assert_allclose(h, expected_h)\n\n b = stats.bernoulli(0.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n b = stats.bernoulli(1.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n\nclass TestNBinom(TestCase):\n def test_rvs(self):\n vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.nbinom.rvs(10, 0.75)\n assert_(isinstance(val, int))\n val = stats.nbinom(10, 0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n # regression test for ticket 1779\n assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),\n stats.nbinom.pmf(700, 721, 0.52))\n\n\nclass TestGeom(TestCase):\n def test_rvs(self):\n 
vals = stats.geom.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.geom.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.geom(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n vals = stats.geom.pmf([1,2,3],0.5)\n assert_array_almost_equal(vals,[0.5,0.25,0.125])\n\n def test_logpmf(self):\n # regression test for ticket 1793\n vals1 = np.log(stats.geom.pmf([1,2,3], 0.5))\n vals2 = stats.geom.logpmf([1,2,3], 0.5)\n assert_allclose(vals1, vals2, rtol=1e-15, atol=0)\n\n def test_cdf_sf(self):\n vals = stats.geom.cdf([1,2,3],0.5)\n vals_sf = stats.geom.sf([1,2,3],0.5)\n expected = array([0.5,0.75,0.875])\n assert_array_almost_equal(vals,expected)\n assert_array_almost_equal(vals_sf,1-expected)\n\n\nclass TestTruncnorm(TestCase):\n def test_ppf_ticket1131(self):\n vals = stats.truncnorm.ppf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,\n loc=[3]*7, scale=2)\n expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])\n assert_array_almost_equal(vals, expected)\n\n def test_isf_ticket1131(self):\n vals = stats.truncnorm.isf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,\n loc=[3]*7, scale=2)\n expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])\n assert_array_almost_equal(vals, expected)\n\n def test_gh_2477_small_values(self):\n # Check a case that worked in the original issue.\n low, high = -11, -10\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n # Check a case that failed in the original issue.\n low, high = 10, 11\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n def test_gh_2477_large_values(self):\n # Check a case that fails because of extreme tailness.\n raise SkipTest('truncnorm rvs is know to fail at extreme tails')\n low, high = 100, 101\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n def test_gh_1489_trac_962_rvs(self):\n # Check the original example.\n low, high = 10, 15\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n\nclass TestHypergeom(TestCase):\n def test_rvs(self):\n vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))\n assert_(numpy.all(vals >= 0) &\n numpy.all(vals <= 3))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.hypergeom.rvs(20, 3, 10)\n assert_(isinstance(val, int))\n val = stats.hypergeom(20, 3, 10).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_precision(self):\n # comparison number from mpmath\n M = 2500\n n = 50\n N = 500\n tot = M\n good = n\n hgpmf = stats.hypergeom.pmf(2, tot, good, N)\n assert_almost_equal(hgpmf, 0.0010114963068932233, 11)\n\n def test_precision2(self):\n \"\"\"Test hypergeom precision for large numbers. 
See #1218.\"\"\"\n # Results compared with those from R.\n oranges = 9.9e4\n pears = 1.1e5\n fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4\n quantile = 2e4\n res = []\n for eaten in fruits_eaten:\n res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten))\n expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,\n 8.265601e-11, 0.1237904, 1])\n assert_allclose(res, expected, atol=0, rtol=5e-7)\n\n # Test with array_like first argument\n quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]\n res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)\n expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]\n assert_allclose(res2, expected2, atol=0, rtol=5e-7)\n\n def test_entropy(self):\n # Simple tests of entropy.\n hg = stats.hypergeom(4, 1, 1)\n h = hg.entropy()\n expected_p = np.array([0.75, 0.25])\n expected_h = -np.sum(xlogy(expected_p, expected_p))\n assert_allclose(h, expected_h)\n\n hg = stats.hypergeom(1, 1, 1)\n h = hg.entropy()\n assert_equal(h, 0.0)\n\n\nclass TestLoggamma(TestCase):\n\n def test_stats(self):\n # The following precomputed values are from the table in section 2.2\n # of \"A Statistical Study of Log-Gamma Distribution\", by Ping Shing\n # Chan (thesis, McMaster University, 1993).\n table = np.array([\n # c, mean, var, skew, exc. kurt.\n 0.5, -1.9635, 4.9348, -1.5351, 4.0000,\n 1.0, -0.5772, 1.6449, -1.1395, 2.4000,\n 12.0, 2.4427, 0.0869, -0.2946, 0.1735,\n ]).reshape(-1, 5)\n for c, mean, var, skew, kurt in table:\n computed = stats.loggamma.stats(c, moments='msvk')\n assert_array_almost_equal(computed, [mean, var, skew, kurt],\n decimal=4)\n\n\nclass TestLogser(TestCase):\n def test_rvs(self):\n vals = stats.logser.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.logser.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.logser(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n\nclass TestPareto(TestCase):\n def test_stats(self):\n # Check the stats() method with some simple values. 
Also check\n # that the calculations do not trigger RuntimeWarnings.\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n\n m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')\n assert_equal(m, np.inf)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')\n assert_equal(m, np.inf)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')\n assert_equal(m, 3.0)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')\n assert_equal(m, 2.0)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')\n assert_allclose(m, 2.5 / 1.5)\n assert_allclose(v, 2.5 / (1.5*1.5*0.5))\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')\n assert_allclose(m, 1.5)\n assert_allclose(v, 0.75)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')\n assert_allclose(m, 3.5 / 2.5)\n assert_allclose(v, 3.5 / (2.5*2.5*1.5))\n assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')\n assert_allclose(m, 4.0 / 3.0)\n assert_allclose(v, 4.0 / 18.0)\n assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')\n assert_allclose(m, 4.5 / 3.5)\n assert_allclose(v, 4.5 / (3.5*3.5*2.5))\n assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))\n assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))\n\n\nclass TestPearson3(TestCase):\n def test_rvs(self):\n vals = stats.pearson3.rvs(0.1, size=(2, 50))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllFloat'])\n val = stats.pearson3.rvs(0.5)\n assert_(isinstance(val, float))\n val = stats.pearson3(0.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllFloat'])\n assert_(len(val) == 3)\n\n def test_pdf(self):\n vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])\n assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),\n atol=1e-6)\n vals = stats.pearson3.pdf(-3, 0.1)\n assert_allclose(vals, np.array([0.00313791]), atol=1e-6)\n vals = stats.pearson3.pdf([-3,-2,-1,0,1], 0.1)\n assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,\n 0.39885918, 0.23413173]), atol=1e-6)\n\n def test_cdf(self):\n vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])\n assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),\n atol=1e-6)\n vals = stats.pearson3.cdf(-3, 0.1)\n assert_allclose(vals, [0.00082256], atol=1e-6)\n vals = stats.pearson3.cdf([-3,-2,-1,0,1], 0.1)\n assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,\n 5.06649130e-01, 8.41442111e-01], atol=1e-6)\n\n\nclass TestPoisson(TestCase):\n def test_rvs(self):\n vals = stats.poisson.rvs(0.5, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.poisson.rvs(0.5)\n assert_(isinstance(val, int))\n val = stats.poisson(0.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_stats(self):\n mu = 16.0\n result = stats.poisson.stats(mu, 
moments='mvsk')\n assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])\n\n\nclass TestZipf(TestCase):\n def test_rvs(self):\n vals = stats.zipf.rvs(1.5, size=(2, 50))\n assert_(numpy.all(vals >= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.zipf.rvs(1.5)\n assert_(isinstance(val, int))\n val = stats.zipf(1.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n\nclass TestDLaplace(TestCase):\n def test_rvs(self):\n vals = stats.dlaplace.rvs(1.5, size=(2, 50))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.dlaplace.rvs(1.5)\n assert_(isinstance(val, int))\n val = stats.dlaplace(1.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_stats(self):\n # compare the explicit formulas w/ direct summation using pmf\n a = 1.\n dl = stats.dlaplace(a)\n m, v, s, k = dl.stats('mvsk')\n\n N = 37\n xx = np.arange(-N, N+1)\n pp = dl.pmf(xx)\n m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)\n assert_equal((m, s), (0,0))\n assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)\n\n def test_stats2(self):\n a = np.log(2.)\n dl = stats.dlaplace(a)\n m, v, s, k = dl.stats('mvsk')\n assert_equal((m, s), (0.,0.))\n assert_allclose((v, k), (4., 3.25))\n\n\ndef test_rvgeneric_std():\n # Regression test for #1191\n assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])\n\n\nclass TestRvDiscrete(TestCase):\n def test_rvs(self):\n states = [-1,0,1,2,3,4]\n probability = [0.0,0.3,0.4,0.0,0.3,0.0]\n samples = 1000\n r = stats.rv_discrete(name='sample',values=(states,probability))\n x = r.rvs(size=samples)\n assert_(isinstance(x, numpy.ndarray))\n\n for s,p in zip(states,probability):\n assert_(abs(sum(x == s)/float(samples) - p) < 0.05)\n\n x = r.rvs()\n assert_(isinstance(x, int))\n\n def test_entropy(self):\n # Basic tests of entropy.\n pvals = np.array([0.25, 0.45, 0.3])\n p = stats.rv_discrete(values=([0, 1, 2], pvals))\n expected_h = -sum(xlogy(pvals, pvals))\n h = p.entropy()\n assert_allclose(h, expected_h)\n\n p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))\n h = p.entropy()\n assert_equal(h, 0.0)\n\n\nclass TestExpon(TestCase):\n def test_zero(self):\n assert_equal(stats.expon.pdf(0),1)\n\n def test_tail(self): # Regression test for ticket 807\n assert_equal(stats.expon.cdf(1e-18), 1e-18)\n assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)\n\n\nclass TestGenExpon(TestCase):\n def test_pdf_unity_area(self):\n from scipy.integrate import simps\n # PDF should integrate to one\n assert_almost_equal(simps(stats.genexpon.pdf(numpy.arange(0,10,0.01),\n 0.5, 0.5, 2.0),\n dx=0.01), 1, 1)\n\n def test_cdf_bounds(self):\n # CDF should always be positive\n cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)\n assert_(numpy.all((0 <= cdf) & (cdf <= 1)))\n\n\nclass TestExponpow(TestCase):\n def test_tail(self):\n assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)\n assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), 5)\n\n\nclass TestSkellam(TestCase):\n def test_pmf(self):\n # comparison to R\n k = numpy.arange(-10, 15)\n mu1, mu2 = 10, 5\n skpmfR = numpy.array(\n [4.2254582961926893e-005, 1.1404838449648488e-004,\n 2.8979625801752660e-004, 6.9177078182101231e-004,\n 1.5480716105844708e-003, 3.2412274963433889e-003,\n 6.3373707175123292e-003, 1.1552351566696643e-002,\n 1.9606152375042644e-002, 
3.0947164083410337e-002,\n 4.5401737566767360e-002, 6.1894328166820688e-002,\n 7.8424609500170578e-002, 9.2418812533573133e-002,\n 1.0139793148019728e-001, 1.0371927988298846e-001,\n 9.9076583077406091e-002, 8.8546660073089561e-002,\n 7.4187842052486810e-002, 5.8392772862200251e-002,\n 4.3268692953013159e-002, 3.0248159818374226e-002,\n 1.9991434305603021e-002, 1.2516877303301180e-002,\n 7.4389876226229707e-003])\n\n assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)\n\n def test_cdf(self):\n # comparison to R, only 5 decimals\n k = numpy.arange(-10, 15)\n mu1, mu2 = 10, 5\n skcdfR = numpy.array(\n [6.4061475386192104e-005, 1.7810985988267694e-004,\n 4.6790611790020336e-004, 1.1596768997212152e-003,\n 2.7077485103056847e-003, 5.9489760066490718e-003,\n 1.2286346724161398e-002, 2.3838698290858034e-002,\n 4.3444850665900668e-002, 7.4392014749310995e-002,\n 1.1979375231607835e-001, 1.8168808048289900e-001,\n 2.6011268998306952e-001, 3.5253150251664261e-001,\n 4.5392943399683988e-001, 5.5764871387982828e-001,\n 6.5672529695723436e-001, 7.4527195703032389e-001,\n 8.1945979908281064e-001, 8.7785257194501087e-001,\n 9.2112126489802404e-001, 9.5136942471639818e-001,\n 9.7136085902200120e-001, 9.8387773632530240e-001,\n 9.9131672394792536e-001])\n\n assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)\n\n\nclass TestLognorm(TestCase):\n def test_pdf(self):\n # Regression test for Ticket #1471: avoid nan with 0/0 situation\n with np.errstate(divide='ignore'):\n pdf = stats.lognorm.pdf([0, 0.5, 1], 1)\n assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])\n\n\nclass TestBeta(TestCase):\n def test_logpdf(self):\n # Regression test for Ticket #1326: avoid nan with 0*log(0) situation\n logpdf = stats.beta.logpdf(0,1,0.5)\n assert_almost_equal(logpdf, -0.69314718056)\n logpdf = stats.beta.logpdf(0,0.5,1)\n assert_almost_equal(logpdf, np.inf)\n\n def test_logpdf_ticket_1866(self):\n alpha, beta = 267, 1472\n x = np.array([0.2, 0.5, 0.6])\n b = stats.beta(alpha, beta)\n assert_allclose(b.logpdf(x).sum(), -1201.699061824062)\n assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))\n\n\nclass TestBetaPrime(TestCase):\n def test_logpdf(self):\n alpha, beta = 267, 1472\n x = np.array([0.2, 0.5, 0.6])\n b = stats.betaprime(alpha, beta)\n assert_(np.isfinite(b.logpdf(x)).all())\n assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))\n\n\nclass TestGamma(TestCase):\n def test_pdf(self):\n # a few test cases to compare with R\n pdf = stats.gamma.pdf(90, 394, scale=1./5)\n assert_almost_equal(pdf, 0.002312341)\n\n pdf = stats.gamma.pdf(3, 10, scale=1./5)\n assert_almost_equal(pdf, 0.1620358)\n\n def test_logpdf(self):\n # Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)\n # situation\n logpdf = stats.gamma.logpdf(0,1)\n assert_almost_equal(logpdf, 0)\n\n\nclass TestChi2(TestCase):\n # regression tests after precision improvements, ticket:1041, not verified\n def test_precision(self):\n assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003, 14)\n assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778, 14)\n\n\nclass TestArrayArgument(TestCase): # test for ticket:992\n def test_noexception(self):\n rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5), size=(10,5))\n assert_equal(rvs.shape, (10,5))\n\n\nclass TestDocstring(TestCase):\n def test_docstrings(self):\n # See ticket #761\n if stats.rayleigh.__doc__ is not None:\n self.assertTrue(\"rayleigh\" in stats.rayleigh.__doc__.lower())\n if stats.bernoulli.__doc__ is not None:\n 
self.assertTrue(\"bernoulli\" in stats.bernoulli.__doc__.lower())\n\n def test_no_name_arg(self):\n # If name is not given, construction shouldn't fail. See #1508.\n stats.rv_continuous()\n stats.rv_discrete()\n\n\nclass TestEntropy(TestCase):\n def test_entropy_positive(self):\n # See ticket #497\n pk = [0.5,0.2,0.3]\n qk = [0.1,0.25,0.65]\n eself = stats.entropy(pk,pk)\n edouble = stats.entropy(pk,qk)\n assert_(0.0 == eself)\n assert_(edouble >= 0.0)\n\n def test_entropy_base(self):\n pk = np.ones(16, float)\n S = stats.entropy(pk, base=2.)\n assert_(abs(S - 4.) < 1.e-5)\n\n qk = np.ones(16, float)\n qk[:8] = 2.\n S = stats.entropy(pk, qk)\n S2 = stats.entropy(pk, qk, base=2.)\n assert_(abs(S/S2 - np.log(2.)) < 1.e-5)\n\n def test_entropy_zero(self):\n # Test for PR-479\n assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,\n decimal=12)\n\n\ndef TestArgsreduce():\n a = array([1,3,2,1,2,3,3])\n b,c = argsreduce(a > 1, a, 2)\n\n assert_array_equal(b, [3,2,2,3,3])\n assert_array_equal(c, [2,2,2,2,2])\n\n b,c = argsreduce(2 > 1, a, 2)\n assert_array_equal(b, a[0])\n assert_array_equal(c, [2])\n\n b,c = argsreduce(a > 0, a, 2)\n assert_array_equal(b, a)\n assert_array_equal(c, [2] * numpy.size(a))\n\n\nclass TestFitMethod(object):\n skip = ['ncf']\n\n @dec.slow\n def test_fit(self):\n def check(func, dist, args, alpha):\n if dist in self.skip:\n raise SkipTest(\"%s fit known to fail\" % dist)\n distfunc = getattr(stats, dist)\n with np.errstate(all='ignore'):\n res = distfunc.rvs(*args, **{'size':200})\n vals = distfunc.fit(res)\n vals2 = distfunc.fit(res, optimizer='powell')\n # Only check the length of the return\n # FIXME: should check the actual results to see if we are 'close'\n # to what was created --- but what is 'close' enough\n if dist == 'frechet':\n assert_(len(vals) == len(args))\n assert_(len(vals2) == len(args))\n else:\n assert_(len(vals) == 2+len(args))\n assert_(len(vals2) == 2+len(args))\n\n for func, dist, args, alpha in test_all_distributions():\n yield check, func, dist, args, alpha\n\n @dec.slow\n def test_fix_fit(self):\n def check(func, dist, args, alpha):\n # Not sure why 'ncf', and 'beta' are failing\n # frechet has different len(args) than distfunc.numargs\n if dist in self.skip + ['frechet']:\n raise SkipTest(\"%s fit known to fail\" % dist)\n distfunc = getattr(stats, dist)\n with np.errstate(all='ignore'):\n res = distfunc.rvs(*args, **{'size':200})\n vals = distfunc.fit(res,floc=0)\n vals2 = distfunc.fit(res,fscale=1)\n assert_(len(vals) == 2+len(args))\n assert_(vals[-2] == 0)\n assert_(vals2[-1] == 1)\n assert_(len(vals2) == 2+len(args))\n if len(args) > 0:\n vals3 = distfunc.fit(res, f0=args[0])\n assert_(len(vals3) == 2+len(args))\n assert_(vals3[0] == args[0])\n if len(args) > 1:\n vals4 = distfunc.fit(res, f1=args[1])\n assert_(len(vals4) == 2+len(args))\n assert_(vals4[1] == args[1])\n if len(args) > 2:\n vals5 = distfunc.fit(res, f2=args[2])\n assert_(len(vals5) == 2+len(args))\n assert_(vals5[2] == args[2])\n\n for func, dist, args, alpha in test_all_distributions():\n yield check, func, dist, args, alpha\n\n def test_fix_fit_2args_lognorm(self):\n \"\"\"Regression test for #1551.\"\"\"\n np.random.seed(12345)\n with np.errstate(all='ignore'):\n x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)\n assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),\n [0.25888672, 0, 20], atol=1e-5)\n\n def test_fix_fit_norm(self):\n x = np.arange(1, 6)\n\n loc, scale = stats.norm.fit(x)\n assert_almost_equal(loc, 3)\n 
assert_almost_equal(scale, np.sqrt(2))\n\n loc, scale = stats.norm.fit(x, floc=2)\n assert_equal(loc, 2)\n assert_equal(scale, np.sqrt(3))\n\n loc, scale = stats.norm.fit(x, fscale=2)\n assert_almost_equal(loc, 3)\n assert_equal(scale, 2)\n\n def test_fix_fit_gamma(self):\n x = np.arange(1, 6)\n meanlog = np.log(x).mean()\n\n # A basic test of gamma.fit with floc=0.\n floc = 0\n a, loc, scale = stats.gamma.fit(x, floc=floc)\n s = np.log(x.mean()) - meanlog\n assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n # Regression tests for gh-2514.\n # The problem was that if `floc=0` was given, any other fixed\n # parameters were ignored.\n f0 = 1\n floc = 0\n a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)\n assert_equal(a, f0)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n f0 = 2\n floc = 0\n a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)\n assert_equal(a, f0)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n # loc and scale fixed.\n floc = 0\n fscale = 2\n a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)\n assert_equal(loc, floc)\n assert_equal(scale, fscale)\n c = meanlog - np.log(fscale)\n assert_almost_equal(special.digamma(a), c)\n\n def test_fix_fit_beta(self):\n # Test beta.fit when both floc and fscale are given.\n\n def mlefunc(a, b, x):\n # Zeros of this function are critical points of\n # the maximum likelihood function.\n n = len(x)\n s1 = np.log(x).sum()\n s2 = np.log(1-x).sum()\n psiab = special.psi(a + b)\n func = [s1 - n * (-psiab + special.psi(a)),\n s2 - n * (-psiab + special.psi(b))]\n return func\n\n # Basic test with floc and fscale given.\n x = np.array([0.125, 0.25, 0.5])\n a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)\n assert_equal(loc, 0)\n assert_equal(scale, 1)\n assert_allclose(mlefunc(a, b, x), [0,0], atol=1e-6)\n\n # Basic test with f0, floc and fscale given.\n # This is also a regression test for gh-2514.\n x = np.array([0.125, 0.25, 0.5])\n a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)\n assert_equal(a, 2)\n assert_equal(loc, 0)\n assert_equal(scale, 1)\n da, db = mlefunc(a, b, x)\n assert_allclose(db, 0, atol=1e-5)\n\n # Same floc and fscale values as above, but reverse the data\n # and fix b (f1).\n x2 = 1 - x\n a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)\n assert_equal(b2, 2)\n assert_equal(loc2, 0)\n assert_equal(scale2, 1)\n da, db = mlefunc(a2, b2, x2)\n assert_allclose(da, 0, atol=1e-5)\n # a2 of this test should equal b from above.\n assert_almost_equal(a2, b)\n\n # Check for detection of data out of bounds when floc and fscale\n # are given.\n assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)\n y = np.array([0, .5, 1])\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)\n\n # Check that attempting to fix all the parameters raises a ValueError.\n assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,\n floc=2, fscale=3)\n\n\nclass TestFrozen(TestCase):\n \"\"\"Test that a frozen distribution gives the same results as the original object.\n\n Only tested for the normal distribution (with loc and scale specified) and for the\n gamma distribution (with a shape parameter specified).\n \"\"\"\n def test_norm(self):\n dist = stats.norm\n frozen = 
stats.norm(loc=10.0, scale=3.0)\n\n result_f = frozen.pdf(20.0)\n result = dist.pdf(20.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.cdf(20.0)\n result = dist.cdf(20.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.ppf(0.25)\n result = dist.ppf(0.25, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.isf(0.25)\n result = dist.isf(0.25, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.sf(10.0)\n result = dist.sf(10.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.median()\n result = dist.median(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.mean()\n result = dist.mean(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.var()\n result = dist.var(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.std()\n result = dist.std(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.entropy()\n result = dist.entropy(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.moment(2)\n result = dist.moment(2,loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n def test_gamma(self):\n a = 2.0\n dist = stats.gamma\n frozen = stats.gamma(a)\n\n result_f = frozen.pdf(20.0)\n result = dist.pdf(20.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.cdf(20.0)\n result = dist.cdf(20.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.ppf(0.25)\n result = dist.ppf(0.25, a)\n assert_equal(result_f, result)\n\n result_f = frozen.isf(0.25)\n result = dist.isf(0.25, a)\n assert_equal(result_f, result)\n\n result_f = frozen.sf(10.0)\n result = dist.sf(10.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.median()\n result = dist.median(a)\n assert_equal(result_f, result)\n\n result_f = frozen.mean()\n result = dist.mean(a)\n assert_equal(result_f, result)\n\n result_f = frozen.var()\n result = dist.var(a)\n assert_equal(result_f, result)\n\n result_f = frozen.std()\n result = dist.std(a)\n assert_equal(result_f, result)\n\n result_f = frozen.entropy()\n result = dist.entropy(a)\n assert_equal(result_f, result)\n\n result_f = frozen.moment(2)\n result = dist.moment(2, a)\n assert_equal(result_f, result)\n\n def test_regression_ticket_1293(self):\n # Create a frozen distribution.\n frozen = stats.lognorm(1)\n # Call one of its methods that does not take any keyword arguments.\n m1 = frozen.moment(2)\n # Now call a method that takes a keyword argument.\n frozen.stats(moments='mvsk')\n # Call moment(2) again.\n # After calling stats(), the following was raising an exception.\n # So this test passes if the following does not raise an exception.\n m2 = frozen.moment(2)\n # The following should also be true, of course. 
But it is not\n # the focus of this test.\n assert_equal(m1, m2)\n\n\nclass TestExpect(TestCase):\n \"\"\"Test for expect method.\n\n Uses normal distribution and beta distribution for finite bounds, and\n hypergeom for discrete distribution with finite support\n\n \"\"\"\n def test_norm(self):\n v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)\n assert_almost_equal(v, 4, decimal=14)\n\n m = stats.norm.expect(lambda x: (x), loc=5, scale=2)\n assert_almost_equal(m, 5, decimal=14)\n\n lb = stats.norm.ppf(0.05, loc=5, scale=2)\n ub = stats.norm.ppf(0.95, loc=5, scale=2)\n prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)\n assert_almost_equal(prob90, 0.9, decimal=14)\n\n prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,\n conditional=True)\n assert_almost_equal(prob90c, 1., decimal=14)\n\n def test_beta(self):\n # case with finite support interval\n v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10,5),\n loc=5, scale=2)\n assert_almost_equal(v, 1./18., decimal=13)\n\n m = stats.beta.expect(lambda x: x, args=(10,5), loc=5., scale=2.)\n assert_almost_equal(m, 19/3., decimal=13)\n\n ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)\n lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)\n prob90 = stats.beta.expect(lambda x: 1., args=(10,10), loc=5.,\n scale=2.,lb=lb, ub=ub, conditional=False)\n assert_almost_equal(prob90, 0.9, decimal=13)\n\n prob90c = stats.beta.expect(lambda x: 1, args=(10,10), loc=5,\n scale=2, lb=lb, ub=ub, conditional=True)\n assert_almost_equal(prob90c, 1., decimal=13)\n\n def test_hypergeom(self):\n # test case with finite bounds\n\n # without specifying bounds\n m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)\n m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)\n assert_almost_equal(m, m_true, decimal=13)\n\n v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),\n loc=5.)\n assert_almost_equal(v, v_true, decimal=14)\n\n # with bounds, bounds equal to shifted support\n v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),\n loc=5., lb=5, ub=13)\n assert_almost_equal(v_bounds, v_true, decimal=14)\n\n # drop boundary points\n prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()\n prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),\n loc=5., lb=6, ub=12)\n assert_almost_equal(prob_bounds, prob_true, decimal=13)\n\n # conditional\n prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,\n lb=6, ub=12, conditional=True)\n assert_almost_equal(prob_bc, 1, decimal=14)\n\n # check simple integral\n prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),\n lb=0, ub=8)\n assert_almost_equal(prob_b, 1, decimal=13)\n\n def test_poisson(self):\n # poisson, use lower bound only\n prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,\n conditional=False)\n prob_b_true = 1-stats.poisson.cdf(2,2)\n assert_almost_equal(prob_bounds, prob_b_true, decimal=14)\n\n prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,\n conditional=True)\n assert_almost_equal(prob_lb, 1, decimal=14)\n\n def test_genhalflogistic(self):\n # genhalflogistic, changes upper bound of support in _argcheck\n # regression test for gh-2622\n halflog = stats.genhalflogistic\n # check consistency when calling expect twice with the same input\n res1 = halflog.expect(args=(1.5,))\n halflog.expect(args=(0.5,))\n res2 = halflog.expect(args=(1.5,))\n assert_almost_equal(res1, res2, decimal=14)\n\n\nclass TestNct(TestCase):\n def 
test_nc_parameter(self):\n # Parameter values c<=0 were not enabled (gh-2402).\n # For negative values c and for c=0 results of rv.cdf(0) below were nan\n rv = stats.nct(5, 0)\n assert_equal(rv.cdf(0), 0.5)\n rv = stats.nct(5, -1)\n assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)\n\n def test_broadcasting(self):\n res = stats.nct.pdf(5, np.arange(4,7)[:,None], np.linspace(0.1, 1, 4))\n expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],\n [0.00217142, 0.00395366, 0.00683888, 0.01126276],\n [0.00153078, 0.00291093, 0.00525206, 0.00900815]])\n assert_allclose(res, expected, rtol=1e-5)\n\n def text_variance_gh_issue_2401():\n # Computation of the variance of a non-central t-distribution resulted\n # in a TypeError: ufunc 'isinf' not supported for the input types,\n # and the inputs could not be safely coerced to any supported types\n # according to the casting rule 'safe'\n rv = stats.nct(4, 0)\n assert_equal(rv.var(), 2.0)\n\n\ndef test_regression_ticket_1316():\n # The following was raising an exception, because _construct_default_doc()\n # did not handle the default keyword extradoc=None. See ticket #1316.\n g = stats.distributions.gamma_gen(name='gamma')\n\n\ndef test_regression_ticket_1326():\n # adjust to avoid nan with 0*log(0)\n assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)\n\n\ndef test_regression_tukey_lambda():\n # Make sure that Tukey-Lambda distribution correctly handles non-positive lambdas.\n x = np.linspace(-5.0, 5.0, 101)\n\n olderr = np.seterr(divide='ignore')\n try:\n for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:\n p = stats.tukeylambda.pdf(x, lam)\n assert_((p != 0.0).all())\n assert_(~np.isnan(p).all())\n\n lam = np.array([[-1.0], [0.0], [2.0]])\n p = stats.tukeylambda.pdf(x, lam)\n finally:\n np.seterr(**olderr)\n\n assert_(~np.isnan(p).all())\n assert_((p[0] != 0.0).all())\n assert_((p[1] != 0.0).all())\n assert_((p[2] != 0.0).any())\n assert_((p[2] == 0.0).any())\n\n\[email protected](DOCSTRINGS_STRIPPED)\ndef test_regression_ticket_1421():\n assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)\n assert_('pmf(x,' in stats.poisson.__doc__)\n\n\ndef test_nan_arguments_gh_issue_1362():\n assert_(np.isnan(stats.t.logcdf(1, np.nan)))\n assert_(np.isnan(stats.t.cdf(1, np.nan)))\n assert_(np.isnan(stats.t.logsf(1, np.nan)))\n assert_(np.isnan(stats.t.sf(1, np.nan)))\n assert_(np.isnan(stats.t.pdf(1, np.nan)))\n assert_(np.isnan(stats.t.logpdf(1, np.nan)))\n assert_(np.isnan(stats.t.ppf(1, np.nan)))\n assert_(np.isnan(stats.t.isf(1, np.nan)))\n\n assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))\n\n\ndef test_frozen_fit_ticket_1536():\n np.random.seed(5678)\n true = np.array([0.25, 0., 0.5])\n x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)\n\n olderr = np.seterr(divide='ignore')\n try:\n params = np.array(stats.lognorm.fit(x, floc=0.))\n finally:\n np.seterr(**olderr)\n\n assert_almost_equal(params, true, decimal=2)\n\n params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))\n assert_almost_equal(params, true, decimal=2)\n\n params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))\n assert_almost_equal(params, true, 
decimal=2)\n\n params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))\n assert_almost_equal(params, true, decimal=2)\n\n np.random.seed(5678)\n loc = 1\n floc = 0.9\n x = stats.norm.rvs(loc, 2., size=100)\n params = np.array(stats.norm.fit(x, floc=floc))\n expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])\n assert_almost_equal(params, expected, decimal=4)\n\n\ndef test_regression_ticket_1530():\n # Check the starting value works for Cauchy distribution fit.\n np.random.seed(654321)\n rvs = stats.cauchy.rvs(size=100)\n params = stats.cauchy.fit(rvs)\n expected = (0.045, 1.142)\n assert_almost_equal(params, expected, decimal=1)\n\n\ndef test_tukeylambda_stats_ticket_1545():\n # Some test for the variance and kurtosis of the Tukey Lambda distr.\n # See test_tukeylamdba_stats.py for more tests.\n\n mv = stats.tukeylambda.stats(0, moments='mvsk')\n # Known exact values:\n expected = [0, np.pi**2/3, 0, 1.2]\n assert_almost_equal(mv, expected, decimal=10)\n\n mv = stats.tukeylambda.stats(3.13, moments='mvsk')\n # 'expected' computed with mpmath.\n expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]\n assert_almost_equal(mv, expected, decimal=10)\n\n mv = stats.tukeylambda.stats(0.14, moments='mvsk')\n # 'expected' computed with mpmath.\n expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]\n assert_almost_equal(mv, expected, decimal=10)\n\n\ndef test_poisson_logpmf_ticket_1436():\n assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))\n\n\ndef test_powerlaw_stats():\n \"\"\"Test the powerlaw stats function.\n\n This unit test is also a regression test for ticket 1548.\n\n The exact values are:\n mean:\n mu = a / (a + 1)\n variance:\n sigma**2 = a / ((a + 2) * (a + 1) ** 2)\n skewness:\n One formula (see http://en.wikipedia.org/wiki/Skewness) is\n gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3\n A short calculation shows that E[X**k] is a / (a + k), so gamma_1\n can be implemented as\n n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3\n d = sqrt(a/((a+2)*(a+1)**2)) ** 3\n gamma_1 = n/d\n Either by simplifying, or by a direct calculation of mu_3 / sigma**3,\n one gets the more concise formula:\n gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)\n kurtosis: (See http://en.wikipedia.org/wiki/Kurtosis)\n The excess kurtosis is\n gamma_2 = mu_4 / sigma**4 - 3\n A bit of calculus and algebra (sympy helps) shows that\n mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))\n so\n gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3\n which can be rearranged to\n gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))\n \"\"\"\n cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),\n (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]\n for a, exact_mvsk in cases:\n mvsk = stats.powerlaw.stats(a, moments=\"mvsk\")\n assert_array_almost_equal(mvsk, exact_mvsk)\n\n\ndef test_ksone_fit_freeze():\n # Regression test for ticket #1638.\n d = np.array(\n [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,\n -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,\n 0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,\n 0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,\n 0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,\n 0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,\n -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,\n -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,\n -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,\n -0.06037974, 0.37670779, -0.21684405])\n\n olderr 
= np.seterr(invalid='ignore')\n warn_ctx = WarningManager()\n warn_ctx.__enter__()\n try:\n warnings.simplefilter('ignore', UserWarning)\n warnings.simplefilter('ignore', RuntimeWarning)\n stats.ksone.fit(d)\n finally:\n warn_ctx.__exit__()\n np.seterr(**olderr)\n\n\ndef test_norm_logcdf():\n # Test precision of the logcdf of the normal distribution.\n # This precision was enhanced in ticket 1614.\n x = -np.asarray(list(range(0, 120, 4)))\n # Values from R\n expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,\n -131.69539607, -203.91715537, -292.09872100, -396.25241451,\n -516.38564863, -652.50322759, -804.60844201, -972.70364403,\n -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,\n -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,\n -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,\n -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,\n -6277.63751711, -6733.67260303]\n\n olderr = np.seterr(divide='ignore')\n try:\n assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)\n finally:\n np.seterr(**olderr)\n\n\ndef test_hypergeom_interval_1802():\n # these two had endless loops\n assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),\n (152.0, 197.0))\n assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),\n (152.0, 197.0))\n # this was working also before\n assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),\n (153.0, 196.0))\n\n # degenerate case .a == .b\n assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)\n assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)\n\n\ndef test_distribution_too_many_args():\n # Check that a TypeError is raised when too many args are given to a method\n # Regression test for ticket 1815.\n x = np.linspace(0.1, 0.7, num=5)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)\n\n # These should not give errors\n stats.gamma.pdf(x, 2, 3) # loc=3\n stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4\n stats.gamma.stats(2., 3)\n stats.gamma.stats(2., 3, 4)\n stats.gamma.stats(2., 3, 4, 'mv')\n stats.gamma.rvs(2., 3, 4, 5)\n stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)\n\n # Also for a discrete distribution\n stats.geom.pmf(x, 2, loc=3) # no error, loc=3\n assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)\n assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)\n\n # And for distributions with 0, 2 and 3 args respectively\n assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)\n assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)\n assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)\n assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)\n assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)\n stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale\n\n\ndef test_ncx2_tails_ticket_955():\n # Trac #955 -- check that the cdf computed by special functions\n # 
matches the integrated pdf\n a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)\n b = stats.ncx2.veccdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)\n assert_allclose(a, b, rtol=1e-3, atol=0)\n\n\ndef test_foldnorm_zero():\n # Parameter value c=0 was not enabled, see gh-2399.\n rv = stats.foldnorm(0, scale=1)\n assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan\n\n\n## Test subclassing distributions w/ explicit shapes\n\nclass _distr_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return 42\n\n\nclass _distr2_gen(stats.rv_continuous):\n def _cdf(self, x, a):\n return 42 * a + x\n\n\nclass _distr3_gen(stats.rv_continuous):\n def _pdf(self, x, a, b):\n return a + b\n\n def _cdf(self, x, a):\n \"\"\"Different # of shape params from _pdf, to be able to check that\n inspection catches the inconsistency.\"\"\"\n return 42 * a + x\n\n\nclass _distr6_gen(stats.rv_continuous):\n #Two shape parameters (both _pdf and _cdf defined, consistent shapes.)\n def _pdf(self, x, a, b):\n return a*x + b\n\n def _cdf(self, x, a, b):\n return 42 * a + x\n\n\nclass TestSubclassingExplicitShapes(TestCase):\n \"\"\"Construct a distribution w/ explicit shapes parameter and test it.\"\"\"\n\n def test_correct_shapes(self):\n dummy_distr = _distr_gen(name='dummy', shapes='a')\n assert_equal(dummy_distr.pdf(1, a=1), 42)\n\n def test_wrong_shapes_1(self):\n dummy_distr = _distr_gen(name='dummy', shapes='A')\n assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))\n\n def test_wrong_shapes_2(self):\n dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')\n dct =dict(a=1, b=2, c=3)\n assert_raises(TypeError, dummy_distr.pdf, 1, **dct)\n\n def test_shapes_string(self):\n # shapes must be a string\n dct = dict(name='dummy', shapes=42)\n assert_raises(TypeError, _distr_gen, **dct)\n\n def test_shapes_identifiers_1(self):\n # shapes must be a comma-separated list of valid python identifiers\n dct = dict(name='dummy', shapes='(!)')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_2(self):\n dct = dict(name='dummy', shapes='4chan')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_3(self):\n dct = dict(name='dummy', shapes='m(fti)')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_nodefaults(self):\n dct = dict(name='dummy', shapes='a=2')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_args(self):\n dct = dict(name='dummy', shapes='*args')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_kwargs(self):\n dct = dict(name='dummy', shapes='**kwargs')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_keywords(self):\n # python keywords cannot be used for shape parameters\n dct = dict(name='dummy', shapes='a, b, c, lambda')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n\n def test_shapes_signature(self):\n # test explicit shapes which agree w/ the signature of _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return stats.norm._pdf(x) * a\n\n dist = _dist_gen(shapes='a')\n assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)\n\n\n def test_shapes_signature_inconsistent(self):\n # test explicit shapes which do not agree w/ the signature of _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return stats.norm._pdf(x) * a\n\n dist = _dist_gen(shapes='a, b')\n assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))\n\n\n def test_star_args(self):\n # test _pdf with only starargs\n # NB: **kwargs of pdf will 
never reach _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, *args):\n extra_kwarg = args[0]\n return stats.norm._pdf(x) * extra_kwarg\n\n dist = _dist_gen(shapes='extra_kwarg')\n assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)\n assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)\n assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))\n\n def test_star_args_2(self):\n # test _pdf with named & starargs\n # NB: **kwargs of pdf will never reach _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, offset, *args):\n extra_kwarg = args[0]\n return stats.norm._pdf(x) * extra_kwarg + offset\n\n dist = _dist_gen(shapes='offset, extra_kwarg')\n assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),\n stats.norm.pdf(0.5)*33 + 111)\n assert_equal(dist.pdf(0.5, 111, 33),\n stats.norm.pdf(0.5)*33 + 111)\n\n def test_extra_kwarg(self):\n # **kwargs to _pdf are ignored.\n # this is a limitation of the framework (_pdf(x, *goodargs))\n class _distr_gen(stats.rv_continuous):\n def _pdf(self, x, *args, **kwargs):\n # _pdf should handle *args, **kwargs itself. Here \"handling\" is\n # ignoring *args and looking for ``extra_kwarg`` and using that.\n extra_kwarg = kwargs.pop('extra_kwarg', 1)\n return stats.norm._pdf(x) * extra_kwarg\n\n dist = _distr_gen(shapes='extra_kwarg')\n assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))\n\n\n def shapes_empty_string(self):\n # shapes='' is equivalent to shapes=None\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x):\n return stats.norm.pdf(x)\n\n dist = _dist_gen(shapes='')\n assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))\n\n\nclass TestSubclassingNoShapes(TestCase):\n \"\"\"Construct a distribution w/o explicit shapes parameter and test it.\"\"\"\n\n def test_only__pdf(self):\n dummy_distr = _distr_gen(name='dummy')\n assert_equal(dummy_distr.pdf(1, a=1), 42)\n\n def test_only__cdf(self):\n # _pdf is determined from _cdf by taking numerical derivative\n dummy_distr = _distr2_gen(name='dummy')\n assert_almost_equal(dummy_distr.pdf(1, a=1), 1)\n\n @dec.skipif(DOCSTRINGS_STRIPPED)\n def test_signature_inspection(self):\n # check that _pdf signature inspection works correctly, and is used in\n # the class docstring\n dummy_distr = _distr_gen(name='dummy')\n assert_equal(dummy_distr.numargs, 1)\n assert_equal(dummy_distr.shapes, 'a')\n res = re.findall('logpdf\\(x, a, loc=0, scale=1\\)',\n dummy_distr.__doc__)\n assert_(len(res) == 1)\n\n @dec.skipif(DOCSTRINGS_STRIPPED)\n def test_signature_inspection_2args(self):\n # same for 2 shape params and both _pdf and _cdf defined\n dummy_distr = _distr6_gen(name='dummy')\n assert_equal(dummy_distr.numargs, 2)\n assert_equal(dummy_distr.shapes, 'a, b')\n res = re.findall('logpdf\\(x, a, b, loc=0, scale=1\\)',\n dummy_distr.__doc__)\n assert_(len(res) == 1)\n\n def test_signature_inspection_2args_incorrect_shapes(self):\n # both _pdf and _cdf defined, but shapes are inconsistent: raises\n try:\n _distr3_gen(name='dummy')\n except TypeError:\n pass\n else:\n raise AssertionError('TypeError not raised.')\n\n def test_defaults_raise(self):\n # default arguments should raise\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a=42):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n def test_starargs_raise(self):\n # without explicit shapes, *args are not allowed\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a, *args):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n def 
test_kwargs_raise(self):\n # without explicit shapes, **kwargs are not allowed\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a, **kwargs):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n\[email protected](DOCSTRINGS_STRIPPED)\ndef test_docstrings():\n badones = [',\\s*,', '\\(\\s*,', '^\\s*:']\n for distname in stats.__all__:\n dist = getattr(stats, distname)\n if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):\n for regex in badones:\n assert_( re.search(regex, dist.__doc__) is None)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
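A minimal sketch of the subclassing pattern exercised in the embedded tests above, assuming scipy is installed; the distribution class `expon_rate_gen` and its rate parameter `lam` are invented for illustration (only `_pdf` is defined, so `cdf` falls back to rv_continuous's numerical integration).

import numpy as np
from scipy import stats

class expon_rate_gen(stats.rv_continuous):
    # exponential law parametrized by its rate lam > 0; a valid pdf on x >= 0
    def _pdf(self, x, lam):
        return lam * np.exp(-lam * x)

# `a=0.0` sets the lower support bound; `shapes` declares the single shape parameter
expon_rate = expon_rate_gen(a=0.0, name='expon_rate', shapes='lam')

print(expon_rate.pdf(1.0, lam=2.0))   # 2*exp(-2) ~= 0.2707
print(expon_rate.cdf(1.0, lam=2.0))   # ~= 1 - exp(-2), via numeric integration of _pdf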
"from __future__ import division, absolute_import, print_function\n\nfrom numpy.testing import *\nfrom numpy import array\nfrom numpy.compat import asbytes\nimport util\n\nclass TestReturnCharacter(util.F2PyTest):\n def check_function(self, t):\n tname = t.__doc__.split()[0]\n if tname in ['t0', 't1', 's0', 's1']:\n assert_( t(23)==asbytes('2'))\n r = t('ab');assert_( r==asbytes('a'), repr(r))\n r = t(array('ab'));assert_( r==asbytes('a'), repr(r))\n r = t(array(77, 'u1'));assert_( r==asbytes('M'), repr(r))\n #assert_(_raises(ValueError, t, array([77,87])))\n #assert_(_raises(ValueError, t, array(77)))\n elif tname in ['ts', 'ss']:\n assert_( t(23)==asbytes('23 '), repr(t(23)))\n assert_( t('123456789abcdef')==asbytes('123456789a'))\n elif tname in ['t5', 's5']:\n assert_( t(23)==asbytes('23 '), repr(t(23)))\n assert_( t('ab')==asbytes('ab '), repr(t('ab')))\n assert_( t('123456789abcdef')==asbytes('12345'))\n else:\n raise NotImplementedError\n\nclass TestF77ReturnCharacter(TestReturnCharacter):\n code = \"\"\"\n function t0(value)\n character value\n character t0\n t0 = value\n end\n function t1(value)\n character*1 value\n character*1 t1\n t1 = value\n end\n function t5(value)\n character*5 value\n character*5 t5\n t5 = value\n end\n function ts(value)\n character*(*) value\n character*(*) ts\n ts = value\n end\n\n subroutine s0(t0,value)\n character value\n character t0\ncf2py intent(out) t0\n t0 = value\n end\n subroutine s1(t1,value)\n character*1 value\n character*1 t1\ncf2py intent(out) t1\n t1 = value\n end\n subroutine s5(t5,value)\n character*5 value\n character*5 t5\ncf2py intent(out) t5\n t5 = value\n end\n subroutine ss(ts,value)\n character*(*) value\n character*10 ts\ncf2py intent(out) ts\n ts = value\n end\n \"\"\"\n\n @dec.slow\n def test_all(self):\n for name in \"t0,t1,t5,s0,s1,s5,ss\".split(\",\"):\n self.check_function(getattr(self.module, name))\n\nclass TestF90ReturnCharacter(TestReturnCharacter):\n suffix = \".f90\"\n code = \"\"\"\nmodule f90_return_char\n contains\n function t0(value)\n character :: value\n character :: t0\n t0 = value\n end function t0\n function t1(value)\n character(len=1) :: value\n character(len=1) :: t1\n t1 = value\n end function t1\n function t5(value)\n character(len=5) :: value\n character(len=5) :: t5\n t5 = value\n end function t5\n function ts(value)\n character(len=*) :: value\n character(len=10) :: ts\n ts = value\n end function ts\n\n subroutine s0(t0,value)\n character :: value\n character :: t0\n!f2py intent(out) t0\n t0 = value\n end subroutine s0\n subroutine s1(t1,value)\n character(len=1) :: value\n character(len=1) :: t1\n!f2py intent(out) t1\n t1 = value\n end subroutine s1\n subroutine s5(t5,value)\n character(len=5) :: value\n character(len=5) :: t5\n!f2py intent(out) t5\n t5 = value\n end subroutine s5\n subroutine ss(ts,value)\n character(len=*) :: value\n character(len=10) :: ts\n!f2py intent(out) ts\n ts = value\n end subroutine ss\nend module f90_return_char\n \"\"\"\n\n @dec.slow\n def test_all(self):\n for name in \"t0,t1,t5,ts,s0,s1,s5,ss\".split(\",\"):\n self.check_function(getattr(self.module.f90_return_char, name))\n\nif __name__ == \"__main__\":\n import nose\n nose.runmodule()\n",
"# Author: Travis Oliphant\n# 2003\n#\n# Feb. 2010: Updated by Warren Weckesser:\n# Rewrote much of chirp()\n# Added sweep_poly()\nfrom __future__ import division, print_function, absolute_import\n\n\nfrom numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \\\n exp, cos, sin, polyval, polyint\n\n__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']\n\n\ndef sawtooth(t, width=1):\n \"\"\"\n Return a periodic sawtooth or triangle waveform.\n\n The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the\n interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval\n ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].\n\n Note that this is not band-limited. It produces an infinite number\n of harmonics, which are aliased back and forth across the frequency\n spectrum.\n\n Parameters\n ----------\n t : array_like\n Time.\n width : array_like, optional\n Width of the rising ramp as a proportion of the total cycle.\n Default is 1, producing a rising ramp, while 0 produces a falling\n ramp. `t` = 0.5 produces a triangle wave.\n If an array, causes wave shape to change over time, and must be the\n same length as t.\n\n Returns\n -------\n y : ndarray\n Output array containing the sawtooth waveform.\n\n Examples\n --------\n A 5 Hz waveform sampled at 500 Hz for 1 second:\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n >>> t = np.linspace(0, 1, 500)\n >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))\n\n \"\"\"\n t, w = asarray(t), asarray(width)\n w = asarray(w + (t - t))\n t = asarray(t + (w - w))\n if t.dtype.char in ['fFdD']:\n ytype = t.dtype.char\n else:\n ytype = 'd'\n y = zeros(t.shape, ytype)\n\n # width must be between 0 and 1 inclusive\n mask1 = (w > 1) | (w < 0)\n place(y, mask1, nan)\n\n # take t modulo 2*pi\n tmod = mod(t, 2 * pi)\n\n # on the interval 0 to width*2*pi function is\n # tmod / (pi*w) - 1\n mask2 = (1 - mask1) & (tmod < w * 2 * pi)\n tsub = extract(mask2, tmod)\n wsub = extract(mask2, w)\n place(y, mask2, tsub / (pi * wsub) - 1)\n\n # on the interval width*2*pi to 2*pi function is\n # (pi*(w+1)-tmod) / (pi*(1-w))\n\n mask3 = (1 - mask1) & (1 - mask2)\n tsub = extract(mask3, tmod)\n wsub = extract(mask3, w)\n place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))\n return y\n\n\ndef square(t, duty=0.5):\n \"\"\"\n Return a periodic square-wave waveform.\n\n The square wave has a period ``2*pi``, has value +1 from 0 to\n ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in\n the interval [0,1].\n\n Note that this is not band-limited. It produces an infinite number\n of harmonics, which are aliased back and forth across the frequency\n spectrum.\n\n Parameters\n ----------\n t : array_like\n The input time array.\n duty : array_like, optional\n Duty cycle. 
Default is 0.5 (50% duty cycle).\n If an array, causes wave shape to change over time, and must be the\n same length as t.\n\n Returns\n -------\n y : ndarray\n Output array containing the square waveform.\n\n Examples\n --------\n A 5 Hz waveform sampled at 500 Hz for 1 second:\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n >>> t = np.linspace(0, 1, 500, endpoint=False)\n >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))\n >>> plt.ylim(-2, 2)\n\n A pulse-width modulated sine wave:\n\n >>> plt.figure()\n >>> sig = np.sin(2 * np.pi * t)\n >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)\n >>> plt.subplot(2, 1, 1)\n >>> plt.plot(t, sig)\n >>> plt.subplot(2, 1, 2)\n >>> plt.plot(t, pwm)\n >>> plt.ylim(-1.5, 1.5)\n\n \"\"\"\n t, w = asarray(t), asarray(duty)\n w = asarray(w + (t - t))\n t = asarray(t + (w - w))\n if t.dtype.char in ['fFdD']:\n ytype = t.dtype.char\n else:\n ytype = 'd'\n\n y = zeros(t.shape, ytype)\n\n # width must be between 0 and 1 inclusive\n mask1 = (w > 1) | (w < 0)\n place(y, mask1, nan)\n\n # on the interval 0 to duty*2*pi function is 1\n tmod = mod(t, 2 * pi)\n mask2 = (1 - mask1) & (tmod < w * 2 * pi)\n place(y, mask2, 1)\n\n # on the interval duty*2*pi to 2*pi function is\n # (pi*(w+1)-tmod) / (pi*(1-w))\n mask3 = (1 - mask1) & (1 - mask2)\n place(y, mask3, -1)\n return y\n\n\ndef gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,\n retenv=False):\n \"\"\"\n Return a Gaussian modulated sinusoid:\n\n ``exp(-a t^2) exp(1j*2*pi*fc*t).``\n\n If `retquad` is True, then return the real and imaginary parts\n (in-phase and quadrature).\n If `retenv` is True, then return the envelope (unmodulated signal).\n Otherwise, return the real part of the modulated sinusoid.\n\n Parameters\n ----------\n t : ndarray or the string 'cutoff'\n Input array.\n fc : int, optional\n Center frequency (e.g. Hz). Default is 1000.\n bw : float, optional\n Fractional bandwidth in frequency domain of pulse (e.g. Hz).\n Default is 0.5.\n bwr : float, optional\n Reference level at which fractional bandwidth is calculated (dB).\n Default is -6.\n tpr : float, optional\n If `t` is 'cutoff', then the function returns the cutoff\n time for when the pulse amplitude falls below `tpr` (in dB).\n Default is -60.\n retquad : bool, optional\n If True, return the quadrature (imaginary) as well as the real part\n of the signal. Default is False.\n retenv : bool, optional\n If True, return the envelope of the signal. Default is False.\n\n Returns\n -------\n yI : ndarray\n Real part of signal. Always returned.\n yQ : ndarray\n Imaginary part of signal. Only returned if `retquad` is True.\n yenv : ndarray\n Envelope of signal. 
Only returned if `retenv` is True.\n\n See Also\n --------\n scipy.signal.morlet\n\n Examples\n --------\n Plot real component, imaginary component, and envelope for a 5 Hz pulse,\n sampled at 100 Hz for 2 seconds:\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)\n >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)\n >>> plt.plot(t, i, t, q, t, e, '--')\n\n \"\"\"\n if fc < 0:\n raise ValueError(\"Center frequency (fc=%.2f) must be >=0.\" % fc)\n if bw <= 0:\n raise ValueError(\"Fractional bandwidth (bw=%.2f) must be > 0.\" % bw)\n if bwr >= 0:\n raise ValueError(\"Reference level for bandwidth (bwr=%.2f) must \"\n \"be < 0 dB\" % bwr)\n\n # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)\n\n ref = pow(10.0, bwr / 20.0)\n # fdel = fc*bw/2: g(fdel) = ref --- solve this for a\n #\n # pi^2/a * fc^2 * bw^2 /4=-log(ref)\n a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))\n\n if t == 'cutoff': # compute cut_off point\n # Solve exp(-a tc**2) = tref for tc\n # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)\n if tpr >= 0:\n raise ValueError(\"Reference level for time cutoff must be < 0 dB\")\n tref = pow(10.0, tpr / 20.0)\n return sqrt(-log(tref) / a)\n\n yenv = exp(-a * t * t)\n yI = yenv * cos(2 * pi * fc * t)\n yQ = yenv * sin(2 * pi * fc * t)\n if not retquad and not retenv:\n return yI\n if not retquad and retenv:\n return yI, yenv\n if retquad and not retenv:\n return yI, yQ\n if retquad and retenv:\n return yI, yQ, yenv\n\n\ndef chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):\n \"\"\"Frequency-swept cosine generator.\n\n In the following, 'Hz' should be interpreted as 'cycles per unit';\n there is no requirement here that the unit is one second. The\n important distinction is that the units of rotation are cycles, not\n radians. Likewise, `t` could be a measurement of space instead of time.\n\n Parameters\n ----------\n t : ndarray\n Times at which to evaluate the waveform.\n f0 : float\n Frequency (e.g. Hz) at time t=0.\n t1 : float\n Time at which `f1` is specified.\n f1 : float\n Frequency (e.g. Hz) of the waveform at time `t1`.\n method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional\n Kind of frequency sweep. If not given, `linear` is assumed. See\n Notes below for more details.\n phi : float, optional\n Phase offset, in degrees. Default is 0.\n vertex_zero : bool, optional\n This parameter is only used when `method` is 'quadratic'.\n It determines whether the vertex of the parabola that is the graph\n of the frequency is at t=0 or t=t1.\n\n Returns\n -------\n y : ndarray\n A numpy array containing the signal evaluated at `t` with the\n requested time-varying frequency. More precisely, the function\n returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral\n (from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.\n\n See Also\n --------\n sweep_poly\n\n Notes\n -----\n There are four options for the `method`. The following formulas give\n the instantaneous frequency (in Hz) of the signal generated by\n `chirp()`. For convenience, the shorter names shown below may also be\n used.\n\n linear, lin, li:\n\n ``f(t) = f0 + (f1 - f0) * t / t1``\n\n quadratic, quad, q:\n\n The graph of the frequency f(t) is a parabola through (0, f0) and\n (t1, f1). By default, the vertex of the parabola is at (0, f0).\n If `vertex_zero` is False, then the vertex is at (t1, f1). 
The\n formula is:\n\n if vertex_zero is True:\n\n ``f(t) = f0 + (f1 - f0) * t**2 / t1**2``\n\n else:\n\n ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``\n\n To use a more general quadratic function, or an arbitrary\n polynomial, use the function `scipy.signal.waveforms.sweep_poly`.\n\n logarithmic, log, lo:\n\n ``f(t) = f0 * (f1/f0)**(t/t1)``\n\n f0 and f1 must be nonzero and have the same sign.\n\n This signal is also known as a geometric or exponential chirp.\n\n hyperbolic, hyp:\n\n ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``\n\n f1 must be positive, and f0 must be greater than f1.\n\n \"\"\"\n # 'phase' is computed in _chirp_phase, to make testing easier.\n phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)\n # Convert phi to radians.\n phi *= pi / 180\n return cos(phase + phi)\n\n\ndef _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):\n \"\"\"\n Calculate the phase used by chirp_phase to generate its output.\n\n See `chirp_phase` for a description of the arguments.\n\n \"\"\"\n f0 = float(f0)\n t1 = float(t1)\n f1 = float(f1)\n if method in ['linear', 'lin', 'li']:\n beta = (f1 - f0) / t1\n phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)\n\n elif method in ['quadratic', 'quad', 'q']:\n beta = (f1 - f0) / (t1 ** 2)\n if vertex_zero:\n phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)\n else:\n phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)\n\n elif method in ['logarithmic', 'log', 'lo']:\n if f0 * f1 <= 0.0:\n raise ValueError(\"For a geometric chirp, f0 and f1 must be \"\n \"nonzero and have the same sign.\")\n if f0 == f1:\n phase = 2 * pi * f0 * t\n else:\n beta = t1 / log(f1 / f0)\n phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)\n\n elif method in ['hyperbolic', 'hyp']:\n if f1 <= 0.0 or f0 <= f1:\n raise ValueError(\"hyperbolic chirp requires f0 > f1 > 0.0.\")\n c = f1 * t1\n df = f0 - f1\n phase = 2 * pi * (f0 * c / df) * log((df * t + c) / c)\n\n else:\n raise ValueError(\"method must be 'linear', 'quadratic', 'logarithmic',\"\n \" or 'hyperbolic', but a value of %r was given.\" % method)\n\n return phase\n\n\ndef sweep_poly(t, poly, phi=0):\n \"\"\"\n Frequency-swept cosine generator, with a time-dependent frequency.\n\n This function generates a sinusoidal function whose instantaneous\n frequency varies with time. The frequency at time `t` is given by\n the polynomial `poly`.\n\n Parameters\n ----------\n t : ndarray\n Times at which to evaluate the waveform.\n poly : 1-D array-like or instance of numpy.poly1d\n The desired frequency expressed as a polynomial. If `poly` is\n a list or ndarray of length n, then the elements of `poly` are\n the coefficients of the polynomial, and the instantaneous\n frequency is\n\n ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``\n\n If `poly` is an instance of numpy.poly1d, then the\n instantaneous frequency is\n\n ``f(t) = poly(t)``\n\n phi : float, optional\n Phase offset, in degrees, Default: 0.\n\n Returns\n -------\n sweep_poly : ndarray\n A numpy array containing the signal evaluated at `t` with the\n requested time-varying frequency. More precisely, the function\n returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral\n (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.\n\n See Also\n --------\n chirp\n\n Notes\n -----\n .. 
versionadded:: 0.8.0\n\n If `poly` is a list or ndarray of length `n`, then the elements of\n `poly` are the coefficients of the polynomial, and the instantaneous\n frequency is:\n\n ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``\n\n If `poly` is an instance of `numpy.poly1d`, then the instantaneous\n frequency is:\n\n ``f(t) = poly(t)``\n\n Finally, the output `s` is:\n\n ``cos(phase + (pi/180)*phi)``\n\n where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,\n ``f(t)`` as defined above.\n\n \"\"\"\n # 'phase' is computed in _sweep_poly_phase, to make testing easier.\n phase = _sweep_poly_phase(t, poly)\n # Convert to radians.\n phi *= pi / 180\n return cos(phase + phi)\n\n\ndef _sweep_poly_phase(t, poly):\n \"\"\"\n Calculate the phase used by sweep_poly to generate its output.\n\n See `sweep_poly` for a description of the arguments.\n\n \"\"\"\n # polyint handles lists, ndarrays and instances of poly1d automatically.\n intpoly = polyint(poly)\n phase = 2 * pi * polyval(intpoly, t)\n return phase\n",
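A short usage sketch of the linear-sweep formula quoted in the chirp docstring above, assuming numpy and scipy are available; the sweep endpoints (1 Hz to 6 Hz over 10 s) are arbitrary.

import numpy as np
from scipy.signal import chirp

t = np.linspace(0, 10, 5001)
y = chirp(t, f0=1.0, t1=10.0, f1=6.0, method='linear')

# linear case from the docstring: phase = 2*pi*(f0*t + 0.5*beta*t**2), beta = (f1 - f0)/t1
beta = (6.0 - 1.0) / 10.0
y_ref = np.cos(2 * np.pi * (1.0 * t + 0.5 * beta * t ** 2))
print(np.allclose(y, y_ref))   # True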
"from __future__ import division, print_function, absolute_import\n\n__all__ = ['imread']\n\nfrom numpy import array\n\n\ndef imread(fname, flatten=False, mode=None):\n \"\"\"\n Load an image from file.\n\n Parameters\n ----------\n fname : str\n Image file name, e.g. ``test.jpg``.\n flatten : bool, optional\n If true, convert the output to grey-scale. Default is False.\n mode : str, optional\n mode to convert image to, e.g. ``RGB``.\n\n\n Returns\n -------\n img_array : ndarray\n The different colour bands/channels are stored in the\n third dimension, such that a grey-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n\n Raises\n ------\n ImportError\n If the Python Imaging Library (PIL) can not be imported.\n\n \"\"\"\n try:\n from PIL import Image\n except ImportError:\n raise ImportError(\"Could not import the Python Imaging Library (PIL)\"\n \" required to load image files. Please refer to\"\n \" http://pypi.python.org/pypi/PIL/ for installation\"\n \" instructions.\")\n\n with open(fname, \"rb\") as fp:\n im = Image.open(fp)\n if mode:\n im = im.convert(mode)\n if flatten:\n im = im.convert('F')\n result = array(im)\n return result\n",
"\"\"\"benchmarks for the scipy.sparse.linalg._expm_multiply module\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport time\n\nimport numpy as np\nfrom numpy.testing import (Tester, TestCase, assert_allclose, run_module_suite)\n\nimport scipy.linalg\nfrom scipy.sparse.linalg import expm_multiply\n\n\ndef random_sparse(m, n, nnz_per_row):\n # Copied from the scipy.sparse benchmark.\n rows = np.arange(m).repeat(nnz_per_row)\n cols = np.random.random_integers(low=0, high=n-1, size=nnz_per_row*m)\n vals = np.random.random_sample(m*nnz_per_row)\n M = scipy.sparse.coo_matrix((vals,(rows,cols)), (m,n), dtype=float)\n return M.tocsr()\n\n\nclass BenchmarkExpmMultiply(TestCase):\n\n def _help_bench_expm_multiply(self, A, i, j):\n n = A.shape[0]\n print('converting the sparse matrix to a dense array...')\n tm_start = time.clock()\n A_dense = A.toarray()\n tm_end = time.clock()\n print(tm_end - tm_start, ' seconds')\n print()\n print('computing full expm of the dense array...')\n tm_start = time.clock()\n A_expm = scipy.linalg.expm(A_dense)\n full_expm_entry = A_expm[i, j]\n tm_end = time.clock()\n print('expm(A)[%d, %d]:' % (i, j), full_expm_entry)\n print(tm_end - tm_start, ' seconds')\n print()\n print('computing only column', j, 'of expm of the sparse matrix...')\n tm_start = time.clock()\n v = np.zeros(n, dtype=float)\n v[j] = 1\n A_expm_col_j = expm_multiply(A, v)\n expm_col_entry = A_expm_col_j[i]\n tm_end = time.clock()\n print('expm(A)[%d, %d]:' % (i, j), expm_col_entry)\n print(tm_end - tm_start, ' seconds')\n print()\n if np.allclose(full_expm_entry, expm_col_entry):\n print('The two methods give the same answer.')\n else:\n print('!!! The two methods give different answers. !!!')\n print()\n\n def bench_expm_multiply(self):\n np.random.seed(1234)\n n = 2000\n i = 100\n j = 200\n shape = (n, n)\n nnz_per_row = 25\n print()\n print('expm multiply benchmarking')\n print('--------------------------')\n print()\n print('sampling a random sparse matrix...')\n print('shape:', shape)\n print('nnz per row:', nnz_per_row)\n tm_start = time.clock()\n A = random_sparse(n, n, nnz_per_row)\n tm_end = time.clock()\n print(tm_end - tm_start, ' seconds')\n print()\n self._help_bench_expm_multiply(A, i, j)\n print()\n\n\nif __name__ == '__main__':\n Tester().bench()\n",
"\"\"\"\n====================================\nLinear algebra (:mod:`scipy.linalg`)\n====================================\n\n.. currentmodule:: scipy.linalg\n\nLinear algebra functions.\n\n.. seealso::\n\n `numpy.linalg` for more linear algebra functions. Note that\n although `scipy.linalg` imports most of them, identically named\n functions from `scipy.linalg` may offer more or slightly differing\n functionality.\n\n\nBasics\n======\n\n.. autosummary::\n :toctree: generated/\n\n inv - Find the inverse of a square matrix\n solve - Solve a linear system of equations\n solve_banded - Solve a banded linear system\n solveh_banded - Solve a Hermitian or symmetric banded system\n solve_triangular - Solve a triangular matrix\n det - Find the determinant of a square matrix\n norm - Matrix and vector norm\n lstsq - Solve a linear least-squares problem\n pinv - Pseudo-inverse (Moore-Penrose) using lstsq\n pinv2 - Pseudo-inverse using svd\n pinvh - Pseudo-inverse of hermitian matrix\n kron - Kronecker product of two arrays\n tril - Construct a lower-triangular matrix from a given matrix\n triu - Construct an upper-triangular matrix from a given matrix\n\nEigenvalue Problems\n===================\n\n.. autosummary::\n :toctree: generated/\n\n eig - Find the eigenvalues and eigenvectors of a square matrix\n eigvals - Find just the eigenvalues of a square matrix\n eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix\n eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix\n eig_banded - Find the eigenvalues and eigenvectors of a banded matrix\n eigvals_banded - Find just the eigenvalues of a banded matrix\n\nDecompositions\n==============\n\n.. autosummary::\n :toctree: generated/\n\n lu - LU decomposition of a matrix\n lu_factor - LU decomposition returning unordered matrix and pivots\n lu_solve - Solve Ax=b using back substitution with output of lu_factor\n svd - Singular value decomposition of a matrix\n svdvals - Singular values of a matrix\n diagsvd - Construct matrix of singular values from output of svd\n orth - Construct orthonormal basis for the range of A using svd\n cholesky - Cholesky decomposition of a matrix\n cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix\n cho_factor - Cholesky decomposition for use in solving a linear system\n cho_solve - Solve previously factored linear system\n cho_solve_banded - Solve previously factored banded linear system\n polar - Compute the polar decomposition.\n qr - QR decomposition of a matrix\n qr_multiply - QR decomposition and multiplication by Q\n qz - QZ decomposition of a pair of matrices\n schur - Schur decomposition of a matrix\n rsf2csf - Real to complex Schur form\n hessenberg - Hessenberg form of a matrix\n\n.. seealso::\n\n `scipy.linalg.interpolative` -- Interpolative matrix decompositions\n\n\nMatrix Functions\n================\n\n.. autosummary::\n :toctree: generated/\n\n expm - Matrix exponential\n logm - Matrix logarithm\n cosm - Matrix cosine\n sinm - Matrix sine\n tanm - Matrix tangent\n coshm - Matrix hyperbolic cosine\n sinhm - Matrix hyperbolic sine\n tanhm - Matrix hyperbolic tangent\n signm - Matrix sign\n sqrtm - Matrix square root\n funm - Evaluating an arbitrary matrix function\n expm_frechet - Frechet derivative of the matrix exponential\n fractional_matrix_power - Fractional matrix power\n\n\nMatrix Equation Solvers\n=======================\n\n.. 
autosummary::\n :toctree: generated/\n\n solve_sylvester - Solve the Sylvester matrix equation\n solve_continuous_are - Solve the continuous-time algebraic Riccati equation\n solve_discrete_are - Solve the discrete-time algebraic Riccati equation\n solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation\n solve_lyapunov - Solve the (continous-time) Lyapunov equation\n\n\nSpecial Matrices\n================\n\n.. autosummary::\n :toctree: generated/\n\n block_diag - Construct a block diagonal matrix from submatrices\n circulant - Circulant matrix\n companion - Companion matrix\n hadamard - Hadamard matrix of order 2**n\n hankel - Hankel matrix\n hilbert - Hilbert matrix\n invhilbert - Inverse Hilbert matrix\n leslie - Leslie matrix\n pascal - Pascal matrix\n toeplitz - Toeplitz matrix\n tri - Construct a matrix filled with ones at and below a given diagonal\n\nLow-level routines\n==================\n\n.. autosummary::\n :toctree: generated/\n\n get_blas_funcs\n get_lapack_funcs\n find_best_blas_type\n\n.. seealso::\n\n `scipy.linalg.blas` -- Low-level BLAS functions\n\n `scipy.linalg.lapack` -- Low-level LAPACK functions\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom .linalg_version import linalg_version as __version__\n\nfrom .misc import *\nfrom .basic import *\nfrom .decomp import *\nfrom .decomp_lu import *\nfrom .decomp_cholesky import *\nfrom .decomp_qr import *\nfrom ._decomp_qz import *\nfrom .decomp_svd import *\nfrom .decomp_schur import *\nfrom ._decomp_polar import *\nfrom .matfuncs import *\nfrom .blas import *\nfrom .lapack import *\nfrom .special_matrices import *\nfrom ._solvers import *\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n\nfrom numpy.dual import register_func\nfor k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals',\n 'eigvalsh', 'lstsq', 'cholesky']:\n try:\n register_func(k, eval(k))\n except ValueError:\n pass\n\ntry:\n register_func('pinv', pinv2)\nexcept ValueError:\n pass\n\ndel k, register_func\n\nfrom numpy.testing import Tester\ntest = Tester().test\nbench = Tester().bench\n"
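A brief tour of two entries from the "Basics" and "Decompositions" listings above (solve, lu_factor/lu_solve), using an arbitrary small system; assumes scipy is installed.

import numpy as np
from scipy import linalg

A = np.array([[3.0, 2.0, 0.0],
              [1.0, -1.0, 0.0],
              [0.0, 5.0, 1.0]])
b = np.array([2.0, 4.0, -1.0])

x = linalg.solve(A, b)                      # direct solve of A x = b
lu, piv = linalg.lu_factor(A)               # factor once ...
x2 = linalg.lu_solve((lu, piv), b)          # ... reuse for further right-hand sides
print(np.allclose(A.dot(x), b), np.allclose(x, x2))   # True True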
] | [
[
"numpy.distutils.npy_pkg_config.read_config",
"numpy.distutils.core.Extension",
"numpy.distutils.compat.get_exception",
"numpy.numarray.util.get_numarray_include_dirs",
"numpy.distutils.system_info.system_info.saved_results.items",
"numpy.distutils.core.get_distribution",
"numpy.get_include"
],
[
"numpy.power",
"numpy.asarray",
"scipy.odr.__odrpack._set_exceptions",
"numpy.dual.inv",
"numpy.ravel",
"numpy.zeros"
],
[
"scipy.sparse.isspmatrix",
"scipy.sparse.coo_matrix",
"numpy.sqrt",
"numpy.asarray",
"numpy.issubdtype",
"numpy.concatenate"
],
[
"numpy.testing.assert_equal",
"numpy.testing.run_module_suite",
"numpy.arange",
"numpy.tile",
"numpy.testing.assert_array_equal",
"scipy.interpolate.griddata",
"numpy.array",
"numpy.testing.assert_allclose"
],
[
"numpy.testing.Tester"
],
[
"scipy.sparse.isspmatrix",
"numpy.asarray",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.ma.is_masked",
"scipy.sparse.isspmatrix_csc"
],
[
"numpy.distutils.misc_util.Configuration",
"numpy.distutils.core.setup"
],
[
"numpy.allclose",
"numpy.testing.assert_",
"scipy.linalg.norm",
"numpy.array",
"scipy.sparse.linalg.interface.LinearOperator",
"numpy.zeros"
],
[
"scipy.sparse.construct.eye",
"scipy.sparse.base.isspmatrix",
"numpy.log2",
"scipy.linalg.basic.solve",
"scipy.sparse.linalg.spsolve",
"numpy.eye",
"numpy.linalg.norm",
"numpy.sinh",
"numpy.ceil",
"scipy.linalg.basic.solve_triangular",
"numpy.exp",
"numpy.sum",
"numpy.tril"
],
[
"numpy.dot",
"numpy.testing.run_module_suite",
"numpy.arange",
"scipy.optimize.nnls",
"numpy.testing.assert_"
],
[
"numpy.testing.assert_equal",
"scipy.weave.ext_tools.ext_function",
"scipy.weave.ext_tools.ext_module",
"numpy.numerix.arange",
"scipy.weave.ext_tools.ext_function_from_specs",
"scipy.weave.c_spec.int_converter",
"numpy.testing.assert_",
"scipy.weave.standard_array_spec.array_converter"
],
[
"numpy.deprecate",
"numpy.matrix",
"numpy.amax",
"numpy.dot",
"numpy.diag",
"numpy.allclose",
"numpy.isfinite",
"numpy.absolute",
"numpy.asarray",
"numpy.eye",
"numpy.finfo",
"numpy.real",
"numpy.identity",
"numpy.exp",
"numpy.conjugate"
],
[
"scipy.stats.norm.ppf",
"scipy.stats.bernoulli.logpmf",
"numpy.sqrt",
"numpy.all",
"scipy.stats.norm.expect",
"numpy.where",
"scipy.stats.t.pdf",
"scipy.stats.geom.sf",
"scipy.stats.gamma.fit",
"scipy.stats.hypergeom.interval",
"scipy.stats.norm.fit",
"scipy.stats.nbinom.logpmf",
"scipy.stats.logser.rvs",
"scipy.stats.zipf",
"numpy.log",
"scipy.stats.poisson.logpmf",
"scipy.stats.beta",
"numpy.testing.assert_raises",
"numpy.floor",
"numpy.array",
"scipy.stats.bernoulli.logsf",
"numpy.sum",
"scipy.special.psi",
"scipy.stats.t.logsf",
"scipy.stats.betaprime",
"scipy.stats.chi2.pdf",
"scipy.stats.hypergeom.rvs",
"scipy.stats.expon.cdf",
"scipy.stats.distributions.gamma_gen",
"numpy.testing.assert_array_equal",
"scipy.stats.t.ppf",
"scipy.stats.gamma.rvs",
"scipy.stats.entropy",
"scipy.stats.hypergeom.stats",
"scipy.stats.bernoulli.ppf",
"numpy.shape",
"scipy.stats.randint.pmf",
"scipy.stats.vonmises",
"scipy.stats.beta.logpdf",
"scipy.stats.truncnorm.rvs",
"scipy.stats.bernoulli.isf",
"scipy.stats.tukeylambda.stats",
"scipy.stats.gamma",
"scipy.stats.pearson3",
"scipy.stats.ncf.pdf",
"scipy.stats.rv_continuous",
"scipy.stats.pareto.stats",
"scipy.stats.t.isf",
"numpy.seterr",
"scipy.stats.foldnorm",
"numpy.testing.utils.WarningManager",
"scipy.stats.nbinom",
"scipy.stats.cauchy.fit",
"scipy.stats.beta.expect",
"scipy.stats.ksone.fit",
"numpy.size",
"numpy.testing.rand",
"scipy.stats.ksoneisf",
"scipy.stats.bernoulli.logcdf",
"scipy.stats.binom.pmf",
"scipy.stats.norm._pdf",
"scipy.stats.hypergeom.sf",
"scipy.stats.norm.rvs",
"scipy.stats.binom",
"numpy.testing.assert_",
"numpy.errstate",
"scipy.stats.exponpow.cdf",
"scipy.special.xlogy",
"scipy.stats.rayleigh.__doc__.lower",
"scipy.stats.kstest",
"numpy.testing.run_module_suite",
"scipy.stats.norm.pdf",
"scipy.stats.bernoulli.cdf",
"numpy.ones",
"scipy.stats.hypergeom",
"scipy.stats.pearson3.pdf",
"scipy.stats.pearson3.rvs",
"scipy.stats.norm",
"scipy.stats.t.logpdf",
"scipy.stats.gamma.stats",
"scipy.stats.poisson",
"scipy.stats.skellam.cdf",
"scipy.stats.truncnorm.ppf",
"scipy.stats.hypergeom.expect",
"numpy.linspace",
"scipy.stats.geom.pmf",
"scipy.stats.randint.cdf",
"scipy.stats.expon.pdf",
"scipy.stats.expon.sf",
"scipy.stats.bernoulli.rvs",
"scipy.stats.nbinom.rvs",
"scipy.stats.gamma.logpdf",
"scipy.stats.geom.rvs",
"numpy.testing.assert_equal",
"scipy.stats.tukeylambda.pdf",
"scipy.stats.exponpow.sf",
"scipy.special.digamma",
"scipy.stats.beta.ppf",
"scipy.stats.t.cdf",
"scipy.stats.dlaplace",
"numpy.testing.assert_array_almost_equal",
"scipy.stats.nbinom.pmf",
"numpy.isnan",
"scipy.stats.lognorm",
"numpy.testing.assert_allclose",
"scipy.stats.pearson3.cdf",
"scipy.stats.beta.fit",
"scipy.stats.t.sf",
"scipy.stats.bernoulli.__doc__.lower",
"scipy.stats.t.logcdf",
"scipy.stats.geom.logpmf",
"scipy.stats.distributions.argsreduce",
"scipy.stats.poisson.stats",
"scipy.stats.geom",
"numpy.testing.dec.skipif",
"scipy.stats.poisson.expect",
"scipy.stats.truncnorm.isf",
"scipy.stats.lognorm.rvs",
"numpy.select",
"scipy.stats.binom.rvs",
"scipy.stats.poisson.cdf",
"scipy.stats.bernoulli.sf",
"numpy.arange",
"scipy.stats.powerlaw.stats",
"scipy.stats.cauchy.rvs",
"scipy.stats.skellam.pmf",
"numpy.testing.assert_almost_equal",
"scipy.stats.logser",
"scipy.stats.randint.rvs",
"scipy.stats.nct",
"scipy.stats.t.std",
"scipy.stats.randint",
"scipy.stats.hypergeom.ppf",
"scipy.stats.lognorm.fit",
"scipy.stats.hypergeom.pmf",
"scipy.stats.loggamma.stats",
"scipy.stats.geom.cdf",
"scipy.stats.zipf.rvs",
"scipy.stats.dlaplace.rvs",
"numpy.random.seed",
"scipy.stats.poisson.rvs",
"scipy.stats.bernoulli.pmf",
"scipy.stats.gamma.pdf",
"scipy.stats.lognorm.pdf",
"scipy.stats.rv_discrete",
"scipy.stats.bernoulli"
],
[
"numpy.array",
"numpy.compat.asbytes"
],
[
"numpy.log",
"numpy.asarray",
"numpy.polyint",
"numpy.cos",
"numpy.sin",
"numpy.exp",
"numpy.mod",
"numpy.polyval",
"numpy.extract",
"numpy.zeros",
"numpy.place"
],
[
"numpy.array"
],
[
"scipy.sparse.linalg.expm_multiply",
"numpy.allclose",
"numpy.random.seed",
"numpy.arange",
"numpy.random.random_sample",
"numpy.random.random_integers",
"numpy.testing.Tester",
"numpy.zeros"
],
[
"numpy.dual.register_func",
"numpy.testing.Tester"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.12"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
VEDANTGHODKE/Swayatta-Autonomous-Driver-Assistance-System-ADAS-For-Indian-Environments | [
"7f0361c0f52e4e7623d975725497648cf582f36f"
] | [
"Swayatta - Autonomous Car Follower System/src/synchronous_mode.py"
] | [
"#!/usr/bin/env python\n\n# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\nimport glob\nimport os\nimport sys\nfrom CarDetector import CarDetector\nfrom DrivingControl import DrivingControl\nfrom VizualizeDrivingPath import VizualizeDrivingPath\nfrom PurePursuitAlgorithm import PurePursuitAlgorithm\nfrom SemanticSegmentation import SemanticSegmentation\nfrom DrivingControlAdvanced import DrivingControlAdvanced\nimport math\nimport pickle\n\ntry:\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n\nimport carla\n\nimport random\n\n# try:\nimport pygame\n# except ImportError:\n# raise RuntimeError('cannot import pygame, make sure pygame package is installed')\n\n# try:\nimport numpy as np\n# except ImportError:\n# raise RuntimeError('cannot import numpy, make sure numpy package is installed')\n\n# try:\nimport queue\n# except ImportError:\n# import Queue as queue\n\n\nclass CarlaSyncMode(object):\n \"\"\"\n Context manager to synchronize output from different sensors. Synchronous\n mode is enabled as long as we are inside this context\n\n with CarlaSyncMode(world, sensors) as sync_mode:\n while True:\n data = sync_mode.tick(timeout=1.0)\n\n \"\"\"\n\n def __init__(self, world, *sensors, **kwargs):\n self.world = world\n self.sensors = sensors\n self.frame = None\n self.delta_seconds = 1.0 / kwargs.get('fps', 20)\n self._queues = []\n self._settings = None\n\n def __enter__(self):\n self._settings = self.world.get_settings()\n self.frame = self.world.apply_settings(carla.WorldSettings(\n no_rendering_mode=False,\n synchronous_mode=True,\n fixed_delta_seconds=self.delta_seconds))\n\n def make_queue(register_event):\n q = queue.Queue()\n register_event(q.put)\n self._queues.append(q)\n\n make_queue(self.world.on_tick)\n for sensor in self.sensors:\n make_queue(sensor.listen)\n return self\n\n def tick(self, timeout):\n self.frame = self.world.tick()\n data = [self._retrieve_data(q, timeout) for q in self._queues]\n assert all(x.frame == self.frame for x in data)\n return data\n\n def __exit__(self, *args, **kwargs):\n self.world.apply_settings(self._settings)\n\n def _retrieve_data(self, sensor_queue, timeout):\n while True:\n data = sensor_queue.get(timeout=timeout)\n if data.frame == self.frame:\n return data\n\n\ndef BresenhamLine(x0,y0, x1,y1):\n if x0 > x1:\n tmpX = x1\n tmpY = y1\n x1 = x0\n x0 = tmpX\n y1 = y0\n y0 = tmpY\n\n coords = []\n dx = x1 - x0\n dy = abs(y1 - y0)\n D = 2*dy - dx\n y = y0\n\n for x in range(x0,x1+1):\n coords.append([x,y])\n if D > 0:\n y = y + (1 if y1 >= y0 else -1)\n D = D - 2*dx\n D = D + 2*dy\n return coords\n\nimport os\ndef myPrint(angle,predicted_angle, possibleAngle,real_dist, predicted_distance, chaseMode=True):\n return\n os.system('clear')\n if chaseMode == True:\n print('----- Chase mode -----')\n else:\n print('----- Follow mode -----')\n if chaseMode == False:\n print('The predicted angle is between the chasing car and some point in the trajectory.')\n print('Real angle:',angle)\n print('Predicted angle:',predicted_angle)\n print('Possible angle:',possibleAngle)\n print('Real distance:',real_dist)\n print('Predicted distance:',predicted_distance)\n\nimport imageio\nfrom copy import deepcopy\ndef draw_image(surface, image, 
image2,location1, location2, blend=False, record=False,driveName='',smazat=[]):\n if False:#image2.frame%5 == 0:\n # coords1 = BresenhamLine(0,image2.height-1,image2.width//2,image2.height//2)\n # coords2 = BresenhamLine(image2.width - 1, image2.height - 1, image2.width // 2, image2.height // 2)\n # print(coords2)\n # print(len(coords1),len(coords2))\n\n array = np.frombuffer(image2.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image2.height, image2.width, 4))\n # array = np.reshape(array, (image2.width, image2.height, 4))\n array = array[:, :, :3]\n # array = array[:, :, ::-1]\n arr = deepcopy(array)\n arr = np.array(arr,dtype=int)\n for i in range(len(array)):\n for j in range(len(array[i])):\n if array[i][j][2] == 7 or array[i][j][2] == 6:\n arr[i][j][0] = 0\n arr[i][j][1] = 255\n arr[i][j][2] = 43\n # for i in range(len(coords1)):\n # arr[coords1[i][1]][coords1[i][0]][0] = 255\n # arr[coords1[i][1]][coords1[i][0]][1] = 0\n # arr[coords1[i][1]][coords1[i][0]][2] = 0\n #\n # for i in range(len(coords2)):\n # arr[coords2[i][1]][coords2[i][0]][0] = 255\n # arr[coords2[i][1]][coords2[i][0]][1] = 0\n # arr[coords2[i][1]][coords2[i][0]][2] = 0\n\n for i in range(len(smazat)):\n arr[smazat[i][1]][smazat[i][0]][0] = 255\n arr[smazat[i][1]][smazat[i][0]][1] = 0\n arr[smazat[i][1]][smazat[i][0]][2] = 0\n\n dirName = os.path.join('test')\n if not os.path.exists(dirName):\n os.mkdir(dirName)\n filename = dirName + '/' + str(image2.frame) + '.png'\n imageio.imwrite(filename, arr)\n # image2.save_to_disk(dirName + '/%07d' % image2.frame)\n if image.frame % 10 == 0:#record:#image.frame % 10 == 0:\n driveName = driveName.split('/')[1]\n dirName = os.path.join('output',driveName)\n if not os.path.exists(dirName):\n os.mkdir(dirName)\n image.save_to_disk(dirName+'/%07d' % image.frame)#_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f.png' % (image.frame,location1.location.x,location1.location.y,location1.location.z, location1.rotation.pitch,location1.rotation.yaw, location1.rotation.roll,location2.location.x,location2.location.y,location2.location.z, location2.rotation.pitch,location2.rotation.yaw, location2.rotation.roll ))\n #image2.save_to_disk('output2/%07d_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f_%f.png' % (image2.frame,location1.location.x,location1.location.y,location1.location.z, location1.rotation.pitch,location1.rotation.yaw, location1.rotation.roll\n # ,location2.location.x,location2.location.y,location2.location.z, location2.rotation.pitch,location2.rotation.yaw, location2.rotation.roll ))\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if blend:\n image_surface.set_alpha(100)\n surface.blit(image_surface, (0, 0))\n\n\ndef get_font():\n fonts = [x for x in pygame.font.get_fonts()]\n default_font = 'ubuntumono'\n font = default_font if default_font in fonts else fonts[0]\n font = pygame.font.match_font(font)\n return pygame.font.Font(font, 14)\n\n\ndef should_quit():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_ESCAPE:\n return True\n return False\n\n# try:\nimport pygame\nfrom pygame.locals import KMOD_CTRL\nfrom pygame.locals import KMOD_SHIFT\nfrom pygame.locals import K_0\nfrom pygame.locals import K_9\nfrom pygame.locals import K_BACKQUOTE\nfrom pygame.locals import K_BACKSPACE\nfrom 
pygame.locals import K_COMMA\nfrom pygame.locals import K_DOWN\nfrom pygame.locals import K_ESCAPE\nfrom pygame.locals import K_F1\nfrom pygame.locals import K_LEFT\nfrom pygame.locals import K_PERIOD\nfrom pygame.locals import K_RIGHT\nfrom pygame.locals import K_SLASH\nfrom pygame.locals import K_SPACE\nfrom pygame.locals import K_TAB\nfrom pygame.locals import K_UP\nfrom pygame.locals import K_a\nfrom pygame.locals import K_c\nfrom pygame.locals import K_d\nfrom pygame.locals import K_h\nfrom pygame.locals import K_m\nfrom pygame.locals import K_p\nfrom pygame.locals import K_q\nfrom pygame.locals import K_r\nfrom pygame.locals import K_s\nfrom pygame.locals import K_w\nfrom pygame.locals import K_MINUS\nfrom pygame.locals import K_EQUALS\n\nclass ManualControl(object):\n def __init__(self,filename,name=''):\n self.history = []\n self._control = carla.VehicleControl()\n self._steer_cache = 0.0\n self.outputDir = 'chaseOutput'+name\n self.fileName = filename.split('/')[-1]\n if self.fileName == '':\n self.fileName = 'test.p'\n self.startRecording = False\n\n def _parse_vehicle_keys(self, keys, milliseconds):\n self._control.throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0\n self._control.reverse=False\n if keys[K_DOWN] or keys[K_s]:\n self._control.throttle = 1\n self._control.reverse=True\n steer_increment = 8e-4 * milliseconds\n if keys[K_LEFT] or keys[K_a]:\n self._steer_cache -= steer_increment\n elif keys[K_RIGHT] or keys[K_d]:\n self._steer_cache += steer_increment\n else:\n self._steer_cache = 0.0\n self._steer_cache = min(0.5, max(-0.5, self._steer_cache))\n self._control.steer = round(self._steer_cache, 1)\n self._control.hand_brake = keys[K_SPACE]\n\n if keys[K_r]:\n self.startRecording = True\n \n def Run(self):\n clock = pygame.time.Clock()\n while True:\n clock.tick_busy_loop(30)\n #pygame.event.get()\n self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())\n self.vehicle.apply_control(self._control)\n pygame.display.flip()\n\n def SaveCarPosition(self,location):\n if self.startRecording:\n self.history.append([location.location.x, location.location.y, location.location.z, location.rotation.pitch, location.rotation.yaw, location.rotation.roll])\n\n def SaveHistoryToFile(self):\n if not os.path.exists(self.outputDir):\n os.mkdir(self.outputDir)\n if len(self.history) > 0:\n pickle.dump(self.history, open(os.path.join(self.outputDir,self.fileName), \"wb\"))\n\n\nclass Evaluation():\n def __init__(self):\n self.sumMAE = 0\n self.sumRMSE = 0\n self.n_of_frames = 0\n self.n_of_collisions = 0\n self.history = []\n\n def AddError(self, distance, goalDistance):\n self.n_of_frames += 1\n self.sumMAE += abs(goalDistance-distance)\n self.sumRMSE += abs(goalDistance-distance)*abs(goalDistance-distance)\n\n def WriteIntoFileFinal(self, filename, driveName):\n if self.n_of_frames > 0:\n self.sumMAE = self.sumMAE / float(self.n_of_frames)\n self.sumRMSE = self.sumRMSE / float(self.n_of_frames)\n\n with open(filename,'a') as f:\n f.write(str(driveName)+', '+str(self.sumMAE)+', '+str(self.sumRMSE)+', '+str(self.n_of_collisions)+'\\n')\n\n def LoadHistoryFromFile(self, fileName):\n self.history = pickle.load( open(fileName, \"rb\"))\n\n def CollisionHandler(self,event):\n self.n_of_collisions += 1\n\ndef DrawDrivable(indexes, w, h, display):\n if len(indexes) != 0:\n BB_COLOR = (11, 102, 35)\n for i in range(10):\n for j in range(10):\n if indexes[i*10+j] == 1:\n pygame.draw.line(display, BB_COLOR, (j*w,i*h) , (j*w+w,i*h))\n pygame.draw.line(display, BB_COLOR, (j*w,i*h), 
(j*w,i*h+h))\n pygame.draw.line(display, BB_COLOR, (j*w+w,i*h), (j*w+w,i*h+h))\n pygame.draw.line(display, BB_COLOR, (j*w,i*h+h), (j*w+w,i*h+h))\n\nimport copy\ndef main(optimalDistance, followDrivenPath, chaseMode, evaluateChasingCar, driveName='',record=False, followMode=False,\n resultsName='results',P=None,I=None,D=None,nOfFramesToSkip=0):\n counter = 1\n\n actor_list = []\n pygame.init()\n\n carDetector = CarDetector()\n drivingControl = DrivingControl(optimalDistance=optimalDistance)\n if P!=None:\n drivingControlAdvanced = DrivingControlAdvanced(optimalDistance=optimalDistance,P=P,I=I,D=D)\n else:\n drivingControlAdvanced = DrivingControlAdvanced(optimalDistance=optimalDistance)\n visualisation = VizualizeDrivingPath()\n myControl = ManualControl(driveName,name=str(nOfFramesToSkip))\n myControl.startRecording = True\n advanced = False\n extrapolate = True\n\n evaluation = Evaluation()\n semantic = SemanticSegmentation()\n\n lookAheadDistance = 5\n purePursuit = PurePursuitAlgorithm(lookAheadDistance=lookAheadDistance)\n\n display = pygame.display.set_mode(\n (800, 600),\n pygame.HWSURFACE | pygame.DOUBLEBUF)\n font = get_font()\n clock = pygame.time.Clock()\n\n client = carla.Client('localhost', 2000)\n client.set_timeout(2.0)\n\n world = client.get_world()\n\n vehicleToFollowSpawned = False\n\n\n\n try:\n # if True:\n m = world.get_map()\n # if not followDrivenPath:\n start_pose = random.choice(m.get_spawn_points())\n # else:\n # first = evaluation.history[0]\n # print(first)\n # start_pose = random.choice(m.get_spawn_points())\n # start_pose = carla.Transform(carla.Location(first[0],first[1],first[2]),carla.Rotation(first[3],first[4],first[5]))\n # print('Start pose:',start_pose)\n\n blueprint_library = world.get_blueprint_library()\n \n vehicle = world.spawn_actor(\n random.choice(blueprint_library.filter('jeep')),\n start_pose)\n actor_list.append(vehicle)\n vehicle.set_simulate_physics(True)\n if followDrivenPath:\n evaluation.LoadHistoryFromFile(driveName)\n first = evaluation.history[0]\n start_pose = carla.Transform(carla.Location(first[0], first[1], first[2]),\n carla.Rotation(first[3], first[4], first[5]))\n vehicle.set_transform(start_pose)\n\n collision_sensor = world.spawn_actor(blueprint_library.find('sensor.other.collision'),\n carla.Transform(), attach_to=vehicle)\n\n collision_sensor.listen(lambda event: evaluation.CollisionHandler(event))\n actor_list.append(collision_sensor)\n\n # Find the blueprint of the sensor.\n blueprint = world.get_blueprint_library().find('sensor.camera.rgb')\n # Modify the attributes of the blueprint to set image resolution and field of view.\n blueprint.set_attribute('image_size_x', '800')\n blueprint.set_attribute('image_size_y', '600')\n blueprint.set_attribute('fov', '90')\n\n camera_rgb = world.spawn_actor(\n blueprint_library.find('sensor.camera.rgb'),\n carla.Transform(carla.Location(x=1.5, z=1.4,y=0.3), carla.Rotation(pitch=0)), #5,3,0 # -0.3\n attach_to=vehicle)\n actor_list.append(camera_rgb)\n\n camera_rgb2 = world.spawn_actor(\n blueprint_library.find('sensor.camera.rgb'),\n carla.Transform(carla.Location(x=1.5, z=1.4,y=-0.3), carla.Rotation(pitch=0))) #x=-5.5, z=4.4,y=0\n #attach_to=vehicle)\n actor_list.append(camera_rgb2)\n\n camera_segmentation = world.spawn_actor(\n blueprint_library.find('sensor.camera.semantic_segmentation'),\n carla.Transform(carla.Location(x=1.5, z=1.4,y=0), carla.Rotation(pitch=0)), #5,3,0 # -0.3\n attach_to=vehicle)\n actor_list.append(camera_segmentation)\n \n\n # Create a synchronous mode 
context.\n with CarlaSyncMode(world,camera_rgb, camera_rgb2, camera_segmentation, fps=30) as sync_mode:\n\n while True:\n if should_quit():\n return\n clock.tick(30)\n\n # Advance the simulation and wait for the data.\n snapshot, img_rgb, image_rgb2, image_segmentation = sync_mode.tick(timeout=2.0)\n\n line = []\n \n if not vehicleToFollowSpawned and not followDrivenPath:\n vehicleToFollowSpawned = True\n start_pose2 = carla.Transform()\n start_pose2.rotation = start_pose.rotation\n\n start_pose2.location.x = start_pose.location.x\n start_pose2.location.y = start_pose.location.y\n start_pose2.location.z = start_pose.location.z\n\n location1 = vehicle.get_transform()\n rotation1 = location1.rotation\n print(rotation1.yaw,abs(rotation1.yaw))\n if abs(rotation1.yaw - 180.0) < 45.0 or abs(rotation1.yaw + 180.0) < 45.0:\n print('1')\n start_pose2.location.x = start_pose.location.x - 5\n elif abs(rotation1.yaw) < 45.0:\n print('2')\n start_pose2.location.x = start_pose.location.x + 5\n elif abs(rotation1.yaw + 90.0) < 45.0:\n print('3')\n start_pose2.location.y = start_pose.location.y - 5\n elif abs(rotation1.yaw - 90.0) < 45.0:\n print('4')\n start_pose2.location.y = start_pose.location.y + 5\n\n bp = blueprint_library.filter('model3')[0]\n\n bp.set_attribute('color', '0,101,189')\n vehicleToFollow = world.spawn_actor(\n bp,\n start_pose2)\n\n actor_list.append(vehicleToFollow)\n vehicleToFollow.set_simulate_physics(True)\n vehicleToFollow.set_autopilot(True)\n elif not vehicleToFollowSpawned and followDrivenPath:\n vehicleToFollowSpawned = True\n location1 = vehicle.get_transform()\n newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,\n location1.rotation.yaw)\n diffX = newX - location1.location.x\n diffY = newY - location1.location.y\n newX = location1.location.x - (diffX*5)\n newY = location1.location.y - (diffY*5)\n\n start_pose.location.x = newX\n start_pose.location.y = newY\n\n vehicle.set_transform(start_pose)\n\n start_pose2 = random.choice(m.get_spawn_points())\n\n bp = blueprint_library.filter('model3')[0]\n bp.set_attribute('color', '0,101,189')\n vehicleToFollow = world.spawn_actor(\n bp,\n start_pose2)\n\n start_pose2 = carla.Transform()\n start_pose2.rotation = start_pose.rotation\n\n start_pose2.location.x = start_pose.location.x\n start_pose2.location.y = start_pose.location.y\n start_pose2.location.z = start_pose.location.z\n\n vehicleToFollow.set_transform(start_pose2)\n\n actor_list.append(vehicleToFollow)\n vehicleToFollow.set_simulate_physics(True)\n vehicleToFollow.set_autopilot(False)\n\n if followDrivenPath:\n if counter >= len(evaluation.history):\n break\n tmp = evaluation.history[counter]\n currentPos = carla.Transform(carla.Location(tmp[0],tmp[1],tmp[2]),carla.Rotation(tmp[3],tmp[4],tmp[5]))\n vehicleToFollow.set_transform(currentPos)\n counter += 1\n\n fps = round(1.0 / snapshot.timestamp.delta_seconds)\n\n # manual control\n if not followDrivenPath:\n myControl._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())\n vehicle.apply_control(myControl._control)\n\n\n location1 = vehicle.get_transform()\n location2 = vehicleToFollow.get_transform()\n\n myControl.SaveCarPosition(location1)\n newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,location1.rotation.yaw)\n angle = carDetector.getAngle([location1.location.x, location1.location.y], [newX, newY],\n [location2.location.x, location2.location.y])\n\n possibleAngle = 0\n drivableIndexes = []\n bbox = []\n if chaseMode:\n 
carInTheImage = semantic.IsThereACarInThePicture(image_segmentation)\n bbox, predicted_distance,predicted_angle = carDetector.getDistance(vehicleToFollow, camera_rgb,carInTheImage,extrapolation=extrapolate,nOfFramesToSkip=nOfFramesToSkip)\n\n if advanced:\n possibleAngle, drivableIndexes = semantic.FindPossibleAngle(image_segmentation,bbox,predicted_angle)\n\n steer, throttle = drivingControlAdvanced.PredictSteerAndThrottle(predicted_distance, possibleAngle,None)\n else:\n steer, throttle = drivingControl.PredictSteerAndThrottle(predicted_distance,predicted_angle,None)\n\n # if followDrivenPath:\n vehicle.apply_control(carla.VehicleControl(throttle=throttle,steer=steer))\n\n if evaluateChasingCar:\n evaluation.AddError(location1.location.distance(location2.location),optimalDistance)\n elif followMode:\n angle = 0\n carInTheImage = semantic.IsThereACarInThePicture(image_segmentation)\n bbox, predicted_distance, predicted_angle = carDetector.getDistance(vehicleToFollow, camera_rgb,carInTheImage)\n purePursuit.AddPathPoint(location2.location.x,location2.location.y)\n newX, newY = carDetector.CreatePointInFrontOFCar(location1.location.x, location1.location.y,\n location1.rotation.yaw)\n targetX, targetY = purePursuit.GetNextPoint(location1.location.x,location1.location.y)\n predicted_angle = carDetector.getAngle([location1.location.x,location1.location.y],[newX,newY],[targetX,targetY])\n possibleAngle = predicted_angle\n steer, throttle = drivingControl.PredictSteerAndThrottle(predicted_distance,predicted_angle,None)\n\n # if followDrivenPath:\n vehicle.apply_control(carla.VehicleControl(throttle=throttle,steer=steer))\n if evaluateChasingCar:\n evaluation.AddError(location1.location.distance(location2.location),optimalDistance)\n\n velocity1 = vehicle.get_velocity()\n velocity2 = vehicleToFollow.get_velocity()\n\n visualisation.Add(velocity1,velocity2,location1.location.distance(location2.location), angle)\n\n\n draw_image(display, image_rgb2, image_segmentation,location1, location2,record=record,driveName=driveName,smazat=line)\n display.blit(\n font.render('% 5d FPS (real)' % clock.get_fps(), True, (255, 255, 255)),\n (8, 10))\n display.blit(\n font.render('% 5d FPS (simulated)' % fps, True, (255, 255, 255)),\n (8, 28))\n\n if len(bbox) != 0:\n points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]\n BB_COLOR = (248, 64, 24)\n # draw lines\n # base\n pygame.draw.line(display, BB_COLOR, points[0], points[1])\n pygame.draw.line(display, BB_COLOR, points[1], points[2])\n pygame.draw.line(display, BB_COLOR, points[2], points[3])\n pygame.draw.line(display, BB_COLOR, points[3], points[0])\n # top\n pygame.draw.line(display, BB_COLOR, points[4], points[5])\n pygame.draw.line(display, BB_COLOR, points[5], points[6])\n pygame.draw.line(display, BB_COLOR, points[6], points[7])\n pygame.draw.line(display, BB_COLOR, points[7], points[4])\n # base-top\n pygame.draw.line(display, BB_COLOR, points[0], points[4])\n pygame.draw.line(display, BB_COLOR, points[1], points[5])\n pygame.draw.line(display, BB_COLOR, points[2], points[6])\n pygame.draw.line(display, BB_COLOR, points[3], points[7])\n DrawDrivable(drivableIndexes, image_segmentation.width // 10, image_segmentation.height // 10, display)\n\n real_dist = location1.location.distance(location2.location)\n if chaseMode or followMode:\n myPrint(angle,predicted_angle, possibleAngle,real_dist, predicted_distance,chaseMode)\n pygame.display.flip()\n except Exception as ex:\n print(ex)\n finally:\n print('Ending')\n if evaluateChasingCar:\n 
evaluation.WriteIntoFileFinal(os.path.join('res',resultsName+'.txt'),driveName=driveName)\n myControl.SaveHistoryToFile()\n print('destroying actors.')\n for actor in actor_list:\n actor.destroy()\n\n pygame.quit()\n print('done.')\n\nimport os\nif __name__ == '__main__':\n nOfFramesToSkip = 0\n try:\n # if True:\n optimalDistance = 8\n followDrivenPath = True\n evaluateChasingCar = True\n record = False\n chaseMode = True\n followMode = False\n\n drivesDir = 'drives'\n drivesFileNames = os.listdir(drivesDir)\n drivesFileNames.sort()\n\n # drivesFileNames = ['ride7.p']\n # drivesFileNames = ['ride1.p','ride2.p','ride3.p','ride4.p','ride5.p','ride6.p','ride7.p','ride8.p','ride9.p','ride10.p']\n # drivesFileNames = ['ride11.p', 'ride12.p', 'ride13.p', 'ride14.p', 'ride15.p', 'ride16.p', 'ride17.p', 'ride18.p','ride19.p', 'ride20.p']\n drivesFileNames = ['ride1.p','ride2.p','ride3.p','ride4.p','ride5.p','ride6.p','ride7.p','ride8.p','ride9.p','ride10.p',\n 'ride11.p', 'ride12.p', 'ride13.p', 'ride14.p', 'ride15.p', 'ride16.p', 'ride17.p', 'ride18.p','ride19.p', 'ride20.p']\n\n if evaluateChasingCar:\n for i in range(0, 101, 5):\n nOfFramesToSkip = i\n for fileName in drivesFileNames:\n main(optimalDistance=optimalDistance,followDrivenPath=followDrivenPath,chaseMode=chaseMode, evaluateChasingCar=evaluateChasingCar,driveName=os.path.join(drivesDir,fileName),record=record,followMode=followMode,nOfFramesToSkip=nOfFramesToSkip)\n os.rename('res/results.txt','chaseOutput'+str(nOfFramesToSkip)+'/results.txt')\n\n else:\n main(optimalDistance=optimalDistance, followDrivenPath=followDrivenPath, chaseMode=chaseMode, evaluateChasingCar=evaluateChasingCar,followMode=followMode)\n\n except Exception as ex:\n with open('problem.txt','a') as f:\n f.write(str(ex)+'\\n')\n # print('\\nCancelled by user. Bye!')\n"
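A CARLA-free sketch of the synchronization idea behind CarlaSyncMode in the script above: one queue per sensor, and each tick drains every queue until the entry tagged with the current frame number arrives. The class and names here are invented for illustration and stand in for the carla world/sensor objects.

import queue
from collections import namedtuple

Event = namedtuple('Event', 'frame data')

class ToySyncMode:
    def __init__(self, n_sensors):
        self.frame = 0
        self._queues = [queue.Queue() for _ in range(n_sensors)]

    def push(self, sensor_idx, frame, data):
        # stands in for sensor.listen(q.put) in the real script
        self._queues[sensor_idx].put(Event(frame, data))

    def tick(self, timeout=1.0):
        self.frame += 1
        out = []
        for q in self._queues:
            while True:                      # discard stale frames, keep the match
                ev = q.get(timeout=timeout)
                if ev.frame == self.frame:
                    out.append(ev.data)
                    break
        return out

sync = ToySyncMode(n_sensors=2)
sync.push(0, 1, 'rgb frame 1')
sync.push(1, 1, 'segmentation frame 1')
print(sync.tick())                           # ['rgb frame 1', 'segmentation frame 1']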
] | [
[
"numpy.reshape",
"numpy.array",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
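In the follow-mode branch of the record above, the steering target angle comes from carDetector.getAngle([location1.location.x, location1.location.y], [newX, newY], [targetX, targetY]), i.e. the angle at the ego position between a point projected in front of the car and the next pure-pursuit waypoint. The repository's CarDetector implementation is not reproduced in this record, so the snippet below is only a minimal, self-contained sketch of one common way to compute such a signed planar angle; the function name signed_angle and the sign convention (positive when the target lies to the left of the heading) are assumptions made for illustration, not the project's actual code.

import math

def signed_angle(origin, heading_point, target_point):
    # Vectors from the ego position toward the projected heading point and
    # toward the pure-pursuit target point.
    hx, hy = heading_point[0] - origin[0], heading_point[1] - origin[1]
    tx, ty = target_point[0] - origin[0], target_point[1] - origin[1]
    # atan2 of (cross, dot) gives the signed angle between the two vectors.
    dot = hx * tx + hy * ty
    cross = hx * ty - hy * tx
    return math.degrees(math.atan2(cross, dot))

# Example: heading straight along +x, target 45 degrees to the left.
print(signed_angle((0.0, 0.0), (1.0, 0.0), (1.0, 1.0)))  # -> 45.0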
lauromoraes/CapsNet-promoter | [
"9b08912648ff5d58a11ebb42225d9ad9851c61ac"
] | [
"teste_plot.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 1 19:35:08 2018\n\n@author: fnord\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n################################################################################# MCC\n\n#k = [0.8593737651, 0.8553389745, 0.7784318972, 0.9113220823, 0.8003214083, 0.8503145005, 0.8212621392, 0.8153599144, 0.8392105873, 0.7344867993]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.8914618592 , 0.8569780601 , 0.8909567193 , 0.9027168258 , 0.8788289546]),\n# 'Arabidopsis_tata' : np.array([0.945435053 , 0.965500981 , 0.950218117 , 0.959896776 , 0.955468017]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.83631664, 0.81214853, 0.84394381, 0.79153428, 0.79061714]),\n# 'Human_non_tata' : np.array([0.6829905720948718, 0.6806508241752528, 0.6771045795907203, 0.7131432179128229, 0.6889706432976804]),\n# 'Mouse_tata' : np.array([0.9262202846, 0.921279092, 0.9362495065, 0.9030511015, 0.8914067925]),\n# 'Mouse_non_tata' : np.array([0.8219135468, 0.8070214523, 0.8126171665, 0.8446262532, 0.8433015854]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Coeficiente de Matthews\")\n#plt.ylabel(\"MCC\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#plt.savefig('mcc.eps', format='eps', dpi=1000)\n\n####################################################################### F1\n\n#k = [0.89743590, 0.89473684, 0.83783784, 0.93506494, 0.85365854, 0.88888889, 0.86746988, 0.86486486, 0.88311688, 0.80000000]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.92863140, 0.90632506, 0.92845528, 0.93620547, 0.92058347]),\n# 'Arabidopsis_tata' : np.array([0.96416938, 0.97719870, 0.96732026, 0.97368421, 0.97068404]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.86956522, 0.85380117, 0.87861272, 0.83798883, 0.83720930]),\n# 'Human_non_tata' : np.array([0.8113842173350582, 0.8190294047334449, 0.816718712466971, 0.8360535931790499, 0.8244705882352941]),\n# 'Mouse_tata' : np.array([0.94573643, 0.94208494, 0.95312500, 0.92830189, 0.91935484]),\n# 'Mouse_non_tata' : np.array([0.89419994, 0.88541973, 0.88648982, 0.90741840, 0.90660321]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"F-score\")\n#plt.ylabel(\"F1\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#plt.savefig('f1.eps', format='eps', dpi=1000)\n\n####################################################################### Sn\n\n#k = [0.94594595, 0.91891892, 0.83783784, 0.97297297, 0.94594595, 0.86486486, 0.97297297, 0.86486486, 0.91891892, 0.75675676]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.93570220, 0.95769882, 0.96615905, 0.95600677, 0.96108291]),\n# 'Arabidopsis_tata' : np.array([0.98666667, 1.00000000, 0.98666667, 0.98666667, 0.99333333]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.83333333, 0.86904762, 0.90476190, 0.89285714, 0.85714286]),\n# 'Human_non_tata' : np.array([0.79151943, 0.86471479, 0.85815245, 0.86622918, 0.88440182]),\n# 'Mouse_tata' : np.array([0.96825397, 0.96825397, 0.96825397, 0.97619048, 0.90476190]),\n# 'Mouse_non_tata' : np.array([0.94226044, 0.92321867, 0.88267813, 0.93918919, 0.93611794]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Sensibilidade\")\n#plt.ylabel(\"Sn\")\n##plt.xlabel(\"Bases de 
dados\")\n#plt.tight_layout()\n#\n#plt.savefig('sn.eps', format='eps', dpi=1000)\n\n####################################################################### Sp\n\n#k = [0.94059406, 0.95049505, 0.94059406, 0.96039604, 0.90099010, 0.97029703, 0.90099010, 0.95049505, 0.94059406, 0.95049505]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.95898778, 0.91972077, 0.94066318, 0.95549738, 0.93455497]),\n# 'Arabidopsis_tata' : np.array([0.96875000, 0.97569444, 0.97222222, 0.97916667, 0.97222222]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.97666667, 0.95333333, 0.95666667, 0.93333333, 0.94666667]),\n# 'Human_non_tata' : np.array([0.88608508, 0.82372026, 0.82624369, 0.85291997, 0.81362653]),\n# 'Mouse_tata' : np.array([0.97167139, 0.96883853, 0.97733711, 0.95467422, 0.97733711]),\n# 'Mouse_non_tata' : np.array([0.89166331, 0.89367700, 0.92871526, 0.91421667, 0.91542489]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Especificidade\")\n#plt.ylabel(\"Sp\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#\n#plt.savefig('sp.eps', format='eps', dpi=1000)\n\n####################################################################### Acc\n\n#k = [0.94202899, 0.94202899, 0.91304348, 0.96376812, 0.91304348, 0.94202899, 0.92028986, 0.92753623, 0.93478261, 0.89855072]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.95106505, 0.93264249, 0.94933794, 0.95567070, 0.94358089]),\n# 'Arabidopsis_tata' : np.array([0.97488584, 0.98401826, 0.97716895, 0.98173516, 0.97945205]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.94531250, 0.93489583, 0.94531250, 0.92447917, 0.92708333]),\n# 'Human_non_tata' : np.array([0.84668770, 0.84079916, 0.83953733, 0.85846477, 0.84311251]),\n# 'Mouse_tata' : np.array([0.97077244, 0.96868476, 0.97494781, 0.96033403, 0.95824635]),\n# 'Mouse_non_tata' : np.array([0.91170032, 0.90537582, 0.91048407, 0.92410606, 0.92361956]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Acurácia\")\n#plt.ylabel(\"Acc\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#\n#plt.savefig('acc.eps', format='eps', dpi=1000)\n\n####################################################################### Prec\n\n#k = [0.85365854, 0.87179487, 0.83783784, 0.90000000, 0.77777778, 0.91428571, 0.78260870, 0.86486486, 0.85000000, 0.84848485]\n#x = list()\n#\n#for i in range(0,10,2):\n# x.append( (k[i]+k[i+1])/2 )\n#print(x)\n#\n#df_mcc = pd.DataFrame({\n# 'Arabidopsis_non_tata' : np.array([0.92166667, 0.86018237, 0.89358372, 0.91720779, 0.88335925]),\n# 'Arabidopsis_tata' : np.array([0.94267516, 0.95541401, 0.94871795, 0.96103896, 0.94904459]),\n# 'Bacillus' : np.array(x),\n# 'Ecoli' : np.array([0.90909091, 0.83908046, 0.85393258, 0.78947368, 0.81818182]),\n# 'Human_non_tata' : np.array([0.83227176, 0.77792916, 0.77910174, 0.80790960, 0.77214632]),\n# 'Mouse_tata' : np.array([0.92424242, 0.91729323, 0.93846154, 0.88489209, 0.93442623]),\n# 'Mouse_non_tata' : np.array([0.85080422, 0.85059423, 0.89033457, 0.87772675, 0.87889273]),\n#})\n#\n#df_mcc.boxplot(rot=90)\n#\n#plt.title(\"Precisão\")\n#plt.ylabel(\"Prec\")\n##plt.xlabel(\"Bases de dados\")\n#plt.tight_layout()\n#\n#\n#plt.savefig('prec.eps', format='eps', dpi=1000)\n\n\n####################################################################### Comp Mcc\n#\n#cnn = np.array([0.86, 0.91, 0.86, 0.84, 0.90, 0.83, 0.93])\n#caps = 
np.array([0.88, 0.96, 0.83, 0.81, 0.69, 0.83, 0.92])\n#std = np.array([0.02, 0.01, 0.05, 0.02, 0.01, 0.02, 0.02])\n#\n#ind = np.arange(len(cnn))\n#width = 0.2\n#\n#ax = plt.subplot(111)\n#ax.bar(ind, cnn, width, color='#EDC951', label='CNN')\n#ax.bar(ind+width, caps, width, yerr=std, color='#EB6841', label='CapsNet')\n#\n#ax.set_ylabel('MCC')\n#plt.xticks(ind, ('Arabidopsis_non_tata', 'Arabidopsis_tata', 'Bacillus', 'Ecoli', 'Human_non_tata', 'Mouse_non_tata', 'Mouse_tata'), rotation='vertical')\n##plt.subplots_adjust(top=10.0)\n#\n##ax.legend(loc='upper right', shadow=True)\n#plt.title(\"Comparação do Coeficiente de Matthews\")\n##plt.legend(loc='upper left', prop={'size':10}, bbox_to_anchor=(1,1))\n#plt.legend(loc='bottom right')\n#\n#\n#\n#\n#plt.tight_layout()\n#plt.savefig('comp_mcc.eps', format='eps', dpi=1000)\n#plt.show()\n\n####################################################################### Comp Sn\n\n#cnn = np.array([0.94, 0.95, 0.91, 0.90, 0.90, 0.88, 0.97])\n#caps = np.array([0.96, 0.99, 0.90, 0.87, 0.85, 0.92, 0.96])\n#std = np.array([0.01, 0.01, 0.07, 0.03, 0.04, 0.02, 0.03])\n#\n#ind = np.arange(len(cnn))\n#width = 0.2\n#\n#ax = plt.subplot(111)\n#ax.bar(ind, cnn, width, color='#EDC951', label='CNN')\n#ax.bar(ind+width, caps, width, yerr=std, color='#EB6841', label='CapsNet')\n#\n#ax.set_ylabel('Sn')\n#plt.xticks(ind, ('Arabidopsis_non_tata', 'Arabidopsis_tata', 'Bacillus', 'Ecoli', 'Human_non_tata', 'Mouse_non_tata', 'Mouse_tata'), rotation='vertical')\n##plt.subplots_adjust(top=10.0)\n#\n##ax.legend(loc='upper right', shadow=True)\n#plt.title(\"Comparação da Sensibilidade\")\n##plt.legend(loc='upper left', prop={'size':10}, bbox_to_anchor=(1,1))\n#plt.legend(loc='bottom right')\n#\n#\n#\n#\n#plt.tight_layout()\n#plt.savefig('comp_sn.eps', format='eps', dpi=1000)\n#plt.show()\n\n####################################################################### Comp Sp\n\ncnn = np.array([0.94, 0.97, 0.95, 0.96, 0.98, 0.94, 0.97])\ncaps = np.array([0.94, 0.97, 0.94, 0.95, 0.84, 0.91, 0.97])\nstd = np.array([0.02, 0.00, 0.02, 0.02, 0.03, 0.02, 0.01])\n\nind = np.arange(len(cnn))\nwidth = 0.2\n\nax = plt.subplot(111)\nax.bar(ind, cnn, width, color='#EDC951', label='CNN')\nax.bar(ind+width, caps, width, yerr=std, color='#EB6841', label='CapsNet')\n\nax.set_ylabel('Sp')\nplt.xticks(ind, ('Arabidopsis_non_tata', 'Arabidopsis_tata', 'Bacillus', 'Ecoli', 'Human_non_tata', 'Mouse_non_tata', 'Mouse_tata'), rotation='vertical')\n#plt.subplots_adjust(top=10.0)\n\n#ax.legend(loc='upper right', shadow=True)\nplt.title(\"Comparação da Especificidade\")\n#plt.legend(loc='upper left', prop={'size':10}, bbox_to_anchor=(1,1))\nplt.legend(loc='bottom right')\n\n\n\n\nplt.tight_layout()\nplt.savefig('comp_sp.eps', format='eps', dpi=1000)\nplt.show()\n\n\nprint('END')"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xticks",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
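The active part of teste_plot.py above draws a grouped bar chart comparing CNN and CapsNet specificity, with error bars on the CapsNet series. A stripped-down sketch of that plotting pattern is shown below with made-up values (only the structure mirrors the script); note that loc='bottom right' in the original is not an accepted matplotlib legend location string, so the sketch uses 'lower right'.

import numpy as np
import matplotlib.pyplot as plt

labels = ("dataset_a", "dataset_b", "dataset_c")   # stand-ins for the promoter datasets
cnn = np.array([0.94, 0.97, 0.95])                  # illustrative values only
caps = np.array([0.94, 0.97, 0.94])
std = np.array([0.02, 0.00, 0.02])

ind = np.arange(len(cnn))
width = 0.2

ax = plt.subplot(111)
ax.bar(ind, cnn, width, color='#EDC951', label='CNN')
ax.bar(ind + width, caps, width, yerr=std, color='#EB6841', label='CapsNet')
ax.set_ylabel('Sp')
plt.xticks(ind + width / 2, labels, rotation='vertical')
plt.title('Comparison of specificity')
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()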
unseenme/mindspore | [
"4ba052f0cd9146ac0ccc4880a778706f1b2d0af8",
"4ba052f0cd9146ac0ccc4880a778706f1b2d0af8",
"930a1fb0a8fa9432025442c4f4732058bb7af592",
"4ba052f0cd9146ac0ccc4880a778706f1b2d0af8",
"930a1fb0a8fa9432025442c4f4732058bb7af592",
"930a1fb0a8fa9432025442c4f4732058bb7af592",
"4ba052f0cd9146ac0ccc4880a778706f1b2d0af8",
"4ba052f0cd9146ac0ccc4880a778706f1b2d0af8",
"4ba052f0cd9146ac0ccc4880a778706f1b2d0af8"
] | [
"tests/ut/python/dataset/test_pyfunc.py",
"tests/ut/python/ops/test_array_ops_check.py",
"tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py",
"tests/st/networks/test_network_main.py",
"tests/ut/python/pipeline/parse/test_parse.py",
"tests/ut/python/dataset/test_random_crop_decode_resize.py",
"tests/ut/python/nn/test_dense.py",
"tests/st/ops/gpu/test_lessequal_op.py",
"tests/st/mem_reuse/resnet.py"
] | [
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport numpy as np\nimport pytest\n\nimport mindspore.dataset as ds\nfrom mindspore import log as logger\n\nDATA_DIR = [\"../data/dataset/testPyfuncMap/data.data\"]\nSCHEMA_DIR = \"../data/dataset/testPyfuncMap/schema.json\"\nCOLUMNS = [\"col0\", \"col1\", \"col2\"]\nGENERATE_GOLDEN = False\n\n\ndef test_case_0():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test 1-1 PyFunc : lambda x : x + x\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations=(lambda x: x + x))\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_case_1():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test 1-n PyFunc : lambda x : (x , x + x) \")\n\n col = \"col0\"\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n data1 = data1.map(input_columns=col, output_columns=[\"out0\", \"out1\"], operations=(lambda x: (x, x + x)),\n columns_order=[\"out0\", \"out1\"])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n assert np.array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out1\"], golden)\n i = i + 4\n\n\ndef test_case_2():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test n-1 PyFunc : lambda x, y : x + y \")\n\n col = [\"col0\", \"col1\"]\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=col, output_columns=\"out\", operations=(lambda x, y: x + y),\n columns_order=[\"out\"])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_case_3():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test n-m PyFunc : lambda x, y : (x , x + 1, x + y)\")\n\n col = [\"col0\", \"col1\"]\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=col, output_columns=[\"out0\", \"out1\", \"out2\"],\n operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=[\"out0\", \"out1\", \"out2\"])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i, i + 1], [i 
+ 2, i + 3]])\n assert np.array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out1\"], golden)\n golden = np.array([[i * 2 + 1, (i + 1) * 2 + 1], [(i + 2) * 2 + 1, (i + 3) * 2 + 1]])\n assert np.array_equal(item[\"out2\"], golden)\n i = i + 4\n\n\ndef test_case_4():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test Parallel n-m PyFunc : lambda x, y : (x , x + 1, x + y)\")\n\n col = [\"col0\", \"col1\"]\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=col, output_columns=[\"out0\", \"out1\", \"out2\"], num_parallel_workers=4,\n operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=[\"out0\", \"out1\", \"out2\"])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n assert np.array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out1\"], golden)\n golden = np.array([[i * 2 + 1, (i + 1) * 2 + 1], [(i + 2) * 2 + 1, (i + 3) * 2 + 1]])\n assert np.array_equal(item[\"out2\"], golden)\n i = i + 4\n\n\n# The execution of this function will acquire GIL\ndef func_5(x):\n return np.ones(x.shape, dtype=x.dtype)\n\n\ndef test_case_5():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test 1-1 PyFunc : lambda x: np.ones(x.shape)\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations=func_5)\n\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[1, 1], [1, 1]])\n assert np.array_equal(item[\"out\"], golden)\n\n\ndef test_case_6():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test PyFunc ComposeOp : (lambda x : x + x), (lambda x : x + x)\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\",\n operations=[(lambda x: x + x), (lambda x: x + x)])\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 4, (i + 1) * 4], [(i + 2) * 4, (i + 3) * 4]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_case_7():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test 1-1 PyFunc Multiprocess: lambda x : x + x\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations=(lambda x: x + x),\n num_parallel_workers=4, python_multiprocessing = True)\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_case_8():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test Multiprocess n-m PyFunc : lambda x, y : (x , x + 1, x + y)\")\n\n col = [\"col0\", \"col1\"]\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=col, output_columns=[\"out0\", \"out1\", 
\"out2\"], num_parallel_workers=4,\n operations=(lambda x, y: (x, x + y, x + y + 1)), columns_order=[\"out0\", \"out1\", \"out2\"],\n python_multiprocessing=True)\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n assert np.array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])\n assert np.array_equal(item[\"out1\"], golden)\n golden = np.array([[i * 2 + 1, (i + 1) * 2 + 1], [(i + 2) * 2 + 1, (i + 3) * 2 + 1]])\n assert np.array_equal(item[\"out2\"], golden)\n i = i + 4\n\n\ndef test_case_9():\n \"\"\"\n Test PyFunc\n \"\"\"\n logger.info(\"Test multiple 1-1 PyFunc Multiprocess: lambda x : x + x\")\n\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations=[(lambda x: x + x), (lambda x: x + 1),\n (lambda x: x + 2)],\n num_parallel_workers=4, python_multiprocessing=True)\n\n i = 0\n for item in data1.create_dict_iterator(): # each data is a dictionary\n # In this test, the dataset is 2x2 sequential tensors\n golden = np.array([[i * 2 + 3, (i + 1) * 2 + 3], [(i + 2) * 2 + 3, (i + 3) * 2 + 3]])\n assert np.array_equal(item[\"out\"], golden)\n i = i + 4\n\n\ndef test_pyfunc_execption():\n logger.info(\"Test PyFunc Execption Throw: lambda x : raise Execption()\")\n\n def pyfunc(x):\n raise Exception(\"Pyfunc Throw\")\n\n with pytest.raises(RuntimeError) as info:\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations= pyfunc,\n num_parallel_workers=4)\n for _ in data1:\n pass\n assert \"Pyfunc Throw\" in str(info.value)\n\n\ndef skip_test_pyfunc_execption_multiprocess():\n logger.info(\"Test Multiprocess PyFunc Execption Throw: lambda x : raise Execption()\")\n\n def pyfunc(x):\n raise Exception(\"MP Pyfunc Throw\")\n\n with pytest.raises(RuntimeError) as info:\n # apply dataset operations\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n data1 = data1.map(input_columns=\"col0\", output_columns=\"out\", operations= pyfunc,\n num_parallel_workers=4, python_multiprocessing = True)\n for _ in data1:\n pass\n assert \"MP Pyfunc Throw\" in str(info.value)\n\n\nif __name__ == \"__main__\":\n test_case_0()\n test_case_1()\n test_case_2()\n test_case_3()\n test_case_4()\n test_case_5()\n test_case_6()\n test_case_7()\n test_case_8()\n test_case_9()\n test_pyfunc_execption()\n skip_test_pyfunc_execption_multiprocess()\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test ops \"\"\"\nimport functools\nimport numpy as np\nfrom mindspore import ops\nfrom mindspore.ops import functional as F\nfrom mindspore.ops import operations as P\nfrom mindspore.ops.operations import _grad_ops as G\nimport mindspore.ops.composite as C\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common import dtype as mstype\nfrom mindspore.common.parameter import Parameter\nfrom ..ut_filter import non_graph_engine\nfrom mindspore.common.api import _executor\n\nfrom ....mindspore_test_framework.mindspore_test import mindspore_test\nfrom ....mindspore_test_framework.pipeline.forward.compile_forward\\\n import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config,\n pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)\nfrom ....mindspore_test_framework.pipeline.gradient.compile_gradient\\\n import pipeline_for_compile_grad_ge_graph_for_case_by_case_config\n\n\nclass ExpandDimsNet(nn.Cell):\n def __init__(self, axis):\n super(ExpandDimsNet, self).__init__()\n self.axis = axis\n self.op = P.ExpandDims()\n\n def construct(self, x):\n return self.op(x, self.axis)\n\n\nclass IsInstanceNet(nn.Cell):\n def __init__(self, inst):\n super(IsInstanceNet, self).__init__()\n self.inst = inst\n self.op = P.IsInstance()\n\n def construct(self, t):\n return self.op(self.inst, t)\n\n\nclass ReshapeNet(nn.Cell):\n def __init__(self, shape):\n super(ReshapeNet, self).__init__()\n self.shape = shape\n self.op = P.Reshape()\n\n def construct(self, x):\n return self.op(x, self.shape)\n\n\nraise_set = [\n # input is scala, not Tensor\n ('ExpandDims0', {\n 'block': (P.ExpandDims(), {'exception': TypeError, 'error_keywords': ['ExpandDims']}),\n 'desc_inputs': [5.0, 1],\n 'skip': ['backward']}),\n # axis is as a parameter\n ('ExpandDims1', {\n 'block': (P.ExpandDims(), {'exception': TypeError, 'error_keywords': ['ExpandDims']}),\n 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), 1],\n 'skip': ['backward']}),\n # axis as an attribute, but less then lower limit\n ('ExpandDims2', {\n 'block': (ExpandDimsNet(-4), {'exception': ValueError, 'error_keywords': ['ExpandDims']}),\n 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],\n 'skip': ['backward']}),\n # axis as an attribute, but greater then upper limit\n ('ExpandDims3', {\n 'block': (ExpandDimsNet(3), {'exception': ValueError, 'error_keywords': ['ExpandDims']}),\n 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],\n 'skip': ['backward']}),\n\n # input is scala, not Tensor\n ('DType0', {\n 'block': (P.DType(), {'exception': TypeError, 'error_keywords': ['DType']}),\n 'desc_inputs': [5.0],\n 'skip': ['backward']}),\n\n # input x scala, not Tensor\n ('SameTypeShape0', {\n 'block': (P.SameTypeShape(), {'exception': TypeError, 'error_keywords': ['SameTypeShape']}),\n 'desc_inputs': [5.0, 
Tensor(np.ones([3, 4]).astype(np.float32))],\n 'skip': ['backward']}),\n # input y scala, not Tensor\n ('SameTypeShape1', {\n 'block': (P.SameTypeShape(), {'exception': TypeError, 'error_keywords': ['SameTypeShape']}),\n 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), 5.0],\n 'skip': ['backward']}),\n # type of x and y not match\n ('SameTypeShape2', {\n 'block': (P.SameTypeShape(), {'exception': TypeError, 'error_keywords': ['SameTypeShape']}),\n 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.int32))],\n 'skip': ['backward']}),\n # shape of x and y not match\n ('SameTypeShape3', {\n 'block': (P.SameTypeShape(), {'exception': ValueError, 'error_keywords': ['SameTypeShape']}),\n 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 3]).astype(np.float32))],\n 'skip': ['backward']}),\n\n # sub_type is None\n ('IsSubClass0', {\n 'block': (P.IsSubClass(), {'exception': TypeError, 'error_keywords': ['IsSubClass']}),\n 'desc_inputs': [None, mstype.number],\n 'skip': ['backward']}),\n # type_ is None\n ('IsSubClass1', {\n 'block': (P.IsSubClass(), {'exception': TypeError, 'error_keywords': ['IsSubClass']}),\n 'desc_inputs': [mstype.number, None],\n 'skip': ['backward']}),\n\n # inst is var\n ('IsInstance0', {\n 'block': (P.IsInstance(), {'exception': ValueError, 'error_keywords': ['IsInstance']}),\n 'desc_inputs': [5.0, mstype.number],\n 'skip': ['backward']}),\n # t is not mstype.Type\n ('IsInstance1', {\n 'block': (IsInstanceNet(5.0), {'exception': TypeError, 'error_keywords': ['IsInstance']}),\n 'desc_inputs': [None],\n 'skip': ['backward']}),\n\n # input x is scalar, not Tensor\n ('Reshape0', {\n 'block': (P.Reshape(), {'exception': TypeError, 'error_keywords': ['Reshape']}),\n 'desc_inputs': [5.0, (1, 2)],\n 'skip': ['backward']}),\n # input shape is var\n ('Reshape1', {\n 'block': (P.Reshape(), {'exception': TypeError, 'error_keywords': ['Reshape']}),\n 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), (2, 3, 2)],\n 'skip': ['backward']}),\n # element of shape is not int\n ('Reshape3', {\n 'block': (ReshapeNet((2, 3.0, 2)), {'exception': TypeError, 'error_keywords': ['Reshape']}),\n 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))],\n 'skip': ['backward']}),\n]\n\n\n@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)\ndef test_check_exception():\n return raise_set\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\nimport mindspore.nn as nn\nfrom mindspore.common.api import ms_function\nimport numpy as np\nimport mindspore.context as context\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common.parameter import Parameter\ncontext.set_context(device_target=\"Ascend\")\nclass Net(nn.Cell):\n def __init__(self, keep_dims, axis):\n super(Net, self).__init__()\n self.reduce_mean = P.ReduceMean(keep_dims=keep_dims)\n self.axis = axis\n\n @ms_function\n def construct(self, inputs):\n return self.reduce_mean(inputs, self.axis)\n\nx1 = np.random.randn(64).astype(np.float32)\n\ndef test_net():\n keepdims = False\n axis = -1\n Reduce_mean = Net(keepdims, axis)\n output = Reduce_mean(Tensor(x1))\n print(x1)\n print(output.asnumpy())\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"\r\nFunction:\r\n test network\r\nUsage:\r\n python test_network_main.py --net lenet --target Ascend\r\n\"\"\"\r\nimport os\r\nimport time\r\nimport numpy as np\r\nimport argparse\r\nimport mindspore.context as context\r\nimport mindspore.nn as nn\r\nfrom mindspore import Tensor\r\nfrom mindspore.nn import TrainOneStepCell, WithLossCell\r\nfrom mindspore.nn.optim import Momentum\r\nfrom models.lenet import LeNet\r\nfrom models.resnetv1_5 import resnet50\r\nfrom models.alexnet import AlexNet\r\n\r\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\r\n\r\n\r\ndef train(net, data, label):\r\n learning_rate = 0.01\r\n momentum = 0.9\r\n\r\n optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum)\r\n criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)\r\n net_with_criterion = WithLossCell(net, criterion)\r\n train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer\r\n train_network.set_train()\r\n res = train_network(data, label)\r\n print(res)\r\n assert res\r\n\r\n\r\ndef test_resnet50():\r\n data = Tensor(np.ones([32, 3, 224, 224]).astype(np.float32) * 0.01)\r\n label = Tensor(np.ones([32]).astype(np.int32))\r\n net = resnet50(32, 10)\r\n train(net, data, label)\r\n\r\n\r\ndef test_lenet():\r\n data = Tensor(np.ones([32, 1, 32, 32]).astype(np.float32) * 0.01)\r\n label = Tensor(np.ones([32]).astype(np.int32))\r\n net = LeNet()\r\n train(net, data, label)\r\n\r\n\r\ndef test_alexnet():\r\n data = Tensor(np.ones([32, 3, 227, 227]).astype(np.float32) * 0.01)\r\n label = Tensor(np.ones([32]).astype(np.int32))\r\n net = AlexNet()\r\n train(net, data, label)\r\n\r\n\r\nparser = argparse.ArgumentParser(description='MindSpore Testing Network')\r\nparser.add_argument('--net', default='resnet50', type=str, help='net name')\r\nparser.add_argument('--device', default='Ascend', type=str, help='device target')\r\nif __name__ == \"__main__\":\r\n args = parser.parse_args()\r\n context.set_context(device_target=args.device)\r\n if args.net == 'resnet50':\r\n test_resnet50()\r\n elif args.net == 'lenet':\r\n test_lenet()\r\n elif args.net == 'alexnet':\r\n test_alexnet()\r\n else:\r\n print(\"Please add net name like --net lenet\")\r\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n@File : test_parse.py\n@Author:\n@Date : 2019-01-23 17:13\n@Desc :\n\"\"\"\nimport logging\nimport numpy as np\n\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.common.api import ms_function, _executor\nfrom mindspore.ops.composite import core\nfrom mindspore.ops.functional import tensor_add\nfrom ...ut_filter import non_graph_engine\n# pylint: disable=W0613\n# W0613: unused-argument\n\n\nlog = logging.getLogger(\"test\")\nlog.setLevel(level=logging.ERROR)\n\n# Test case: use the parse obj interface use default parameter\nclass Net(nn.Cell):\n \"\"\" Net definition \"\"\"\n def __init__(self, dim):\n super(Net, self).__init__()\n self.softmax1 = nn.Softmax(dim)\n self.softmax2 = nn.Softmax(dim + 1)\n\n def construct(self, input_data, input1=ms.Tensor(np.random.randn(2, 3, 4, 5).astype(np.float32))):\n return self.softmax1(input_data)\n\n\n@non_graph_engine\ndef test_parse_defalut_parameter_case2():\n \"\"\" test_parse_defalut_parameter_case2 \"\"\"\n log.debug(\"begin test_parse_defalut_parameter_case2\")\n net = Net(0)\n npd = np.array([[1.2, 2.1], [2.2, 3.2]]).astype('float32')\n log.debug(\"input value is: %r\", npd)\n input_data = ms.Tensor(npd)\n input_data.set_dtype(ms.float32)\n\n log.debug(\"start run\")\n output = net(input_data)\n\n value = output.asnumpy()\n log.debug(\"output value = %r\", value)\n\n\n\n# Test case: use the variable parameter for parse object\nclass Net1(nn.Cell):\n \"\"\" Net1 definition \"\"\"\n def __init__(self):\n super(Net1, self).__init__()\n\n def construct(self, *args):\n x = args[0]\n return x\n\n\ndef test_var_parameter_case2():\n \"\"\" test_var_parameter_case2 \"\"\"\n log.debug(\"begin test_var_parameter_case2\")\n net = Net1()\n npd = np.array([[1.2, 2.1], [2.2, 3.2]]).astype('float32')\n log.debug(\"input value is: %r\", npd)\n input_data = ms.Tensor(npd)\n input_data.set_dtype(ms.float32)\n\n np1 = np.random.randn(2, 3, 4, 5).astype(np.float32)\n input1 = ms.Tensor(np1)\n np2 = np.random.randn(2, 3, 4, 5).astype(np.float32)\n input2 = ms.Tensor(np2)\n\n _executor.compile(net, input_data, input1, input2)\n\n\n\n# Test case: test the global flag\ng_x = Tensor(np.ones([3, 3]).astype(np.float32))\n\n@ms_function\ndef tensor_add_global(x):\n \"\"\" tensor_add_global \"\"\"\n global g_x\n res = tensor_add(x, g_x)\n return res\n\n\n@non_graph_engine\ndef test_global_flag():\n \"\"\" test_global_flag \"\"\"\n log.debug(\"begin test_global_flag\")\n x = Tensor(np.ones([3, 3]).astype(np.float32))\n res = tensor_add_global(x)\n log.debug(\"finished test_global_flag, ret = %r\", res)\n\n\nclass NetWithNDarray(nn.Cell):\n \"\"\" NetWithNDarray definition \"\"\"\n def __init__(self, dim):\n super(NetWithNDarray, self).__init__()\n self.softmax = nn.Softmax(dim)\n self.x = ms.Tensor(np.ones(shape=(1)).astype(np.float32))\n\n def 
construct(self, input_data):\n return self.softmax(input_data) * self.x\n\n@non_graph_engine\ndef test_net_with_ndarray():\n \"\"\" test_net_with_ndarray \"\"\"\n net = NetWithNDarray(0)\n input_data = np.array([[1.2, 2.1], [2.2, 3.2]]).astype('float32')\n\n net(ms.Tensor(input_data))\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting RandomCropDecodeResize op in DE\n\"\"\"\nimport cv2\nimport matplotlib.pyplot as plt\nimport mindspore.dataset.transforms.vision.c_transforms as vision\nimport numpy as np\n\nimport mindspore.dataset as ds\nfrom mindspore import log as logger\n\nDATA_DIR = [\"../data/dataset/test_tf_file_3_images/train-0000-of-0001.data\"]\nSCHEMA_DIR = \"../data/dataset/test_tf_file_3_images/datasetSchema.json\"\n\n\ndef visualize(a, mse, original):\n \"\"\"\n visualizes the image using DE op and Numpy Op\n \"\"\"\n plt.subplot(141)\n plt.imshow(original)\n plt.title(\"Original image\")\n\n plt.subplot(142)\n plt.imshow(a)\n plt.title(\"DE random_crop_decode_resize image\")\n\n plt.subplot(143)\n plt.imshow(a - original)\n plt.title(\"Difference image, mse : {}\".format(mse))\n plt.show()\n\n\ndef test_random_crop_decode_resize_op():\n \"\"\"\n Test RandomCropDecodeResize op\n \"\"\"\n logger.info(\"test_random_decode_resize_op\")\n\n # First dataset\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n decode_op = vision.Decode()\n random_crop_decode_resize_op = vision.RandomCropDecodeResize((256, 512), (1, 1), (0.5, 0.5))\n data1 = data1.map(input_columns=[\"image\"], operations=random_crop_decode_resize_op)\n\n # Second dataset\n data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n data2 = data2.map(input_columns=[\"image\"], operations=decode_op)\n\n num_iter = 0\n for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):\n\n if num_iter > 0:\n break\n crop_and_resize = item1[\"image\"]\n original = item2[\"image\"]\n original = cv2.resize(original, (512, 256))\n diff = crop_and_resize - original\n mse = np.sum(np.power(diff, 2))\n logger.info(\"random_crop_decode_resize_op_{}, mse: {}\".format(num_iter + 1, mse))\n # Uncomment below line if you want to visualize images\n # visualize(crop_and_resize, mse, original)\n num_iter += 1\n\n\nif __name__ == \"__main__\":\n test_random_crop_decode_resize_op()\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test nn.Dense \"\"\"\nimport numpy as np\nimport pytest\nimport mindspore.nn as nn\nfrom mindspore.common.api import _executor\nimport mindspore.context as context\nfrom mindspore import Tensor\nfrom ..ut_filter import non_graph_engine\n\n\ndef test_dense_none():\n with pytest.raises(TypeError):\n nn.Dense(3, 2, None, None)\n\n\n@non_graph_engine\ndef test_dense_str_activation():\n dense = nn.Dense(1, 1, activation='relu')\n assert isinstance(dense.activation, nn.ReLU)\n\n input_data = Tensor(np.random.randint(0, 255, [1, 1]).astype(np.float32))\n dense.construct(input_data)\n\n\ndef test_dense_weight_error():\n dim_error = Tensor(np.array([[[0.1], [0.3], [0.6]], [[0.4], [0.5], [0.2]]]))\n with pytest.raises(ValueError):\n nn.Dense(3, 2, dim_error)\n\n shape_error = Tensor(np.array([[0.1, 0.3, 0.6], [0.4, 0.5, 0.2]]))\n with pytest.raises(ValueError):\n nn.Dense(2, 2, shape_error)\n with pytest.raises(ValueError):\n nn.Dense(3, 3, shape_error)\n\n\ndef test_dense_bias_error():\n dim_error = Tensor(np.array([[0.5, 0.3]]))\n with pytest.raises(ValueError):\n nn.Dense(3, 2, bias_init=dim_error)\n\n shape_error = Tensor(np.array([0.5, 0.3, 0.4]))\n with pytest.raises(ValueError):\n nn.Dense(3, 2, bias_init=shape_error)\n\n\ndef test_dense_channels_error():\n with pytest.raises(ValueError):\n nn.Dense(3, 0)\n\n with pytest.raises(ValueError):\n nn.Dense(-1, 2)\n\n\nclass Net(nn.Cell):\n \"\"\" Net definition \"\"\"\n def __init__(self,\n input_channels,\n output_channels,\n weight='normal',\n bias='zeros',\n has_bias=True,\n activation=''):\n super(Net, self).__init__()\n self.dense = nn.Dense(input_channels,\n output_channels,\n weight,\n bias,\n has_bias,\n activation=activation)\n\n def construct(self, input_x):\n return self.dense(input_x)\n\n\ndef test_compile():\n \"\"\" test_compile \"\"\"\n # has bias\n weight = Tensor(np.random.randint(0, 255, [8, 64]).astype(np.float32))\n bias = Tensor(np.random.randint(0, 255, [8]).astype(np.float32))\n net = Net(64, 8, weight=weight, bias=bias)\n input_data = Tensor(np.random.randint(0, 255, [128, 64]).astype(np.float32))\n _executor.compile(net, input_data)\n\n # training\n net_train = Net(64, 8, weight=weight, bias=bias)\n net_train.set_train()\n _executor.compile(net_train, input_data)\n\n\ndef test_compile_2():\n \"\"\" test_compile_2 \"\"\"\n # no bias\n weight = Tensor(np.random.randint(0, 255, [8, 64]).astype(np.float32))\n net = Net(64, 8, weight=weight, has_bias=False)\n input_data = Tensor(np.random.randint(0, 255, [128, 64]).astype(np.float32))\n _executor.compile(net, input_data)\n\n # training\n net_train = Net(64, 8, weight=weight, has_bias=False)\n net_train.set_train()\n _executor.compile(net_train, input_data)\n\n\ndef test_compile_3():\n \"\"\" test_compile_3 \"\"\"\n # test for Graph mode\n # has bias\n context.set_context(mode=context.GRAPH_MODE)\n 
net = Net(128, 10)\n input_data = Tensor(np.random.randint(0, 255, [128, 128]).astype(np.float32))\n _executor.compile(net, input_data)\n\n # training\n net_train = Net(128, 10)\n net_train.set_train()\n _executor.compile(net_train, input_data)\n\n\ndef test_compile_4():\n \"\"\" test_compile_4 \"\"\"\n # test for Graph mode\n # no bias\n context.set_context(mode=context.GRAPH_MODE)\n net = Net(128, 10, has_bias=False)\n input_data = Tensor(np.random.randint(0, 255, [128, 128]).astype(np.float32))\n _executor.compile(net, input_data)\n\n # training\n net_train = Net(128, 10, has_bias=False)\n net_train.set_train()\n _executor.compile(net_train, input_data)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport pytest\nfrom mindspore.ops import operations as P\nfrom mindspore.nn import Cell\nfrom mindspore.common.tensor import Tensor\nimport mindspore.context as context\nimport numpy as np\n\n\nclass Net(Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.lessequal = P.LessEqual()\n\n def construct(self, x, y):\n return self.lessequal(x, y)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_lessequal():\n x = Tensor(np.array([[1, 2, 3]]).astype(np.float32))\n y = Tensor(np.array([[2]]).astype(np.float32))\n expect = [[True, True, False]]\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")\n lessequal = Net()\n output = lessequal(x, y)\n assert np.all(output.asnumpy() == expect)\n\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n lessequal = Net()\n output = lessequal(x, y)\n assert np.all(output.asnumpy() == expect)\n\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common import dtype as mstype\n\n\ndef weight_variable(shape):\n return initializer('XavierUniform', shape=shape, dtype=mstype.float32)\n\n\ndef weight_variable_uniform(shape):\n return initializer('Uniform', shape=shape, dtype=mstype.float32)\n\n\ndef weight_variable_0(shape):\n zeros = np.zeros(shape).astype(np.float32)\n return Tensor(zeros)\n\n\ndef weight_variable_1(shape):\n ones = np.ones(shape).astype(np.float32)\n return Tensor(ones)\n\n\ndef conv3x3(in_channels, out_channels, stride=1, padding=0):\n \"\"\"3x3 convolution \"\"\"\n weight_shape = (out_channels, in_channels, 3, 3)\n weight = weight_variable(weight_shape)\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=3, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode=\"same\")\n\n\ndef conv1x1(in_channels, out_channels, stride=1, padding=0):\n \"\"\"1x1 convolution\"\"\"\n weight_shape = (out_channels, in_channels, 1, 1)\n weight = weight_variable(weight_shape)\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=1, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode=\"same\")\n\n\ndef conv7x7(in_channels, out_channels, stride=1, padding=0):\n \"\"\"1x1 convolution\"\"\"\n weight_shape = (out_channels, in_channels, 7, 7)\n weight = weight_variable(weight_shape)\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=7, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode=\"same\")\n\n\ndef bn_with_initialize(out_channels):\n shape = (out_channels)\n mean = weight_variable_0(shape)\n var = weight_variable_1(shape)\n beta = weight_variable_0(shape)\n gamma = weight_variable_uniform(shape)\n bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init=gamma,\n beta_init=beta, moving_mean_init=mean, moving_var_init=var)\n return bn\n\n\ndef bn_with_initialize_last(out_channels):\n shape = (out_channels)\n mean = weight_variable_0(shape)\n var = weight_variable_1(shape)\n beta = weight_variable_0(shape)\n gamma = weight_variable_uniform(shape)\n bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init=gamma,\n beta_init=beta, moving_mean_init=mean, moving_var_init=var)\n return bn\n\n\ndef fc_with_initialize(input_channels, out_channels):\n weight_shape = (out_channels, input_channels)\n weight = weight_variable(weight_shape)\n bias_shape = (out_channels)\n bias = weight_variable_uniform(bias_shape)\n return nn.Dense(input_channels, out_channels, weight, bias)\n\n\nclass ResidualBlock(nn.Cell):\n expansion = 4\n\n def __init__(self,\n in_channels,\n out_channels,\n stride=1,\n down_sample=False):\n super(ResidualBlock, self).__init__()\n\n out_chls = 
out_channels // self.expansion\n self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)\n self.bn1 = bn_with_initialize(out_chls)\n\n self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)\n self.bn2 = bn_with_initialize(out_chls)\n\n self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)\n self.bn3 = bn_with_initialize_last(out_channels)\n\n self.relu = P.ReLU()\n self.add = P.TensorAdd()\n\n def construct(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out = self.add(out, identity)\n out = self.relu(out)\n\n return out\n\n\nclass ResidualBlockWithDown(nn.Cell):\n expansion = 4\n\n def __init__(self,\n in_channels,\n out_channels,\n stride=1,\n down_sample=False):\n super(ResidualBlockWithDown, self).__init__()\n\n out_chls = out_channels // self.expansion\n self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)\n self.bn1 = bn_with_initialize(out_chls)\n\n self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)\n self.bn2 = bn_with_initialize(out_chls)\n\n self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)\n self.bn3 = bn_with_initialize_last(out_channels)\n\n self.relu = P.ReLU()\n self.downSample = down_sample\n\n self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0)\n self.bn_down_sample = bn_with_initialize(out_channels)\n self.add = P.TensorAdd()\n\n def construct(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n identity = self.conv_down_sample(identity)\n identity = self.bn_down_sample(identity)\n\n out = self.add(out, identity)\n out = self.relu(out)\n\n return out\n\n\nclass MakeLayer0(nn.Cell):\n\n def __init__(self, block, layer_num, in_channels, out_channels, stride):\n super(MakeLayer0, self).__init__()\n self.a = ResidualBlockWithDown(in_channels, out_channels, stride=1, down_sample=True)\n self.b = block(out_channels, out_channels, stride=stride)\n self.c = block(out_channels, out_channels, stride=1)\n\n def construct(self, x):\n x = self.a(x)\n x = self.b(x)\n x = self.c(x)\n\n return x\n\n\nclass MakeLayer1(nn.Cell):\n\n def __init__(self, block, layer_num, in_channels, out_channels, stride):\n super(MakeLayer1, self).__init__()\n self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)\n self.b = block(out_channels, out_channels, stride=1)\n self.c = block(out_channels, out_channels, stride=1)\n self.d = block(out_channels, out_channels, stride=1)\n\n def construct(self, x):\n x = self.a(x)\n x = self.b(x)\n x = self.c(x)\n x = self.d(x)\n\n return x\n\n\nclass MakeLayer2(nn.Cell):\n\n def __init__(self, block, layer_num, in_channels, out_channels, stride):\n super(MakeLayer2, self).__init__()\n self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)\n self.b = block(out_channels, out_channels, stride=1)\n self.c = block(out_channels, out_channels, stride=1)\n self.d = block(out_channels, out_channels, stride=1)\n self.e = block(out_channels, out_channels, stride=1)\n self.f = block(out_channels, out_channels, stride=1)\n\n def construct(self, x):\n x = self.a(x)\n x = self.b(x)\n x = self.c(x)\n x = self.d(x)\n x = self.e(x)\n x = self.f(x)\n\n return x\n\n\nclass 
MakeLayer3(nn.Cell):\n\n def __init__(self, block, layer_num, in_channels, out_channels, stride):\n super(MakeLayer3, self).__init__()\n self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)\n self.b = block(out_channels, out_channels, stride=1)\n self.c = block(out_channels, out_channels, stride=1)\n\n def construct(self, x):\n x = self.a(x)\n x = self.b(x)\n x = self.c(x)\n\n return x\n\n\nclass ResNet(nn.Cell):\n\n def __init__(self, block, layer_num, num_classes=100, batch_size=32):\n super(ResNet, self).__init__()\n self.batch_size = batch_size\n self.num_classes = num_classes\n\n self.conv1 = conv7x7(3, 64, stride=2, padding=0)\n\n self.bn1 = bn_with_initialize(64)\n self.relu = P.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"same\")\n\n self.layer1 = MakeLayer0(block, layer_num[0], in_channels=64, out_channels=256, stride=1)\n self.layer2 = MakeLayer1(block, layer_num[1], in_channels=256, out_channels=512, stride=2)\n self.layer3 = MakeLayer2(block, layer_num[2], in_channels=512, out_channels=1024, stride=2)\n self.layer4 = MakeLayer3(block, layer_num[3], in_channels=1024, out_channels=2048, stride=2)\n\n self.pool = P.ReduceMean(keep_dims=True)\n self.squeeze = P.Squeeze(axis=(2, 3))\n self.fc = fc_with_initialize(512 * block.expansion, num_classes)\n\n def construct(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.pool(x, (2, 3))\n x = self.squeeze(x)\n x = self.fc(x)\n return x\n\n\ndef resnet50(batch_size, num_classes):\n return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes, batch_size)\n"
] | [
[
"numpy.array",
"numpy.array_equal",
"numpy.ones"
],
[
"numpy.ones"
],
[
"numpy.random.randn"
],
[
"numpy.ones"
],
[
"numpy.array",
"numpy.random.randn",
"numpy.ones"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.power",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
],
[
"numpy.array",
"numpy.random.randint"
],
[
"numpy.array"
],
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
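Several of the MindSpore PyFunc tests above follow one pattern: map a small lambda over 2x2 sequential tensors and compare each output against a hand-built "golden" array. The check itself needs nothing from MindSpore, so the sketch below reproduces just that verification logic with plain NumPy; the synthetic 2x2 blocks stand in for the TFRecord rows the real tests iterate over.

import numpy as np

def golden_for(i):
    # Expected result of the mapped lambda x -> x + x on the block starting at i.
    return np.array([[i * 2, (i + 1) * 2], [(i + 2) * 2, (i + 3) * 2]])

for i in range(0, 12, 4):                       # three synthetic 2x2 sequential blocks
    x = np.array([[i, i + 1], [i + 2, i + 3]])
    out = x + x                                 # the operation under test
    assert np.array_equal(out, golden_for(i))
print("golden checks passed")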
cimat/data-visualization-patterns | [
"7ca363ffd50d3d2d9da48b650588cd5503449cb3"
] | [
"display-patterns/Discrete Quantities/Pruebas/A36Span_Chart_Seaborn.py"
] | [
"import seaborn as sns\nimport matplotlib.pyplot as plt\nfrom datos import data\nimport pandas as pd\n\nsns.set(style=\"white\")\nf, ax = plt.subplots(figsize=(6, 15))\nd=data('mtcars')\nsubset1, subset2, subset3= d[d.cyl==4], d[d.cyl==6], d[d.cyl==8]\ndatos=pd.DataFrame ({'Max': [max(subset1.mpg), max(subset2.mpg), max(subset3.mpg)],\n\t\t\t \t 'Min': [min(subset1.mpg), min(subset2.mpg), min(subset3.mpg)],\n\t\t\t \t 'Span': [max(subset1.mpg)-min(subset1.mpg), max(subset2.mpg)-min(subset2.mpg), max(subset3.mpg)-min(subset3.mpg)]})\ndatos.index=[4,6,8]\nsns.barplot(x=datos.index, y=datos.Max, color=\"#2ecc71\", linewidth=0)\nsns.barplot(x=datos.index, y=datos.Min, color=\"white\", linewidth=0)\nsns.axlabel('Cylindres','Milles Per Gall')\nplt.title('Range of Milles per Gallon (mpg) by Cylindres (cyl)', family='Serif', size=16)\nplt.show()"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.title"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
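The span chart above stacks a green "max" bar over a white "min" bar per cylinder count. The same effect can be sketched with a single bar per group using bottom=min and height=max-min, which avoids painting over the lower range; the tiny DataFrame below is a made-up stand-in for the mtcars data loaded via datos.data, and plain Axes label calls are used because sns.axlabel from the original is deprecated in newer seaborn releases.

import pandas as pd
import matplotlib.pyplot as plt

# Made-up mpg values per cylinder count, standing in for data('mtcars').
d = pd.DataFrame({'cyl': [4, 4, 6, 6, 8, 8],
                  'mpg': [33.9, 21.4, 21.0, 17.8, 19.2, 10.4]})
g = d.groupby('cyl')['mpg'].agg(['min', 'max'])

fig, ax = plt.subplots(figsize=(6, 6))
ax.bar(g.index.astype(str), g['max'] - g['min'], bottom=g['min'], color='#2ecc71')
ax.set_xlabel('Cylinders')
ax.set_ylabel('Miles per gallon')
ax.set_title('Range of miles per gallon (mpg) by cylinders (cyl)')
plt.tight_layout()
plt.show()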
rahulk29/sram22 | [
"9539f4bebd8577163fbab2181c1aef8f33e0ded4"
] | [
"sramgen/testbenches/column_mux_4/column_mux_4.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\nsaved = [\n \"din0\",\n \"din1\",\n \"din2\",\n \"din3\",\n \"sel0\",\n \"sel1\",\n \"sel_b0\",\n \"sel_b1\",\n \"dout\",\n]\n\n\ndef read_data(f):\n data = defaultdict(lambda: [])\n for line in f.readlines():\n values = line.split()\n ctr = 0\n for key in saved:\n if ctr == 0:\n data[\"time\"].append(float(values[ctr]))\n ctr += 1\n data[key].append(float(values[ctr]))\n ctr += 1\n return {k: np.array(v) for k, v in data.items()}\n\n\ndef read_test_data():\n with open(\"./column_mux_4.dat\") as f:\n return read_data(f)\n\n\ndef plot_data(data):\n plt.figure()\n plt.plot(data[\"time\"], data[\"sel0\"])\n plt.plot(data[\"time\"], data[\"sel1\"])\n plt.plot(data[\"time\"], data[\"dout\"])\n plt.legend([\"sel0\", \"sel1\", \"dout\"])\n plt.savefig(\"column_mux_4.png\")\n\n\nif __name__ == \"__main__\":\n data = read_test_data()\n plot_data(data)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
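read_data() in column_mux_4.py above walks each whitespace-separated line by hand: column 0 is time and the following columns map one-to-one onto the saved signal names. Assuming that layout (inferred from the loop, not from any simulator documentation), the same parsing can be sketched more compactly with np.loadtxt:

import numpy as np

saved = ["din0", "din1", "din2", "din3", "sel0", "sel1", "sel_b0", "sel_b1", "dout"]

def read_data_loadtxt(path):
    # Each row: time followed by one value per saved signal.
    raw = np.loadtxt(path, ndmin=2)             # shape (n_points, 1 + len(saved))
    data = {"time": raw[:, 0]}
    for i, key in enumerate(saved):
        data[key] = raw[:, i + 1]
    return data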
comword/TCD20CS4CS7-MLFinal | [
"bb1b1cba25ce4c5cf0338b7b75af3b6f12931c96",
"bb1b1cba25ce4c5cf0338b7b75af3b6f12931c96"
] | [
"src/bert/train_early_access.py",
"src/xlnet/train_voted_up.py"
] | [
"import numpy as np\nfrom keras_bert import load_trained_model_from_checkpoint\nimport os\n\nfrom dataloader import Tokeniser, load_data\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.layers import *\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\npretrained_path = 'pretrained/uncased_L-12_H-768_A-12'\nconfig_path = os.path.join(pretrained_path, 'bert_config.json')\ncheckpoint_path = os.path.join(pretrained_path, 'bert_model.ckpt')\nvocab_path = os.path.join(pretrained_path, 'vocab.txt')\n\nSEQ_LEN = 128\nBATCH_SIZE = 25\nEPOCHS = 5\nLR = 1e-5\nMODEL_NAME = \"bert_early_access\"\n\nbert_model = load_trained_model_from_checkpoint(\n config_path,\n checkpoint_path,\n training=True,\n trainable=True,\n seq_len=SEQ_LEN,\n)\n\ntokeniser = Tokeniser(vocab_path)\nX, y = load_data(tokeniser, 'data/reviews_112_trans-en.jl', target_label='early_access', max_len=SEQ_LEN, batch_size=BATCH_SIZE)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n\ninputs = bert_model.inputs[:2]\ndense = bert_model.get_layer('NSP-Dense').output\noutputs = Dense(units=2, activation='softmax')(dense)\n\nmodel = Model(inputs, outputs)\nmodel.compile(\n optimizer=Adam(LR),\n loss='sparse_categorical_crossentropy',\n metrics=['sparse_categorical_accuracy'],\n)\nmodel.summary()\n\nmcp_save = ModelCheckpoint(\"result/\"+MODEL_NAME+'.best.h5', save_best_only=True, monitor='val_sparse_categorical_accuracy', mode='max')\n\nmodel.fit(\n [X_train, np.zeros_like(X_train)],\n y_train,\n epochs=EPOCHS,\n validation_split=0.1,\n batch_size=BATCH_SIZE,\n callbacks=[EarlyStopping(monitor='val_loss', patience=4), mcp_save]\n)\n\nmodel.save_weights(\"result/\"+MODEL_NAME+\".h5\")\n\npredicts = model.predict([X_test, np.zeros_like(X_test)], verbose=True).argmax(axis=-1)\n\ntp, fp, fn, tn = 0, 0, 0, 0\nfor i in range(len(predicts)):\n if predicts[i] == 1:\n if y_test[i] == 1:\n tp += 1\n else:\n fp += 1\n else:\n if y_test[i] == 1:\n fn += 1\n else:\n tn += 1\n\nprint('Confusion matrix:')\nprint('[{}, {}]'.format(tp, fp))\nprint('[{}, {}]'.format(fn, tn))\n\nprint('Accuracy: %.2f' % (100.0 * (tp + tn) / len(results)))",
"import numpy as np\n\nfrom keras_xlnet.backend import keras\nfrom keras_xlnet import PretrainedList, get_pretrained_paths\nfrom keras_xlnet import Tokenizer\n\nfrom sklearn.model_selection import train_test_split\n\nfrom dataloader import load_data, get_X_array\nfrom model import get_xlnet_model\nfrom model_eval import model_eval\n\nEPOCH = 10\nBATCH_SIZE = 20\nSEQ_LEN = 128\nLR = 5e-6\nMODEL_NAME = 'xlnet_voted_up'\n\npaths = get_pretrained_paths(PretrainedList.en_cased_base)\ntokenizer = Tokenizer(paths.vocab)\n\nmodel = get_xlnet_model(paths, BATCH_SIZE, SEQ_LEN, LR)\n\nX, y = load_data(tokenizer, 'data/reviews_112_trans-en.jl', SEQ_LEN=SEQ_LEN, target_label='voted_up')\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n\nmcp_save = keras.callbacks.ModelCheckpoint(\"result/\"+MODEL_NAME+'.best.h5', save_best_only=True, monitor='val_sparse_categorical_accuracy', mode='max')\n\nmodel.fit(\n get_X_array(X_train),\n y_train,\n epochs=EPOCH,\n batch_size=BATCH_SIZE,\n validation_split=0.1,\n callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', patience=2), mcp_save]\n)\n\nmodel.save_weights(\"result/\"+MODEL_NAME+\".h5\")\n\nmodel_eval(model, get_X_array(X_test), y_test, BATCH_SIZE)"
] | [
[
"numpy.zeros_like",
"sklearn.model_selection.train_test_split"
],
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
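The final accuracy print in `train_early_access.py` above divides by `len(results)`, a name that is never defined in that script (the predictions are stored in `predicts`). As a hedged, self-contained sketch of the same evaluation, assuming `predicts` and `y_test` are equal-length arrays of 0/1 labels:

```python
import numpy as np

def confusion_and_accuracy(predicts, y_test):
    """Binary confusion matrix and accuracy (%) for 0/1 label arrays."""
    predicts = np.asarray(predicts)
    y_test = np.asarray(y_test)
    tp = int(np.sum((predicts == 1) & (y_test == 1)))
    fp = int(np.sum((predicts == 1) & (y_test == 0)))
    fn = int(np.sum((predicts == 0) & (y_test == 1)))
    tn = int(np.sum((predicts == 0) & (y_test == 0)))
    accuracy = 100.0 * (tp + tn) / len(predicts)
    return (tp, fp, fn, tn), accuracy
```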
qing42102/deep_learning_examples | [
"d7695673e0c4bfe211f303ea5444765e8d4fe5f4"
] | [
"Logistic_Regression.py"
] | [
"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport numpy as np\n# import tensorflow as tf\nfrom PIL import Image\nimport os\nimport matplotlib.pyplot as plt\nimport pickle\n\n# %%\ndef load_images(path: str) -> list:\n '''\n Load images from a directory. Normalize the image to [0, 1]. Return a list of images array\n '''\n imgs = []\n for f in os.listdir(path):\n ext = os.path.splitext(f)[1]\n if ext.lower() == \".jpg\":\n file_name = os.path.join(path,f)\n \n # Conver the image to an array of floats normalized to [0, 1]\n image = Image.open(file_name).convert(\"F\")\n image_array = np.array(image.getdata())/255.\n\n imgs.append(image_array)\n\n return imgs\n\ntrain_imgs = np.array(load_images(\"train_data/\"))\ntest_imgs = np.array(load_images(\"test_data/\"))\n\nos.chdir(\"labels/\")\ntrain_label = np.loadtxt(\"train_label.txt\")\ntest_label = np.loadtxt(\"test_label.txt\")\nos.chdir(\"..\")\n\nprint(train_imgs.shape, train_label.shape)\nprint(test_imgs.shape, test_label.shape)\n\n# %%\ndef softmax_func(X: np.ndarray, W: np.ndarray) -> np.ndarray:\n '''\n Softmax function for calculating the posterior probability\n X should be Mx(N+1)\n Return a MxK matrix\n '''\n\n exp = np.exp(X @ W)\n sum = np.sum(np.exp(X @ W), axis=1)\n return exp/sum[:, None]\n\ndef logistic_loss(X: np.ndarray, W: np.ndarray, y: np.ndarray) -> float:\n '''\n Logistic regression cross-entropy loss\n '''\n\n log_likelihood = np.log(softmax_func(X, W))\n\n # Create the Kx1 binary vector with 1‐of‐K encoding\n t = np.zeros((y.shape[0], W.shape[1]))\n t[np.arange(y.size), y.astype(int)-1] = 1\n\n total_loss = np.tensordot(t, log_likelihood, axes=2)\n \n return -total_loss/X.shape[0]\n\ndef logistic_loss_grad(X: np.ndarray, W: np.ndarray, y: np.ndarray) -> np.ndarray:\n '''\n Calculate the gradient for each class\n Return a (N+1)xK matrix\n '''\n\n # Create the Kx1 binary vector with 1‐of‐K encoding\n t = np.zeros((y.shape[0], W.shape[1]))\n t[np.arange(y.size), y.astype(int)-1] = 1\n\n y_diff = t-softmax_func(X, W)\n total_grad = X.T @ y_diff\n \n return -total_grad/X.shape[0]\n\ndef classification_accuracy(X: np.ndarray, W: np.ndarray, y: np.ndarray) -> float:\n '''\n Classification accuracy for the predicted and true labels\n '''\n\n # Select the largest probability\n y_pred = np.argmax(softmax_func(X, W), axis=1)+1\n \n accuracy = np.sum(y_pred == y)/X.shape[0]\n\n return accuracy*100\n\ndef digit_accuracy(X: np.ndarray, W: np.ndarray, y: np.ndarray):\n '''\n Classification accuracy for each of the digits\n '''\n\n # Select the largest probability\n y_pred = np.argmax(softmax_func(X, W), axis=1)+1\n \n for i in range(W.shape[1]):\n y_i = y[y==i+1]\n y_pred_i = y_pred[y==i+1]\n accuracy = np.sum(y_pred_i == y_i)/y_i.shape[0]\n print(\"Digit\", i+1, \"accuracy:\", accuracy)\n \n print(\"\\n\")\n\n\ndef gradient_descent(train_X: np.ndarray, train_y: np.ndarray, test_X: np.ndarray, test_y: np.ndarray,\\\n W: np.ndarray, tolerance: float):\n '''\n Steepest gradient descent with a stepsize of inverse square root of iteration number\n The stopping condition is the residual of the gradient and a maximum iteration number of 200\n\n X should be Mx(N+1)\n W should be (N+1)xK\n y should be Mx1\n '''\n\n # Add the bias coefficient to the data\n train_X = np.hstack((train_X, np.ones((train_X.shape[0], 1))))\n test_X = np.hstack((test_X, np.ones((test_X.shape[0], 1))))\n\n training_accuracy_list = []\n testing_accuracy_list = []\n training_loss_list = []\n testing_loss_list = []\n\n 
grad = logistic_loss_grad(train_X, W, train_y)\n\n # Calculate the residual of the gradient\n res = np.linalg.norm(grad)\n\n iteration = 1\n while res > tolerance and iteration != 200:\n alpha = 1/np.sqrt(iteration)\n W = W - alpha*grad\n\n grad = logistic_loss_grad(train_X, W, train_y)\n res = np.linalg.norm(grad)\n\n training_accuracy = classification_accuracy(train_X, W, train_y)\n training_loss = logistic_loss(train_X, W, train_y)\n\n testing_accuracy = classification_accuracy(test_X, W, test_y)\n testing_loss = logistic_loss(test_X, W, test_y)\n\n training_accuracy_list.append(training_accuracy)\n testing_accuracy_list.append(testing_accuracy)\n training_loss_list.append(training_loss)\n testing_loss_list.append(testing_loss)\n\n print(iteration)\n print(\"Norm of gradient:\", res)\n print(\"Training Accuracy:\", training_accuracy, \"Training Loss:\", training_loss)\n print(\"Testing Accuracy:\", testing_accuracy, \"Testing Loss:\", testing_loss)\n print(\"\\n\")\n\n iteration += 1\n\n print(\"Training digits\")\n digit_accuracy(train_X, W, train_y)\n print(\"Testing digits\")\n digit_accuracy(test_X, W, test_y)\n\n return training_accuracy_list, testing_accuracy_list, training_loss_list, testing_loss_list, W\n\n\n# %%\n\nnum_features = test_imgs.shape[1]\nnum_classes = len(np.unique(test_label))\n\n# Initialize the weight vectors including the bias \nW = np.zeros(shape=(num_features+1, num_classes))\n\nresults = gradient_descent(train_X=train_imgs, train_y=train_label, test_X=test_imgs, \\\n test_y=test_label, W=W, tolerance=10**-2)\n\ntraining_accuracy, testing_accuracy, training_loss, testing_loss, W_optimal = results\niteration = np.arange(len(training_accuracy))\n\nplt.figure(figsize=(8, 5))\nplt.plot(iteration, training_accuracy, label=\"Training accuracy\")\nplt.plot(iteration, testing_accuracy, label=\"Testing accuracy\")\nplt.xlabel(\"Iteration\", fontsize=14)\nplt.ylabel(\"Percentage\", fontsize=14)\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(8, 5))\nplt.plot(iteration, training_loss, label=\"Training loss\")\nplt.plot(iteration, testing_loss, label=\"Testing loss\")\nplt.xlabel(\"Iteration\", fontsize=14)\nplt.ylabel(\"Loss\", fontsize=14)\nplt.legend()\nplt.show()\n\nfor i in range(num_classes):\n plt.imshow(W_optimal[:num_features, i].reshape(28,28))\n plt.colorbar()\n plt.show()\n\nfilehandler = open(\"multiclass_parameters.txt\",\"wb\")\npickle.dump(W_optimal, filehandler)\nfilehandler.close()\n\n# %%\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.unique",
"numpy.arange",
"numpy.linalg.norm",
"numpy.ones",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"numpy.tensordot",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
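The script above relies on the analytic softmax cross-entropy gradient `grad = -X.T @ (t - softmax(X @ W)) / M` with 1-of-K targets `t`. The minimal sketch below (illustrative, not from the repository) re-implements the three small functions on random data and checks the gradient against central finite differences; the softmax is the numerically stabilised variant.

```python
import numpy as np

def softmax_func(X, W):
    e = np.exp(X @ W - np.max(X @ W, axis=1, keepdims=True))  # stabilised softmax
    return e / e.sum(axis=1, keepdims=True)

def one_hot(y, K):
    t = np.zeros((y.size, K))
    t[np.arange(y.size), y - 1] = 1  # labels are 1..K, as in the script
    return t

def loss(X, W, y):
    t = one_hot(y, W.shape[1])
    return -np.tensordot(t, np.log(softmax_func(X, W)), axes=2) / X.shape[0]

def grad(X, W, y):
    t = one_hot(y, W.shape[1])
    return -X.T @ (t - softmax_func(X, W)) / X.shape[0]

rng = np.random.default_rng(0)
X, W = rng.normal(size=(20, 5)), rng.normal(size=(5, 3))
y = rng.integers(1, 4, size=20)          # labels in {1, 2, 3}

eps, num = 1e-6, np.zeros_like(W)
for i in range(W.shape[0]):
    for j in range(W.shape[1]):
        Wp, Wm = W.copy(), W.copy()
        Wp[i, j] += eps
        Wm[i, j] -= eps
        num[i, j] = (loss(X, Wp, y) - loss(X, Wm, y)) / (2 * eps)

print(np.max(np.abs(num - grad(X, W, y))))  # should be tiny (~1e-8 or smaller)
```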
jzpang/forte | [
"489fb9cafba6faf5739bda935836b61b5e3d02b6",
"489fb9cafba6faf5739bda935836b61b5e3d02b6"
] | [
"examples/data_augmentation/reinforcement/main.py",
"forte/data/ontology/core.py"
] | [
"# Copyright 2020 The Forte Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nExample of building a reinforcement learning based,\ndata augmentation enhanced sentence classifier\nbased on pre-trained BERT model.\n\"\"\"\nimport argparse\nimport functools\nimport logging\nimport os\n\nimport torch\nimport torch.nn.functional as F\nimport texar.torch as tx\nfrom transformers import BertForMaskedLM\n\nfrom config import config_data, config_classifier\nfrom utils import model_utils\nfrom forte.models.da_rl import MetaAugmentationWrapper, TexarBertMetaModule\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--pretrained-model-name\",\n type=str,\n default=\"bert-base-uncased\",\n choices=tx.modules.BERTEncoder.available_checkpoints(),\n help=\"Name of the pre-trained downstream checkpoint to load.\",\n)\nparser.add_argument(\n \"--output-dir\",\n default=\"output/\",\n help=\"The output directory where the model checkpoints will be written.\",\n)\nparser.add_argument(\n \"--do-train\", action=\"store_true\", help=\"Whether to run training.\"\n)\nparser.add_argument(\n \"--do-eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\"\n)\nparser.add_argument(\n \"--do-test\",\n action=\"store_true\",\n help=\"Whether to run test on the test set.\",\n)\nparser.add_argument(\n \"--augmentation-model-name\",\n type=str,\n default=\"bert-base-uncased\",\n choices=tx.modules.BERTEncoder.available_checkpoints(),\n help=\"Name of the pre-trained augmentation model checkpoint to load.\",\n)\nparser.add_argument(\n \"--num-aug\",\n type=int,\n default=4,\n help=\"number of augmentation samples when fine-tuning aug model\",\n)\nparser.add_argument(\n \"--classifier-pretrain-epoch\",\n type=int,\n default=10,\n help=\"number of epochs to pretrain the classifier\",\n)\nargs = parser.parse_args()\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nlogging.root.setLevel(logging.INFO)\n\n\nclass RLAugmentClassifierTrainer:\n def __init__(self):\n self._prepare_data_iterator()\n self._init_aug_model()\n self._init_classifier()\n\n def _prepare_data_iterator(self):\n tx.utils.maybe_create_dir(args.output_dir)\n\n # Loads data\n num_train_data = config_data.num_train_data\n self.num_train_steps = int(\n num_train_data\n / config_data.train_batch_size\n * config_data.max_train_epoch\n )\n\n train_dataset = tx.data.RecordData(\n hparams=config_data.train_hparam, device=device\n )\n val_dataset = tx.data.RecordData(\n hparams=config_data.eval_hparam, device=device\n )\n test_dataset = tx.data.RecordData(\n hparams=config_data.test_hparam, device=device\n )\n self.iterator = tx.data.DataIterator(\n {\"train\": train_dataset, \"dev\": val_dataset, \"test\": test_dataset}\n )\n\n self.val_data_iterator = tx.data.DataIterator({\"dev\": val_dataset})\n self.val_data_iterator.switch_to_dataset(\"dev\")\n\n def _init_aug_model(self):\n # pylint: disable=protected-access\n # Builds data augmentation BERT\n aug_model = 
BertForMaskedLM.from_pretrained(\n args.augmentation_model_name\n )\n aug_model.to(device)\n aug_tokenizer = tx.data.BERTTokenizer(\n pretrained_model_name=args.augmentation_model_name\n )\n input_mask_ids = aug_tokenizer._map_token_to_id(\"[MASK]\")\n # Builds augmentation optimizer\n aug_lr = 4e-5\n param_optimizer = list(aug_model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in param_optimizer\n if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.01,\n },\n {\n \"params\": [\n p\n for n, p in param_optimizer\n if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n aug_optim = tx.core.BertAdam(\n optimizer_grouped_parameters,\n betas=(0.9, 0.999),\n eps=1e-6,\n lr=aug_lr,\n )\n # Builds data augmentation wrapper\n self.aug_wrapper = MetaAugmentationWrapper(\n aug_model, aug_optim, input_mask_ids, device, args.num_aug\n )\n\n def _init_classifier(self):\n # Builds BERT for classification task.\n config_downstream = {\n k: v\n for k, v in config_classifier.__dict__.items()\n if not k.startswith(\"__\") and k != \"hyperparams\"\n }\n\n self.classifier = tx.modules.BERTClassifier(\n pretrained_model_name=args.pretrained_model_name,\n hparams=config_downstream,\n )\n self.classifier.to(device)\n\n # Builds learning rate decay scheduler\n classifier_lr = 4e-5\n vars_with_decay = []\n vars_without_decay = []\n for name, param in self.classifier.named_parameters():\n if \"layer_norm\" in name or name.endswith(\"bias\"):\n vars_without_decay.append(param)\n else:\n vars_with_decay.append(param)\n opt_params = [\n {\n \"params\": vars_with_decay,\n \"weight_decay\": 0.01,\n },\n {\n \"params\": vars_without_decay,\n \"weight_decay\": 0.0,\n },\n ]\n self.optim = tx.core.BertAdam(\n opt_params, betas=(0.9, 0.999), eps=1e-6, lr=classifier_lr\n )\n num_warmup_steps = int(\n self.num_train_steps * config_data.warmup_proportion\n )\n self.scheduler = torch.optim.lr_scheduler.LambdaLR(\n self.optim,\n functools.partial(\n model_utils.get_lr_multiplier,\n total_steps=self.num_train_steps,\n warmup_steps=num_warmup_steps,\n ),\n )\n\n def pre_train_classifier_epoch(self):\n r\"\"\"Pre-trains model on the training set\n for better weight initialization.\n \"\"\"\n self.iterator.switch_to_dataset(\"train\")\n self.classifier.train()\n\n for _ in range(args.classifier_pretrain_epoch):\n for batch in self.iterator:\n self.optim.zero_grad()\n\n input_ids = batch[\"input_ids\"]\n segment_ids = batch[\"segment_ids\"]\n labels = batch[\"label_ids\"]\n input_length = (1 - (input_ids == 0).int()).sum(dim=1)\n\n logits, _ = self.classifier(\n input_ids, input_length, segment_ids\n )\n loss = self._compute_loss(logits, labels)\n\n loss.backward()\n self.optim.step()\n self.scheduler.step()\n\n def train_epoch(self):\n r\"\"\"Trains on the training set, and evaluates on the validation set\n periodically.\n \"\"\"\n self.iterator.switch_to_dataset(\"train\")\n self.classifier.train()\n self.optim.zero_grad()\n\n for batch in self.iterator:\n input_ids = batch[\"input_ids\"]\n input_mask = batch[\"input_mask\"]\n segment_ids = batch[\"segment_ids\"]\n labels = batch[\"label_ids\"]\n\n # Train augmentation model params phi.\n self.aug_wrapper.reset_model()\n # Iterate over training instances.\n num_instances = len(input_ids)\n for i in range(num_instances):\n features = (\n input_ids[i],\n input_mask[i],\n segment_ids[i],\n labels[i],\n )\n\n # Augmented instance with params 
phi exposed\n (\n aug_probs,\n input_mask_aug,\n segment_ids_aug,\n label_ids_aug,\n ) = self.aug_wrapper.augment_instance(features)\n\n # Compute classifier loss.\n self.classifier.zero_grad()\n input_length_aug = ((input_mask_aug == 1).int()).sum(dim=1)\n logits, _ = self.classifier(\n aug_probs, input_length_aug, segment_ids_aug\n )\n loss = self._compute_loss(logits, label_ids_aug)\n # Update classifier params on meta_model.\n meta_model = TexarBertMetaModule(self.classifier)\n meta_model = self.aug_wrapper.update_meta_model(\n meta_model, loss, self.classifier, self.optim\n )\n\n # Compute grads of aug_model on validation data.\n for val_batch in self.val_data_iterator: # one batch\n val_input_ids = val_batch[\"input_ids\"]\n val_segment_ids = val_batch[\"segment_ids\"]\n val_labels = val_batch[\"label_ids\"]\n val_input_length = (1 - (val_input_ids == 0).int()).sum(\n dim=1\n )\n val_logits, _ = meta_model(\n val_input_ids, val_input_length, val_segment_ids\n )\n val_loss = self._compute_loss(val_logits, val_labels)\n val_loss = (\n val_loss\n / num_instances\n / args.num_aug\n / len(self.val_data_iterator)\n )\n val_loss.backward()\n\n # Update aug_model param phi.\n self.aug_wrapper.update_phi()\n\n # Train classifier with augmented batch\n (\n input_probs,\n input_masks,\n segment_ids,\n label_ids,\n ) = self.aug_wrapper.augment_batch(\n (input_ids, input_mask, segment_ids, labels)\n )\n\n input_length = ((input_masks == 1).int()).sum(dim=1)\n self.optim.zero_grad()\n logits, _ = self.classifier(input_probs, input_length, segment_ids)\n loss = self._compute_loss(logits, label_ids)\n loss.backward()\n self.optim.step()\n self.scheduler.step()\n self._display_logging(loss)\n\n @torch.no_grad()\n def eval_epoch(self):\n \"\"\"Evaluates on the dev set.\"\"\"\n self.iterator.switch_to_dataset(\"dev\")\n self.classifier.eval()\n\n nsamples = 0\n avg_rec = tx.utils.AverageRecorder()\n for batch in self.iterator:\n input_ids = batch[\"input_ids\"]\n segment_ids = batch[\"segment_ids\"]\n labels = batch[\"label_ids\"]\n input_length = (1 - (input_ids == 0).int()).sum(dim=1)\n\n logits, preds = self.classifier(\n input_ids, input_length, segment_ids\n )\n loss = self._compute_loss(logits, labels)\n accu = tx.evals.accuracy(labels, preds)\n\n batch_size = input_ids.size()[0]\n avg_rec.add([accu, loss], batch_size)\n nsamples += batch_size\n logging.info(\n \"eval accu: %.4f; loss: %.4f; nsamples: %d\",\n avg_rec.avg(0),\n avg_rec.avg(1),\n nsamples,\n )\n\n @torch.no_grad()\n def test_epoch(self, test_file):\n \"\"\"Does predictions on the test set.\"\"\"\n self.iterator.switch_to_dataset(\"test\")\n self.classifier.eval()\n\n _all_preds = []\n nsamples = 0\n avg_rec = tx.utils.AverageRecorder()\n for batch in self.iterator:\n input_ids = batch[\"input_ids\"]\n segment_ids = batch[\"segment_ids\"]\n labels = batch[\"label_ids\"]\n input_length = (1 - (input_ids == 0).int()).sum(dim=1)\n\n logits, preds = self.classifier(\n input_ids, input_length, segment_ids\n )\n loss = self._compute_loss(logits, labels)\n accu = tx.evals.accuracy(labels, preds)\n\n batch_size = input_ids.size()[0]\n avg_rec.add([accu, loss], batch_size)\n nsamples += batch_size\n\n _all_preds.extend(preds.tolist())\n\n logging.info(\n \"test accu: %.4f; loss: %.4f; nsamples: %d\",\n avg_rec.avg(0),\n avg_rec.avg(1),\n nsamples,\n )\n\n output_file = os.path.join(args.output_dir, test_file)\n with open(output_file, \"w+\") as writer:\n writer.write(\"\\n\".join(str(p) for p in _all_preds))\n logging.info(\"test output 
written to %s\", output_file)\n\n def _compute_loss(self, logits, labels):\n r\"\"\"Compute loss.\"\"\"\n if self.classifier.is_binary:\n loss = F.binary_cross_entropy(\n logits.view(-1), labels.view(-1), reduction=\"mean\"\n )\n else:\n loss = F.cross_entropy(\n logits.view(-1, self.classifier.num_classes),\n labels.view(-1),\n reduction=\"mean\",\n )\n return loss\n\n def _display_logging(self, loss):\n step = self.scheduler.last_epoch\n dis_steps = config_data.display_steps\n if dis_steps > 0 and step % dis_steps == 0:\n logging.info(\"step: %d; loss: %f\", step, loss)\n\n eval_steps = config_data.eval_steps\n if eval_steps > 0 and step % eval_steps == 0:\n self._eval_epoch()\n self.classifier.train()\n\n\ndef main():\n trainer = RLAugmentClassifierTrainer()\n trainer.pre_train_classifier_epoch()\n if args.do_train:\n for k in range(config_data.max_train_epoch):\n logging.info(\"training epoch %d\", k)\n trainer.train_epoch()\n if args.do_eval:\n trainer.eval_epoch()\n if args.do_test:\n trainer.test_epoch(\"test_results.tsv\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright 2019 The Forte Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nDefines the basic data structures and interfaces for the Forte data\nrepresentation system.\n\"\"\"\nimport uuid\nimport warnings\nfrom abc import abstractmethod, ABC\nfrom collections.abc import MutableSequence, MutableMapping\nfrom dataclasses import dataclass\nfrom typing import (\n Iterable,\n Optional,\n Type,\n Hashable,\n TypeVar,\n Generic,\n Union,\n Dict,\n Iterator,\n get_type_hints,\n overload,\n List,\n)\n\nimport numpy as np\n\nfrom forte.common import PackDataException\nfrom forte.data.container import ContainerType, BasePointer\n\n__all__ = [\n \"Entry\",\n \"BaseLink\",\n \"BaseGroup\",\n \"LinkType\",\n \"GroupType\",\n \"EntryType\",\n \"Pointer\",\n \"MpPointer\",\n \"FDict\",\n \"FList\",\n \"MultiEntry\",\n]\n\nfrom forte.utils.utils import check_type\n\ndefault_entry_fields = [\n \"_Entry__pack\",\n \"_tid\",\n \"_embedding\",\n \"_span\",\n \"_parent\",\n \"_child\",\n \"_members\",\n \"_Entry__field_modified\",\n \"field_records\",\n \"creation_records\",\n \"_id_manager\",\n]\n\n\n@dataclass\nclass Entry(Generic[ContainerType]):\n r\"\"\"The base class inherited by all NLP entries. This is the main data type\n for all in-text NLP analysis results. The main sub-types are\n ``Annotation``, ``Link`` and ``Group``.\n\n An :class:`forte.data.ontology.top.Annotation` object represents a\n span in text.\n\n A :class:`forte.data.ontology.top.Link` object represents a binary\n link relation between two entries.\n\n A :class:`forte.data.ontology.top.Group` object represents a\n collection of multiple entries.\n\n Attributes:\n self.embedding: The embedding vectors (numpy array of floats) of this\n entry.\n\n Args:\n pack: Each entry should be associated with one pack upon creation.\n \"\"\"\n\n def __init__(self, pack: ContainerType):\n # The Entry should have a reference to the data pack, and the data pack\n # need to store the entries. In order to resolve the cyclic references,\n # we create a generic class EntryContainer to be the place holder of\n # the actual. 
Whether this entry can be added to the pack is delegated\n # to be checked by the pack.\n super().__init__()\n self.__pack: ContainerType = pack\n self._tid: int = uuid.uuid4().int\n self._embedding: np.ndarray = np.empty(0)\n self.pack._validate(self)\n self.pack.on_entry_creation(self)\n\n def regret_creation(self):\n self.__pack.regret_creation(self)\n\n def __getstate__(self):\n r\"\"\"In serialization, the pack is not serialize, and it will be set\n by the container.\n\n This also implies that it is not advised to serialize an entry on its\n own, without the ``Container`` as the context, there is little semantics\n remained in an entry.\n \"\"\"\n state = self.__dict__.copy()\n # During serialization, convert the numpy array as a list.\n emb = list(self._embedding.tolist())\n if len(emb) == 0:\n state.pop(\"_embedding\")\n else:\n state[\"_embedding\"] = emb\n state.pop(\"_Entry__pack\")\n return state\n\n def __setstate__(self, state):\n # Recover the internal __field_modified dict for the entry.\n # NOTE: the __pack will be set via set_pack from the Pack side.\n # self.__dict__['_Entry__field_modified'] = set()\n\n # During de-serialization, convert the list back to numpy array.\n if \"_embedding\" in state:\n state[\"_embedding\"] = np.array(state[\"_embedding\"])\n else:\n state[\"_embedding\"] = np.empty(0)\n self.__dict__.update(state)\n\n # using property decorator\n # a getter function for self._embedding\n @property\n def embedding(self):\n r\"\"\"Get the embedding vectors (numpy array of floats) of the entry.\"\"\"\n return self._embedding\n\n # a setter function for self._embedding\n @embedding.setter\n def embedding(self, embed):\n r\"\"\"Set the embedding vectors of the entry.\n\n Args:\n embed: The embedding vectors which can be numpy array of floats or\n list of floats.\n \"\"\"\n self._embedding = np.array(embed)\n\n @property\n def tid(self) -> int:\n \"\"\"\n Get the id of this entry.\n\n Returns:\n\n \"\"\"\n return self._tid\n\n @property\n def pack(self) -> ContainerType:\n return self.__pack\n\n @property\n def pack_id(self) -> int:\n \"\"\"\n Get the id of the pack that contains this entry.\n\n Returns:\n\n \"\"\"\n return self.__pack.pack_id # type: ignore\n\n def set_pack(self, pack: ContainerType):\n self.__pack = pack\n\n def as_pointer(self, from_entry: \"Entry\"):\n \"\"\"\n Return this entry as a pointer of this entry relative to the\n ``from_entry``.\n\n Args:\n from_entry: The entry to point from.\n\n Returns:\n A pointer to the this entry from the ``from_entry``.\n \"\"\"\n if isinstance(from_entry, MultiEntry):\n return MpPointer(\n from_entry.pack.get_pack_index(self.pack_id), self.tid\n )\n elif isinstance(from_entry, Entry):\n return Pointer(self.tid)\n\n def resolve_pointer(self, ptr: BasePointer):\n \"\"\"\n Resolve into an entry on the provided pointer ``ptr`` from this entry.\n\n Args:\n ptr:\n\n Returns:\n\n \"\"\"\n if isinstance(ptr, Pointer):\n return self.pack.get_entry(ptr.tid)\n else:\n raise TypeError(\n f\"Unsupported pointer type {ptr.__class__} for entry\"\n )\n\n def entry_type(self) -> str:\n \"\"\"Return the full name of this entry type.\"\"\"\n module = self.__class__.__module__\n if module is None or module == str.__class__.__module__:\n return self.__class__.__name__\n else:\n return module + \".\" + self.__class__.__name__\n\n def _check_attr_type(self, key, value):\n \"\"\"\n Use the type hint to validate whether the provided value is as expected.\n\n Args:\n key: The field name.\n value: The field value.\n\n Returns:\n\n 
\"\"\"\n if key not in default_entry_fields:\n hints = get_type_hints(self.__class__)\n if key not in hints.keys():\n warnings.warn(\n f\"Base on attributes in entry definition, \"\n f\"the [{key}] attribute_name does not exist in the \"\n f\"[{type(self).__name__}] that you specified to add to.\"\n )\n is_valid = check_type(value, hints[key])\n if not is_valid:\n warnings.warn(\n f\"Based on type annotation, \"\n f\"the [{key}] attribute of [{type(self).__name__}] \"\n f\"should be [{hints[key]}], but got [{type(value)}].\"\n )\n\n def __setattr__(self, key, value):\n self._check_attr_type(key, value)\n\n if isinstance(value, Entry):\n if value.pack == self.pack:\n # Save a pointer to the value from this entry.\n self.__dict__[key] = Pointer(value.tid)\n else:\n raise PackDataException(\n \"An entry cannot refer to entries in another data pack.\"\n )\n else:\n super().__setattr__(key, value)\n\n # We add the record to the system.\n if key not in default_entry_fields:\n self.__pack.record_field(self.tid, key)\n\n def __getattribute__(self, item):\n try:\n v = super().__getattribute__(item)\n except AttributeError:\n # For all unknown attributes, return None.\n return None\n\n if isinstance(v, BasePointer):\n # Using the pointer to get the entry.\n return self.resolve_pointer(v)\n else:\n return v\n\n def __eq__(self, other):\n r\"\"\"The eq function for :class:`Entry` objects.\n To be implemented in each subclass.\n \"\"\"\n if other is None:\n return False\n\n return (type(self), self._tid) == (type(other), other.tid)\n\n def __lt__(self, other):\n r\"\"\"By default, compared based on type string.\"\"\"\n return (str(type(self))) < (str(type(other)))\n\n def __hash__(self) -> int:\n r\"\"\"The hash function for :class:`Entry` objects.\n To be implemented in each subclass.\n \"\"\"\n return hash((type(self), self._tid))\n\n @property\n def index_key(self) -> Hashable:\n # Think about how to use the index key carefully.\n return self._tid\n\n\nclass MultiEntry(Entry, ABC):\n def __setattr__(self, key, value):\n \"\"\"\n Handle the special sub-entry case in the multi pack case.\n\n Args:\n key:\n value:\n\n Returns:\n\n \"\"\"\n self._check_attr_type(key, value)\n\n if isinstance(value, Entry):\n # Save a pointer of the value.\n self.__dict__[key] = value.as_pointer(self)\n else:\n super().__setattr__(key, value)\n\n def as_pointer(self, from_entry: \"Entry\") -> \"Pointer\":\n \"\"\"\n Get a pointer of the entry relative to this entry\n\n Args:\n from_entry: The entry relative from.\n\n Returns:\n A pointer relative to the this entry.\n \"\"\"\n if isinstance(from_entry, MultiEntry):\n return Pointer(self.tid)\n elif isinstance(from_entry, Entry):\n raise ValueError(\n \"Do not support reference a multi pack entry from an entry.\"\n )\n\n def resolve_pointer(self, ptr: BasePointer) -> Entry:\n if isinstance(ptr, Pointer):\n return self.pack.get_entry(ptr.tid)\n elif isinstance(ptr, MpPointer):\n return self.pack.packs[ptr.pack_index].get_entry(ptr.tid)\n else:\n raise TypeError(f\"Unknown pointer type {ptr.__class__}\")\n\n\nEntryType = TypeVar(\"EntryType\", bound=Entry)\n\nParentEntryType = TypeVar(\"ParentEntryType\", bound=Entry)\n\n\n# TODO: Cannot pickle with FList[CorefQuestionAnswers], have generic problems.\nclass FList(Generic[ParentEntryType], MutableSequence):\n \"\"\"\n FList allows the elements to be Forte entries. 
FList will internally\n stores the entry as their tid to avoid nesting.\n \"\"\"\n\n def __init__(\n self,\n parent_entry: ParentEntryType,\n data: Optional[Iterable[EntryType]] = None,\n ):\n super().__init__()\n self.__parent_entry = parent_entry\n self.__data: List[BasePointer] = []\n if data is not None:\n self.__data = [d.as_pointer(self.__parent_entry) for d in data]\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state.pop(\"_FList__parent_entry\")\n return state\n\n def insert(self, index: int, entry: EntryType):\n self.__data.insert(index, entry.as_pointer(self.__parent_entry))\n\n @overload\n @abstractmethod\n def __getitem__(self, i: int) -> EntryType:\n ...\n\n @overload\n @abstractmethod\n def __getitem__(self, s: slice) -> MutableSequence:\n ...\n\n def __getitem__(\n self, index: Union[int, slice]\n ) -> Union[EntryType, MutableSequence]:\n if isinstance(index, slice):\n return [\n self.__parent_entry.resolve_pointer(d)\n for d in self.__data[index]\n ]\n else:\n return self.__parent_entry.resolve_pointer(self.__data[index])\n\n def __setitem__(\n self,\n index: Union[int, slice],\n value: Union[EntryType, Iterable[EntryType]],\n ) -> None:\n # pylint: disable=isinstance-second-argument-not-valid-type\n # TODO: Disable until fix: https://github.com/PyCQA/pylint/issues/3507\n if isinstance(index, int):\n # Assert for mypy: https://github.com/python/mypy/issues/7858\n assert isinstance(value, Entry)\n self.__data[index] = value.as_pointer(self.__parent_entry)\n else:\n assert isinstance(value, Iterable)\n self.__data[index] = [\n v.as_pointer(self.__parent_entry) for v in value\n ]\n\n def __delitem__(self, index: Union[int, slice]) -> None:\n del self.__data[index]\n\n def __len__(self) -> int:\n return len(self.__data)\n\n\nKeyType = TypeVar(\"KeyType\", bound=Hashable)\nValueType = TypeVar(\"ValueType\", bound=Entry)\n\n\nclass FDict(Generic[KeyType, ValueType], MutableMapping):\n \"\"\"\n FDict allows the values to be Forte entries. FDict will internally\n stores the entry as their tid to avoid nesting. Note that key is not\n supported to be entries now.\n \"\"\"\n\n def __init__(\n self,\n parent_entry: ParentEntryType,\n data: Optional[Dict[KeyType, ValueType]] = None,\n ):\n super().__init__()\n\n self.__parent_entry = parent_entry\n self.__data: Dict[KeyType, BasePointer] = {}\n\n if data is not None:\n self.__data = {\n k: v.as_pointer(self.__parent_entry) for k, v in data.items()\n }\n\n def __setitem__(self, k: KeyType, v: ValueType) -> None:\n try:\n self.__data[k] = v.as_pointer(self.__parent_entry)\n except AttributeError as e:\n raise AttributeError(\n f\"Item of the FDict must be of type entry, \"\n f\"got {v.__class__}\"\n ) from e\n\n def __delitem__(self, k: KeyType) -> None:\n del self.__data[k]\n\n def __getitem__(self, k: KeyType) -> ValueType:\n return self.__parent_entry.resolve_pointer(self.__data[k])\n\n def __len__(self) -> int:\n return len(self.__data)\n\n def __iter__(self) -> Iterator[KeyType]:\n yield from self.__data\n\n\nclass Pointer(BasePointer):\n \"\"\"\n A pointer that points to an entry in the current pack, this is basically\n containing the entry's tid.\n \"\"\"\n\n def __init__(self, tid: int):\n self._tid: int = tid\n\n @property\n def tid(self):\n return self._tid\n\n def __str__(self):\n return str(self.tid)\n\n\nclass MpPointer(BasePointer):\n \"\"\"\n Multi pack Pointer. A pointer that refers to an entry of one of the pack in\n the multi pack. 
This contains the pack's index and the entries' tid.\n \"\"\"\n\n def __init__(self, pack_index: int, tid: int):\n self._pack_index: int = pack_index\n self._tid: int = tid\n\n @property\n def pack_index(self):\n return self._pack_index\n\n @property\n def tid(self):\n return self._tid\n\n def __str__(self):\n return str((self.pack_index, self.tid))\n\n\nclass BaseLink(Entry, ABC):\n def __init__(\n self,\n pack: ContainerType,\n parent: Optional[Entry] = None,\n child: Optional[Entry] = None,\n ):\n super().__init__(pack)\n\n if parent is not None:\n self.set_parent(parent)\n if child is not None:\n self.set_child(child)\n\n @abstractmethod\n def set_parent(self, parent: Entry):\n r\"\"\"This will set the `parent` of the current instance with given Entry\n The parent is saved internally by its pack specific index key.\n\n Args:\n parent: The parent entry.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def set_child(self, child: Entry):\n r\"\"\"This will set the `child` of the current instance with given Entry\n The child is saved internally by its pack specific index key.\n\n Args:\n child: The child entry\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_parent(self) -> Entry:\n r\"\"\"Get the parent entry of the link.\n\n Returns:\n An instance of :class:`Entry` that is the child of the link\n from the given :class:`DataPack`.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_child(self) -> Entry:\n r\"\"\"Get the child entry of the link.\n\n Returns:\n An instance of :class:`Entry` that is the child of the link\n from the given :class:`DataPack`.\n \"\"\"\n raise NotImplementedError\n\n def __eq__(self, other):\n if other is None:\n return False\n return (type(self), self.get_parent(), self.get_child()) == (\n type(other),\n other.get_parent(),\n other.get_child(),\n )\n\n def __hash__(self):\n return hash((type(self), self.get_parent(), self.get_child()))\n\n @property\n def index_key(self) -> int:\n return self.tid\n\n\nclass BaseGroup(Entry, Generic[EntryType]):\n r\"\"\"Group is an entry that represent a group of other entries. For example,\n a \"coreference group\" is a group of coreferential entities. Each group will\n store a set of members, no duplications allowed.\n\n This is the :class:`BaseGroup` interface. Specific member constraints are\n defined in the inherited classes.\n \"\"\"\n MemberType: Type[EntryType]\n\n def __init__(\n self, pack: ContainerType, members: Optional[Iterable[EntryType]] = None\n ):\n super().__init__(pack)\n if members is not None:\n self.add_members(members)\n\n @abstractmethod\n def add_member(self, member: EntryType):\n r\"\"\"Add one entry to the group.\n\n Args:\n member: One member to be added to the group.\n \"\"\"\n raise NotImplementedError\n\n def add_members(self, members: Iterable[EntryType]):\n r\"\"\"Add members to the group.\n\n Args:\n members: An iterator of members to be added to the group.\n \"\"\"\n for member in members:\n self.add_member(member)\n\n def __hash__(self):\n r\"\"\"The hash function of :class:`Group`.\n\n Users can define their own hash function by themselves but this must\n be consistent to :meth:`eq`.\n \"\"\"\n return hash((type(self), tuple(self.get_members())))\n\n def __eq__(self, other):\n r\"\"\"The eq function of :class:`Group`. 
By default, :class:`Group`\n objects are regarded as the same if they have the same type, members,\n and are generated by the same component.\n\n Users can define their own eq function by themselves but this must\n be consistent to :meth:`hash`.\n \"\"\"\n if other is None:\n return False\n return (type(self), self.get_members()) == (\n type(other),\n other.get_members(),\n )\n\n @abstractmethod\n def get_members(self) -> List[EntryType]:\n r\"\"\"Get the member entries in the group.\n\n Returns:\n Instances of :class:`Entry` that are the members of the\n group.\n \"\"\"\n raise NotImplementedError\n\n @property\n def index_key(self) -> int:\n return self.tid\n\n\nGroupType = TypeVar(\"GroupType\", bound=BaseGroup)\nLinkType = TypeVar(\"LinkType\", bound=BaseLink)\n"
] | [
[
"torch.no_grad",
"torch.cuda.is_available"
],
[
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ChristianFeldmann/PresentationMaterial | [
"a5182a5d50ed944fa7738f3919267ea056b72e63"
] | [
"VideoCodingBasics/Figures/Tnsformation/quantization.py"
] | [
"import numpy\n\ndata = numpy.array(\n [[968., 205.69, 120.6, -38.29, -81., -28.4, 33.12, 41.77],\n [89.13, 224.38, 132.1, -56.46, -102.6, -36.72, 39.05, 47.77],\n [-4.85, 58., 47.38, -62.13, -67.61, -21.83, 22.13, 23.34],\n [-111.6, -23.74, -2.54, -36.53, -19.46, -1.74, 0.23, -3.56],\n [-77., -18.35, -12.42, -7.35, 17., 9.14, -9.74, -13.01],\n [-19.02, 18.74, -9.08, 6.43, 19.95, 4.73, -8.09, -10.21],\n [20.95, 27.95, -11.87, -8.28, -1.22, -4.14, -3.88, -4.11],\n [16.52, 13.44, -14.14, -14.81, -12.66, -6.63, -0.68, 1.91]])\n\ndataQuant = numpy.round(data / 10).astype(int)\nprint(dataQuant)\n\ndataQuant2 = numpy.round(data / 20).astype(int)\nprint(dataQuant2)\n\nreconstructions2 = dataQuant2 * 20\nprint(reconstructions2)\n"
] | [
[
"numpy.round",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
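`quantization.py` rounds the coefficient block with step sizes 10 and 20 and reconstructs with step 20, but never measures what the coarser step discards. A short illustrative follow-up (an assumption about intent, using only the top-left corner of the same 8x8 block) that quantifies the reconstruction error, which is bounded by half the step size:

```python
import numpy as np

def quantize_and_reconstruct(block, step):
    levels = np.round(block / step).astype(int)  # integer levels that would be transmitted
    return levels * step                         # decoder-side reconstruction

block = np.array([[968.0, 205.69],
                  [89.13, 224.38]])              # top-left corner of the block above

for step in (10, 20):
    rec = quantize_and_reconstruct(block, step)
    print(step, np.max(np.abs(block - rec)))     # max error is at most step / 2
```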
cics-nd/rans-uncertainty | [
"1ee554d64550377dfa4295bb05e61bab98e43ee4"
] | [
"training-data/periodic-hills/preProcess.py"
] | [
"\"\"\"\nA simple pre-processing file for converting raw OpenFOAM data to \nPyTorch tensors. This makes reading the data by the neural network\nsignifcantly faster. Additionally, depending on the flow, spacial\naverages can be taken to increase smoothness of R-S fields.\n===\nDistributed by: Notre Dame CICS (MIT Liscense)\n- Associated publication:\nurl: https://www.sciencedirect.com/science/article/pii/S0021999119300464\ndoi: https://doi.org/10.1016/j.jcp.2019.01.021\ngithub: https://github.com/cics-nd/rans-uncertainty\n===\n\"\"\"\nimport sys, random, re, os\nimport numpy as np\nimport torch as th\nimport scipy as sc\n\ndef readFieldData(fileName):\n \"\"\"\n Reads in openFoam field (vector, or tensor)\n Args:\n fileName(string): File name\n Returns:\n data (FloatTensor): tensor of data read from file\n \"\"\"\n #Attempt to read text file and extact data into a list\n try:\n print('Attempting to read file: '+str(fileName))\n rgx = re.compile('[%s]' % '(){}<>')\n rgx2 = re.compile('\\((.*?)\\)') #regex to get stuff in parenthesis\n file_object = open(str(fileName), \"r\").read().splitlines()\n \n #Find line where the internal field starts\n print('Parsing file...')\n fStart = [file_object.index(i) for i in file_object if 'internalField' in i][-1] + 1\n fEnd = [file_object.index(i) for i in file_object[fStart:] if ';' in i][0]\n \n data_list = [[float(rgx.sub('',elem)) for elem in vector.split()] for vector in file_object[fStart+1:fEnd] if not rgx2.search(vector) is None]\n #For scalar fields\n if(len(data_list) == 0):\n data_list = [float(rgx.sub('',elem)) for elem in file_object[fStart+1:fEnd] if not len(rgx.sub('',elem)) is 0]\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n return\n except IOError as err:\n print(\"File read error: {0}\".format(err))\n return\n except:\n print(\"Unexpected error:{0}\".format(sys.exc_info()[0]))\n return\n\n print('Data field file successfully read.')\n data = th.DoubleTensor(data_list)\n return data\n\ndef readScalarData(timeStep, fileName, dir = ''):\n return readFieldData(str(dir)+'/'+str(timeStep)+'/'+fileName)\n\ndef readVectorData(timeStep, fileName, dir = ''):\n return readFieldData(str(dir)+'/'+str(timeStep)+'/'+fileName)\n\ndef readTensorData(timeStep, fileName, dir = ''):\n data0 = readFieldData(str(dir)+'/'+str(timeStep)+'/'+fileName)\n #Reshape into [nCells,3,3] Tensor\n return data0.view(data0.size()[0],3,-1)\n\ndef readSymTensorData(timeStep, fileName, dir = ''):\n data0 = readFieldData(str(dir)+'/'+str(timeStep)+'/'+fileName)\n # Reshape into [nCells,3,3] Tensor\n # Following symmTensor.H indexes since this is RAW openFOAM output\n data = th.DoubleTensor(data0.size()[0], 3, 3)\n data[:,0,:] = data0[:,0:3] #First Row is consistent\n data[:,1,0] = data0[:,1] #YX = XY\n data[:,1,1] = data0[:,3] #YY\n data[:,1,2] = data0[:,4] #YZ\n data[:,2,0] = data0[:,2] #ZX = XZ\n data[:,2,1] = data0[:,4] #ZY = YZ\n data[:,2,2] = data0[:,5]\n\n return data.view(-1,9)\n\ndef readCellCenters(timeStep, dir=''):\n \"\"\"\n Reads in openFoam cellCenters field which contains a list of\n coordinates associated with each finite volume cell center.\n Generated using the following utility:\n https://bitbucket.org/peterjvonk/cellcenters\n Args:\n timeStep (float): Time value to read in at\n fileName(string): File name\n Returns:\n data (FloatTensor): array of data read from file\n \"\"\"\n #Attempt to read text file and extact data into a list\n try:\n file_path = dir+\"/\"+str(timeStep)+\"/cellCenters\"\n print('Reading mesh cell centers 
'+file_path)\n\n rgx = re.compile('\\((.*?)\\)') #regex to get stuff in parenthesis\n file_object = open(file_path, \"r\").read().splitlines()\n #Find line where the internal field starts\n commentLines = [file_object.index(line) for line in file_object if \"//*****\" in line.replace(\" \", \"\")]\n fStart = [file_object.index(i) for i in file_object if 'internalField' in i][-1] + 1\n fEnd = [file_object.index(i) for i in file_object[fStart:] if ';' in i][0]\n \n cell_list0 = [rgx.search(center).group(1) for center in file_object[fStart+1:fEnd] if not rgx.search(center) is None]\n cell_list = [[float(elem) for elem in c0.split()] for c0 in cell_list0]\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n return\n except IOError as err:\n print(\"File read error: {0}\".format(err))\n return\n except:\n print(\"Unexpected error:{0}\".format(sys.exc_info()[0]))\n return\n\n return th.FloatTensor(cell_list)\n\ndef saveTensor(tensor, fieldName, timeStep, dir=''):\n \"\"\"\n Save PyTorch field tensor\n \"\"\"\n print('Saving tensor field: {}-torch.th'.format(fieldName))\n th.save(tensor, '{}/{}/{}-torch.th'.format(dir, timeStep, fieldName))\n\ndef fieldAverage(field, index_list):\n f0 = []\n for z_i in index_list:\n f0.append(th.sum(field[z_i],0)/len(z_i))\n\n return th.stack(f0)\n\nif __name__ == '__main__':\n\n # LES\n les_dir = 'LES' # Directory\n les_time = 1000 # Time step\n\n # RANS\n rans_dir = 'RANS'\n rans_time = 90\n\n # Cell Centers\n cell_dir = 'RANS'\n cell_time = 90\n\n # First read cell centers\n # Cell centers field is generated using the following utility:\n # https://bitbucket.org/peterjvonk/cellcenters\n cell_centers = readCellCenters(cell_time, cell_dir)\n\n # Get unique x & y coords\n cell_n = cell_centers.numpy()\n cell_coord = np.array([cell_n[:,0], cell_n[:,1]])\n cell_xy = np.unique(cell_n[:,0:2], axis=0)\n saveTensor(cell_xy, 'cellCenters', rans_time, rans_dir)\n \n # Now get averaging indexes (where x & y are the same)\n avg_index = []\n for i in range(cell_xy.shape[0]):\n if(i%100 == 0):\n print('Finding average indexes {}/{}'.format(i, len(cell_xy)))\n avg_index.append(np.where(np.all(cell_n[:,0:2] == cell_xy[i], axis=1))[0])\n\n # Read in fields\n k = readScalarData(rans_time, 'k', dir=rans_dir)\n s = readTensorData(rans_time, 'S', dir=rans_dir)\n r = readTensorData(rans_time, 'R', dir=rans_dir)\n\n les_UPrime = readSymTensorData(les_time, 'UPrime2Mean', dir=les_dir)\n \n # Now average fields in Z direction\n k0 = fieldAverage(k, avg_index)\n r0 = fieldAverage(r, avg_index)\n s0 = fieldAverage(s, avg_index)\n\n les_UPrime0 = fieldAverage(les_UPrime, avg_index)\n \n #Save averaged fields\n saveTensor(k0, 'k', rans_time, rans_dir)\n saveTensor(s0, 'S', rans_time, rans_dir)\n saveTensor(r0, 'R', rans_time, rans_dir)\n\n saveTensor(les_UPrime0, 'UPrime2Mean', les_time, les_dir)\n"
] | [
[
"numpy.unique",
"torch.sum",
"numpy.all",
"torch.DoubleTensor",
"torch.FloatTensor",
"torch.stack",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
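`readSymTensorData` above depends on OpenFOAM's `symmTensor` component order `(XX, XY, XZ, YY, YZ, ZZ)` when it expands the raw 6-component rows into full 3x3 tensors. A tiny standalone check of that index mapping (illustrative sketch, mirroring the assignments in the function):

```python
import numpy as np

row = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])  # (XX, XY, XZ, YY, YZ, ZZ)

full = np.array([
    [row[0], row[1], row[2]],   # XX XY XZ
    [row[1], row[3], row[4]],   # YX=XY, YY, YZ
    [row[2], row[4], row[5]],   # ZX=XZ, ZY=YZ, ZZ
])

print(np.allclose(full, full.T))  # True: the reconstruction is symmetric
```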
pyri-project/pyri-robotics | [
"c957b00bfef664519f49140d9dd65736cdc8b053"
] | [
"src/pyri/robotics/util/invkin.py"
] | [
"import numpy as np\nimport general_robotics_toolbox as rox\nfrom scipy.optimize import lsq_linear\n\ndef update_ik_info3(robot_rox, T_desired, q_current): # inverse kinematics that uses Least Square solver\n \n # R_d, p_d: Desired orientation and position\n R_d = T_desired.R\n p_d = T_desired.p\n d_q = q_current\n\n num_joints = len(robot_rox.joint_type)\n \n q_cur = d_q # initial guess on the current joint angles\n q_cur = q_cur.reshape((num_joints,1)) \n \n max_steps = 200 # number of steps to for convergence\n \n # print_div( \"<br> q_cur \" + str(q_cur) ) # DEBUG\n\n hist_b = []\n \n itr = 0 # Iterations\n converged = False\n while itr < max_steps and not converged:\n \n pose = rox.fwdkin(robot_rox,q_cur.flatten())\n R_cur = pose.R\n p_cur = pose.p\n \n #calculate current Jacobian\n J0T = rox.robotjacobian(robot_rox,q_cur.flatten())\n \n # Transform Jacobian to End effector frame from the base frame\n Tr = np.zeros((6,6))\n Tr[:3,:3] = R_cur.T \n Tr[3:,3:] = R_cur.T\n J0T = Tr @ J0T\n \n # Jp=J0T[3:,:]\n # JR=J0T[:3,:] #decompose to position and orientation Jacobian\n \n # Error in position and orientation\n # ER = np.matmul(R_cur, np.transpose(R_d))\n ER = np.matmul(np.transpose(R_d),R_cur)\n #print_div( \"<br> ER \" + str(ER) ) # DEBUG\n\n # EP = p_cur - p_d \n EP = R_cur.T @ (p_cur - p_d) \n #print_div( \"<br> EP \" + str(EP) ) # DEBUG\n\n #decompose ER to (k,theta) pair\n k, theta = rox.R2rot(ER) \n # print_div( \"<br> k \" + str(k) ) # DEBUG\n # print_div( \"<br> theta \" + str(theta) ) # DEBUG\n \n ## set up s for different norm for ER\n # s=2*np.dot(k,np.sin(theta)) #eR1\n # s = np.dot(k,np.sin(theta/2)) #eR2\n s = np.sin(theta/2) * np.array(k) #eR2\n # s=2*theta*k #eR3\n # s=np.dot(J_phi,phi) #eR4\n # print_div( \"<br> s \" + str(s) ) # DEBUG \n\n Kp = np.eye(3)\n KR = np.eye(3) #gains for position and orientation error\n \n vd = - Kp @ EP\n wd = - KR @ s\n \n b = np.concatenate([wd,vd])\n np.nan_to_num(b, copy=False, nan=0.0, posinf=None, neginf=None)\n # print(b)\n # print(J0T)\n \n # DEBUG --------------\n hist_b.append(b)\n if itr > 0:\n error_cur = np.linalg.norm(hist_b[itr-1]) - np.linalg.norm(hist_b[itr])\n #print(\"Error= \" + str(error_cur))\n # DEBUG --------------\n\n res = lsq_linear(J0T,b)\n\n if res.success: \n qdot_star = res.x \n else:\n print(\"Any solution could not found\")\n qdot_star = np.finfo(float).eps * np.ones(num_joints)\n\n # find best step size to take\n # alpha=fminbound(min_alpha,0,1,args=(q_cur,qdot_star,Sawyer_def,Rd,pd,w,Kp))\n alpha = 0.3 # Step size # 1.0 \n delta = alpha * qdot_star \n # print_div( \"<br> delta \" + str(delta) ) # DEBUG\n \n # Convergence Check\n converged = (np.abs(np.hstack((s,EP))) < 0.0001).all()\n\n if not converged:\n # Update for next iteration\n q_cur = q_cur + delta.reshape((num_joints,1))\n\n # Normalize angles betweeen -pi to pi\n q_cur = normalizeAngles(q_cur)\n \n # print_div( \"<br> converged? \" + str(converged) ) # DEBUG\n # print( \"converged? 
\" + str(converged) ) # DEBUG\n \n itr += 1 # Increase the iteration\n #print(itr)\n #print(converged)\n # print(delta)\n # print(q_cur)\n \n # joints_text=\"\"\n # for i in q_cur:\n # joints_text+= \"(%.3f, %.3f) \" % (np.rad2deg(i), i) \n # print_div_ik_info(str(rox.Transform(R_d,p_d)) +\"<br>\"+ joints_text +\"<br>\"+ str(converged) + \", itr = \" + str(itr))\n return np.squeeze(q_cur), converged\n\ndef normalizeAngle(angle):\n \"\"\"\n :param angle: (float)\n :return: (float) the angle in [-pi, pi]\n \"\"\"\n # while angle > np.pi:\n # angle -= 2 * np.pi\n # while angle < -np.pi:\n # angle += 2 * np.pi\n # return angle \n return angle\n\ndef normalizeAngles(angles):\n for idx, angle in np.ndenumerate(angles):\n angles[idx] = normalizeAngle(angle)\n\n return angles"
] | [
[
"numpy.hstack",
"numpy.eye",
"numpy.squeeze",
"numpy.nan_to_num",
"numpy.linalg.norm",
"numpy.sin",
"numpy.concatenate",
"numpy.ones",
"scipy.optimize.lsq_linear",
"numpy.finfo",
"numpy.transpose",
"numpy.ndenumerate",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
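Each iteration of `update_ik_info3` above reduces to solving `J0T @ qdot = b` with `scipy.optimize.lsq_linear` and taking a damped step `alpha * qdot`. The sketch below isolates that single inner step with random stand-ins for the Jacobian and error twist (no robot model required); the 7-DOF size and the values are assumptions for illustration only.

```python
import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(1)
J = rng.normal(size=(6, 7))   # stand-in for the 6xN end-effector-frame Jacobian J0T
b = rng.normal(size=6)        # stand-in for the stacked error twist [-KR @ s, -Kp @ EP]

res = lsq_linear(J, b)        # least-squares joint velocity, as in update_ik_info3
alpha = 0.3                   # same damping / step size used in the function
dq = alpha * res.x            # increment added to q_cur each iteration

print(res.success, dq)
```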
BogdanMarghescu/Deep-Learning-Coursera | [
"af2c71c024f0ea911f89ed476686bd09ce37e87c",
"af2c71c024f0ea911f89ed476686bd09ce37e87c"
] | [
"Sequence Models/Emojify/emo_utils.py",
"Sequence Models/Trigger Word Detection/td_utils.py"
] | [
"import csv\nimport emoji\nimport numpy as np\nemoji_dictionary = {\"0\": \"\\u2764\\uFE0F\", \"1\": \":baseball:\", \"2\": \":smile:\", \"3\": \":disappointed:\", \"4\": \":fork_and_knife:\"}\n\n\ndef read_glove_vecs(glove_file):\n with open(glove_file, encoding=\"utf8\") as f:\n words = set()\n word_to_vec_map = {}\n for line in f:\n line = line.strip().split()\n curr_word = line[0]\n words.add(curr_word)\n word_to_vec_map[curr_word] = np.array(line[1:], dtype=np.float64)\n i = 1\n words_to_index = {}\n index_to_words = {}\n for w in sorted(words):\n words_to_index[w] = i\n index_to_words[i] = w\n i = i + 1\n return words_to_index, index_to_words, word_to_vec_map\n\n\ndef softmax(x):\n # Compute softmax values for each sets of scores in x\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()\n\n\ndef read_csv(filename='data/emojify_data.csv'):\n phrase = []\n emoji = []\n with open(filename) as csvDataFile:\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n phrase.append(row[0])\n emoji.append(row[1])\n X = np.asarray(phrase)\n Y = np.asarray(emoji, dtype=int)\n return X, Y\n\n\ndef convert_to_one_hot(Y, C):\n Y = np.eye(C)[Y.reshape(-1)]\n return Y\n\n\ndef label_to_emoji(label):\n # Converts a label (int or string) into the corresponding emoji code (string) ready to be printed\n return emoji.emojize(emoji_dictionary[str(label)], use_aliases=True)\n\n\ndef print_predictions(X, pred):\n for i in range(X.shape[0]):\n print(X[i], label_to_emoji(int(pred[i])))\n\n\ndef predict(X, Y, W, b, word_to_vec_map):\n \"\"\"\n Given X (sentences) and Y (emoji indices), predict emojis and compute the accuracy of your model over the given set.\n \n Arguments:\n X -- input data containing sentences, numpy array of shape (m, None)\n Y -- labels, containing index of the label emoji, numpy array of shape (m, 1)\n \n Returns:\n pred -- numpy array of shape (m, 1) with your predictions\n \"\"\"\n m = X.shape[0]\n pred = np.zeros((m, 1))\n for j in range(m): # Loop over training examples\n # Split jth test example (sentence) into list of lower case words\n words = X[j].lower().split()\n # Average words' vectors\n avg = np.zeros((50,))\n for w in words:\n avg += word_to_vec_map[w]\n avg /= len(words)\n # Forward propagation\n Z = W @ avg + b\n A = softmax(Z)\n pred[j] = np.argmax(A)\n print(\"Accuracy: \" + str(np.mean((pred[:] == Y.reshape(Y.shape[0], 1)[:]))))\n return pred\n",
"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nfrom pydub import AudioSegment\n\n\ndef graph_spectrogram(wav_file):\n # Calculate and plot spectrogram for a wav audio file\n rate, data = get_wav_info(wav_file)\n nfft = 200 # Length of each window segment\n fs = 8000 # Sampling frequencies\n noverlap = 120 # Overlap between windows\n nchannels = data.ndim\n if nchannels == 1:\n pxx, freqs, bins, im = plt.specgram(data, nfft, fs, noverlap=noverlap)\n elif nchannels == 2:\n pxx, freqs, bins, im = plt.specgram(data[:, 0], nfft, fs, noverlap=noverlap)\n return pxx\n\n\ndef get_wav_info(wav_file):\n # Load a wav file\n rate, data = wavfile.read(wav_file)\n return rate, data\n\n\ndef match_target_amplitude(sound, target_dBFS):\n # Used to standardize volume of audio clip\n change_in_dBFS = target_dBFS - sound.dBFS\n return sound.apply_gain(change_in_dBFS)\n\n\ndef load_raw_audio():\n # Load raw audio files for speech synthesis\n activates, backgrounds, negatives = [], [], []\n for filename in os.listdir(\"./raw_data/activates\"):\n if filename.endswith(\"wav\"):\n activate = AudioSegment.from_wav(\"./raw_data/activates/\" + filename)\n activates.append(activate)\n for filename in os.listdir(\"./raw_data/backgrounds\"):\n if filename.endswith(\"wav\"):\n background = AudioSegment.from_wav(\"./raw_data/backgrounds/\" + filename)\n backgrounds.append(background)\n for filename in os.listdir(\"./raw_data/negatives\"):\n if filename.endswith(\"wav\"):\n negative = AudioSegment.from_wav(\"./raw_data/negatives/\" + filename)\n negatives.append(negative)\n return activates, negatives, backgrounds\n\n\ndef preprocess_audio(filename):\n # Preprocess the audio to the correct format\n # Trim or pad audio segment to 10000ms\n padding = AudioSegment.silent(duration=10000)\n segment = AudioSegment.from_wav(filename)[:10000]\n segment = padding.overlay(segment)\n # Set frame rate to 44100\n segment = segment.set_frame_rate(44100)\n # Export as wav\n segment.export(filename, format='wav')\n\n\ndef get_random_time_segment(segment_ms):\n \"\"\"\n Gets a random time segment of duration segment_ms in a 10,000 ms audio clip.\n\n Arguments:\n segment_ms -- the duration of the audio clip in ms (\"ms\" stands for \"milliseconds\")\n\n Returns:\n segment_time -- a tuple of (segment_start, segment_end) in ms\n \"\"\"\n segment_start = np.random.randint(low=0, high=10000 - segment_ms)\n segment_end = segment_start + segment_ms - 1\n return segment_start, segment_end\n\n\ndef is_overlapping(segment_time, previous_segments):\n \"\"\"\n Checks if the time of a segment overlaps with the times of existing segments.\n\n Arguments:\n segment_time -- a tuple of (segment_start, segment_end) for the new segment\n previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments\n\n Returns:\n True if the time segment overlaps with any of the existing segments, False otherwise\n \"\"\"\n segment_start, segment_end = segment_time\n overlap = False\n for previous_start, previous_end in previous_segments:\n if segment_start <= previous_end and segment_end >= previous_start:\n overlap = True\n return overlap\n\n\ndef insert_audio_clip(background, audio_clip, previous_segments):\n \"\"\"\n Insert a new audio segment over the background noise at a random time step, ensuring that the\n audio segment does not overlap with existing segments.\n\n Arguments:\n background -- a 10 second background audio recording.\n audio_clip -- the audio clip to be 
inserted/overlaid.\n previous_segments -- times where audio segments have already been placed\n\n Returns:\n new_background -- the updated background audio\n \"\"\"\n # Get the duration of the audio clip in ms\n segment_ms = len(audio_clip)\n segment_time = get_random_time_segment(segment_ms)\n while is_overlapping(segment_time, previous_segments):\n segment_time = get_random_time_segment(segment_ms)\n previous_segments.append(segment_time)\n # Superpose audio segment and background\n new_background = background.overlay(audio_clip, position=segment_time[0])\n return new_background, segment_time\n\n\ndef insert_ones(y, segment_end_ms, Ty=1375):\n \"\"\"\n Update the label vector y. The labels of the 50 output steps strictly after the end of the segment\n should be set to 1. By strictly we mean that the label of segment_end_y should be 0 while, the\n 50 following labels should be ones.\n\n\n Arguments:\n y -- numpy array of shape (1, Ty), the labels of the training example\n segment_end_ms -- the end time of the segment in ms\n\n Returns:\n y -- updated labels\n \"\"\"\n # duration of the background (in terms of spectrogram time-steps)\n segment_end_y = int(segment_end_ms * Ty / 10000.0)\n for i in range(segment_end_y + 1, segment_end_y + 51):\n if i < Ty:\n y[0, i] = 1\n return y\n\n\ndef create_training_example(example_filename, background, activates, negatives, Ty=1375, seed=18):\n \"\"\"\n Creates a training example with a given background, activates, and negatives.\n\n Arguments:\n background -- a 10 second background audio recording\n activates -- a list of audio segments of the word \"activate\"\n negatives -- a list of audio segments of random words that are not \"activate\"\n\n Returns:\n x -- the spectrogram of the training example\n y -- the label at each time step of the spectrogram\n \"\"\"\n np.random.seed(seed)\n background = background - 20 # Make background quieter\n # Step 1: Initialize y (label vector) of zeros\n y = np.zeros((1, Ty))\n # Step 2: Initialize segment times as an empty list\n previous_segments = []\n # Select 0-4 random \"activate\" audio clips from the entire list of \"activates\" recordings\n number_of_activates = np.random.randint(0, 5)\n random_indices = np.random.randint(len(activates), size=number_of_activates)\n random_activates = [activates[i] for i in random_indices]\n # Step 3: Loop over randomly selected \"activate\" clips and insert in background\n for random_activate in random_activates:\n # Insert the audio clip on the background\n background, segment_time = insert_audio_clip(background, random_activate, previous_segments)\n # Retrieve segment_start and segment_end from segment_time\n segment_start, segment_end = segment_time\n # Insert labels in \"y\"\n y = insert_ones(y, segment_end, Ty)\n # Select 0-2 random negatives audio recordings from the entire list of \"negatives\" recordings\n number_of_negatives = np.random.randint(0, 3)\n random_indices = np.random.randint(len(negatives), size=number_of_negatives)\n random_negatives = [negatives[i] for i in random_indices]\n # Step 4: Loop over randomly selected negative clips and insert in background\n for random_negative in random_negatives:\n # Insert the audio clip on the background\n background, _ = insert_audio_clip(background, random_negative, previous_segments)\n # Standardize the volume of the audio clip\n background = match_target_amplitude(background, -20.0)\n # Export new training example\n background.export(example_filename, format=\"wav\")\n print(f'File {example_filename} was saved in 
your directory.')\n # Get and plot spectrogram of the new recording (background with superposition of positive and negatives)\n x = graph_spectrogram(example_filename)\n return x, y\n"
] | [
[
"numpy.asarray",
"numpy.eye",
"numpy.max",
"numpy.argmax",
"numpy.array",
"numpy.zeros"
],
[
"numpy.random.seed",
"matplotlib.pyplot.specgram",
"numpy.zeros",
"scipy.io.wavfile.read",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
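The first entry above ends with the trigger-word labeling helpers; the key step is `insert_ones`, which maps a segment's end time in milliseconds onto the Ty = 1375 spectrogram steps and marks the 50 steps strictly after it as positive. A minimal standalone sketch of that rule, assuming the 10 s (10,000 ms) background used in that file; the example end time of 4,250 ms is made up for illustration:

```python
import numpy as np

def insert_ones(y, segment_end_ms, Ty=1375, background_ms=10000.0):
    # Map the end time (ms) onto the Ty output steps, then set the 50
    # steps strictly after it to 1, mirroring the entry above.
    segment_end_y = int(segment_end_ms * Ty / background_ms)
    for i in range(segment_end_y + 1, segment_end_y + 51):
        if i < Ty:
            y[0, i] = 1
    return y

y = insert_ones(np.zeros((1, 1375)), segment_end_ms=4250)
print(int(y.sum()))                   # 50 positive steps
print(int(np.flatnonzero(y[0])[0]))   # first positive step: 585
```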
Prithwijit-Chak/simpeg | [
"d93145d768b5512621cdd75566b4a8175fee9ed3",
"d93145d768b5512621cdd75566b4a8175fee9ed3",
"8021082b8b53f3c08fa87fc085547bdd56437c6b"
] | [
"tutorials/13-joint_inversion/plot_inv_1_joint_pf_pgi_full_info_tutorial.py",
"tests/em/static/test_DC_jvecjtvecadj.py",
"SimPEG/regularization/sparse.py"
] | [
"\"\"\"\nJoint PGI of Gravity + Magnetic on an Octree mesh using full petrophysical information\n======================================================================================\n\n\nThis tutorial shows through a joint inversion of Gravity and Magnetic data on an\nOctree mesh how to use the PGI framework introduced in Astic & Oldenburg (2019)\nand Astic et al. (2021) to include petrophysical information into geophysical\ninversions for mutli-physics inversion.\n\nThibaut Astic, Douglas W. Oldenburg,\nA framework for petrophysically and geologically guided geophysical inversion\nusing a dynamic Gaussian mixture model prior, Geophysical Journal International,\nVolume 219, Issue 3, December 2019, Pages 1989–2012, DOI:\n`10.1093/gji/ggz389 <https://doi.org/10.1093/gji/ggz389>`_.\n\n\nThibaut Astic, Lindsey J. Heagy, Douglas W Oldenburg,\nPetrophysically and geologically guided multi-physics inversion using a dynamic\nGaussian mixture model, Geophysical Journal International,\nVolume 224, Issue 1, January 2021, Pages 40-68, DOI: `10.1093/gji/ggaa378\n<https://doi.org/10.1093/gji/ggaa378>`_.\n\n\"\"\"\n#########################################################################\n# Import modules\n# --------------\n#\n\nimport discretize as ds\nimport SimPEG.potential_fields as pf\nfrom SimPEG import (\n maps,\n utils,\n simulation,\n inverse_problem,\n inversion,\n optimization,\n regularization,\n data_misfit,\n directives,\n)\nfrom SimPEG.utils import io_utils\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\n# Reproducible science\nnp.random.seed(518936)\n\n#########################################################################\n# Setup\n# -----\n#\n\n# Load Mesh\nmesh_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/mesh_tutorial.ubc\"\n)\nmesh = ds.TreeMesh.read_UBC(mesh_file)\n\n# Load True geological model for comparison with inversion result\ntrue_geology_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/geology_true.mod\"\n)\ntrue_geology = mesh.read_model_UBC(true_geology_file)\n\n# Plot true geology model\nfig, ax = plt.subplots(1, 4, figsize=(20, 4))\nticksize, labelsize = 14, 16\nfor _, axx in enumerate(ax):\n axx.set_aspect(1)\n axx.tick_params(labelsize=ticksize)\nmesh.plotSlice(\n true_geology,\n normal=\"X\",\n ax=ax[0],\n ind=-17,\n clim=[0, 2],\n pcolorOpts={\"cmap\": \"inferno_r\"},\n grid=True,\n)\nmesh.plotSlice(\n true_geology,\n normal=\"Y\",\n ax=ax[1],\n clim=[0, 2],\n pcolorOpts={\"cmap\": \"inferno_r\"},\n grid=True,\n)\ngeoplot = mesh.plotSlice(\n true_geology,\n normal=\"Z\",\n ax=ax[2],\n clim=[0, 2],\n ind=-10,\n pcolorOpts={\"cmap\": \"inferno_r\"},\n grid=True,\n)\ngeocb = plt.colorbar(geoplot[0], cax=ax[3], ticks=[0, 1, 2])\ngeocb.set_label(\n \"True geology model\\n(classification/density/mag. 
susc.)\", fontsize=labelsize\n)\ngeocb.set_ticklabels(\n [\"BCKGRD (0 g/cc; 0 SI)\", \"PK (-0.8 g/cc; 5e-3 SI)\", \"VK (-0.2 g/cc; 2e-2 SI)\"]\n)\ngeocb.ax.tick_params(labelsize=ticksize)\nax[3].set_aspect(10)\nplt.show()\n\n# Load geophysical data\ndata_grav_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/gravity_data.obs\"\n)\ndata_grav = io_utils.read_grav3d_ubc(data_grav_file)\ndata_mag_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/magnetic_data.obs\"\n)\ndata_mag = io_utils.read_mag3d_ubc(data_mag_file)\n\n# plot data and mesh\nfig, ax = plt.subplots(2, 2, figsize=(15, 10))\nax = ax.reshape(-1)\nplt.gca().set_aspect(\"equal\")\nplt.gca().set_xlim(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n)\nplt.gca().set_ylim(\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n)\nmesh.plotSlice(\n np.ones(mesh.nC),\n normal=\"Z\",\n ind=int(-10),\n grid=True,\n pcolorOpts={\"cmap\": \"Greys\"},\n ax=ax[0],\n)\nmm = utils.plot2Ddata(\n data_grav.survey.receiver_locations,\n -data_grav.dobs,\n ax=ax[0],\n level=True,\n nx=20,\n ny=20,\n dataloc=True,\n ncontour=12,\n shade=True,\n contourOpts={\"cmap\": \"Blues_r\", \"alpha\": 0.8},\n levelOpts={\"colors\": \"k\", \"linewidths\": 0.5, \"linestyles\": \"dashed\"},\n)\nax[0].set_aspect(1)\nax[0].set_title(\n \"Gravity data values and locations,\\nwith mesh and geology overlays\", fontsize=16\n)\nplt.colorbar(mm[0], cax=ax[2], orientation=\"horizontal\")\nax[2].set_aspect(0.05)\nax[2].set_title(\"mGal\", fontsize=16)\nmesh.plotSlice(\n np.ones(mesh.nC),\n normal=\"Z\",\n ind=int(-10),\n grid=True,\n pcolorOpts={\"cmap\": \"Greys\"},\n ax=ax[1],\n)\nmm = utils.plot2Ddata(\n data_mag.survey.receiver_locations,\n data_mag.dobs,\n ax=ax[1],\n level=True,\n nx=20,\n ny=20,\n dataloc=True,\n ncontour=11,\n shade=True,\n contourOpts={\"cmap\": \"Reds\", \"alpha\": 0.8},\n levelOpts={\"colors\": \"k\", \"linewidths\": 0.5, \"linestyles\": \"dashed\"},\n)\nax[1].set_aspect(1)\nax[1].set_title(\n \"Magnetic data values and locations,\\nwith mesh and geology overlays\", fontsize=16\n)\nplt.colorbar(mm[0], cax=ax[3], orientation=\"horizontal\")\nax[3].set_aspect(0.05)\nax[3].set_title(\"nT\", fontsize=16)\n# overlay true geology model for comparison\nindz = -9\nindslicezplot = mesh.gridCC[:, 2] == mesh.vectorCCz[indz]\nfor i in range(2):\n utils.plot2Ddata(\n mesh.gridCC[indslicezplot][:, [0, 1]],\n true_geology[indslicezplot],\n nx=200,\n ny=200,\n contourOpts={\"alpha\": 0},\n clim=[0, 2],\n ax=ax[i],\n level=True,\n ncontour=2,\n levelOpts={\"colors\": \"k\", \"linewidths\": 2, \"linestyles\": \"--\"},\n method=\"nearest\",\n )\nplt.subplots_adjust(hspace=-0.25, wspace=0.1)\nplt.show()\n\n# Load Topo\ntopo_file = io_utils.download(\n \"https://storage.googleapis.com/simpeg/pgi_tutorial_assets/CDED_Lake_warp.xyz\"\n)\ntopo = np.genfromtxt(topo_file, skip_header=1)\n# find the active cells\nactv = utils.surface2ind_topo(mesh, topo, gridLoc=\"CC\")\n# Create active map to go from reduce set to full\nndv = np.nan\nactvMap = maps.InjectActiveCells(mesh, actv, ndv)\nnactv = int(actv.sum())\n\n# Create simulations and data misfits\n# Wires mapping\nwires = maps.Wires((\"den\", actvMap.nP), (\"sus\", actvMap.nP))\ngravmap = actvMap * wires.den\nmagmap = actvMap * wires.sus\nidenMap = maps.IdentityMap(nP=nactv)\n# Grav problem\nsimulation_grav = 
pf.gravity.simulation.Simulation3DIntegral(\n survey=data_grav.survey, mesh=mesh, rhoMap=wires.den, actInd=actv,\n)\ndmis_grav = data_misfit.L2DataMisfit(data=data_grav, simulation=simulation_grav)\n# Mag problem\nsimulation_mag = pf.magnetics.simulation.Simulation3DIntegral(\n survey=data_mag.survey, mesh=mesh, chiMap=wires.sus, actInd=actv,\n)\ndmis_mag = data_misfit.L2DataMisfit(data=data_mag, simulation=simulation_mag)\n\n#########################################################################\n# Create a joint Data Misfit\n#\n\n# Joint data misfit\ndmis = 0.5 * dmis_grav + 0.5 * dmis_mag\n\n# initial model\nm0 = np.r_[-1e-4 * np.ones(actvMap.nP), 1e-4 * np.ones(actvMap.nP)]\n\n#########################################################################\n# Inversion with full petrophysical information\n# ---------------------------------------------\n#\n\n#########################################################################\n# Create and plot a petrophysical GMM with full information\n# ---------------------------------------------------------\n#\n# The GMM is our representation of the petrophysical and geological information.\n# Here, we focus on the petrophysical aspect, with the means and covariances of\n# the physical properties of each rock unit.\n# To generate the data above, the PK unit was populated with a density contrast\n# of -0.8 g/cc and a magnetic susceptibility of 0.005 SI. The properties of the\n# HK unit were set at -0.2 g/cc and 0.02 SI. The covariances matrices are set\n# so that we assume petrophysical noise levels of around 0.05 g/cc and 0.001 SI\n# for both unit. Finally the background unit is set at null contrasts (0 g/cc\n# 0 SI) with a petrophysical noise level of half of the above.\n#\n\ngmmref = utils.WeightedGaussianMixture(\n n_components=3, # number of rock units: bckgrd, PK, HK\n mesh=mesh, # inversion mesh\n actv=actv, # actv cells\n covariance_type=\"diag\", # diagonal covariances\n)\n# required: initialization with fit\n# fake random samples, size of the mesh, number of physical properties: 2 (density and mag.susc)\ngmmref.fit(np.random.randn(nactv, 2))\n# set parameters manually\n# set phys. prop means for each unit\ngmmref.means_ = np.c_[\n [0.0, 0.0], # BCKGRD density contrast and mag. susc\n [-0.8, 0.005], # PK\n [-0.2, 0.02], # HK\n].T\n# set phys. prop covariances for each unit\ngmmref.covariances_ = np.array(\n [[6e-04, 3.175e-07], [2.4e-03, 1.5e-06], [2.4e-03, 1.5e-06]]\n)\n# important after setting cov. 
manually: compute precision matrices and cholesky\ngmmref.compute_clusters_precisions()\n# set global proportions; low-impact as long as not 0 or 1 (total=1)\ngmmref.weights_ = np.r_[0.9, 0.075, 0.025]\n\n# Plot the 2D GMM\nax = gmmref.plot_pdf(flag2d=True)\nax[0].set_xlabel(\"Density contrast [g/cc]\")\nax[0].set_ylim([0, 5])\nax[2].set_ylabel(\"magnetic Susceptibility [SI]\")\nax[2].set_xlim([0, 100])\nplt.show()\n\n#########################################################################\n# Create PGI regularization\n# -------------------------\n#\n\n# Sensitivity weighting\nwr_grav = np.sum(simulation_grav.G ** 2.0, axis=0) ** 0.5 / (mesh.cell_volumes[actv])\nwr_grav = wr_grav / np.max(wr_grav)\n\nwr_mag = np.sum(simulation_mag.G ** 2.0, axis=0) ** 0.5 / (mesh.cell_volumes[actv])\nwr_mag = wr_mag / np.max(wr_mag)\n\n# create joint PGI regularization with smoothness\nreg = utils.make_PGI_regularization(\n gmmref=gmmref,\n mesh=mesh,\n wiresmap=wires,\n maplist=[idenMap, idenMap],\n indActive=actv,\n alpha_s=1.0,\n alpha_x=1.0,\n alpha_y=1.0,\n alpha_z=1.0,\n alpha_xx=0.0,\n alpha_yy=0.0,\n alpha_zz=0.0,\n cell_weights_list=[wr_grav, wr_mag], # weights each phys. prop. by correct sensW\n)\n\n#########################################################################\n# Inverse problem with full petrophysical information\n# ---------------------------------------------------\n#\n\n# Directives\n# Add directives to the inversion\n# ratio to use for each phys prop. smoothness in each direction;\n# roughly the ratio of the order of magnitude of each phys. prop.\nalpha0_ratio = np.r_[\n np.zeros(len(reg.objfcts[0].objfcts)),\n 1e-4 * np.ones(len(reg.objfcts[1].objfcts)),\n 100.0 * 1e-4 * np.ones(len(reg.objfcts[2].objfcts)),\n]\nAlphas = directives.AlphasSmoothEstimate_ByEig(alpha0_ratio=alpha0_ratio, verbose=True)\n# initialize beta and beta/alpha_s schedule\nbeta = directives.BetaEstimate_ByEig(beta0_ratio=1e-2)\nbetaIt = directives.PGI_BetaAlphaSchedule(\n verbose=True, coolingFactor=2.0, tolerance=0.2, progress=0.2,\n)\n# geophy. and petro. 
target misfits\ntargets = directives.MultiTargetMisfits(verbose=True,)\n# add learned mref in smooth once stable\nMrefInSmooth = directives.PGI_AddMrefInSmooth(wait_till_stable=True, verbose=True,)\n# update the parameters in smallness (L2-approx of PGI)\nupdate_smallness = directives.PGI_UpdateParameters(\n update_gmm=False # keep GMM model fixed\n)\n# pre-conditioner\nupdate_Jacobi = directives.UpdatePreconditioner()\n# iteratively balance the scaling of the data misfits\nscaling_init = directives.ScalingMultipleDataMisfits_ByEig(chi0_ratio=[1.0, 100.0])\nscale_schedule = directives.JointScalingSchedule(verbose=True)\n\n# Create inverse problem\n# Optimization\n# set lower and upper bounds\nlowerbound = np.r_[-2.0 * np.ones(actvMap.nP), 0.0 * np.ones(actvMap.nP)]\nupperbound = np.r_[0.0 * np.ones(actvMap.nP), 1e-1 * np.ones(actvMap.nP)]\nopt = optimization.ProjectedGNCG(\n maxIter=30,\n lower=lowerbound,\n upper=upperbound,\n maxIterLS=20,\n maxIterCG=100,\n tolCG=1e-4,\n)\n# create inverse problem\ninvProb = inverse_problem.BaseInvProblem(dmis, reg, opt)\ninv = inversion.BaseInversion(\n invProb,\n # directives: evaluate alphas (and data misfits scales) before beta\n directiveList=[\n Alphas,\n scaling_init,\n beta,\n update_smallness,\n targets,\n scale_schedule,\n betaIt,\n MrefInSmooth,\n update_Jacobi,\n ],\n)\n\n# invert\npgi_model = inv.run(m0)\n\n# Extract the results\ndensity_model = gravmap * pgi_model\nmagsus_model = magmap * pgi_model\nquasi_geology_model = actvMap * reg.objfcts[0].compute_quasi_geology_model()\n\n# Plot the result with full petrophysical information\nfig, ax = plt.subplots(3, 4, figsize=(15, 10))\nfor _, axx in enumerate(ax):\n for _, axxx in enumerate(axx):\n axxx.set_aspect(1)\n axxx.tick_params(labelsize=ticksize)\n\nindx = 15\nindy = 17\nindz = -9\n# geology model\nmesh.plotSlice(\n quasi_geology_model,\n normal=\"X\",\n ax=ax[0, 0],\n clim=[0, 2],\n ind=indx,\n pcolorOpts={\"cmap\": \"inferno_r\"},\n)\nmesh.plotSlice(\n quasi_geology_model,\n normal=\"Y\",\n ax=ax[0, 1],\n clim=[0, 2],\n ind=indy,\n pcolorOpts={\"cmap\": \"inferno_r\"},\n)\ngeoplot = mesh.plotSlice(\n quasi_geology_model,\n normal=\"Z\",\n ax=ax[0, 2],\n clim=[0, 2],\n ind=indz,\n pcolorOpts={\"cmap\": \"inferno_r\"},\n)\ngeocb = plt.colorbar(geoplot[0], cax=ax[0, 3], ticks=[0, 1, 2])\ngeocb.set_ticklabels([\"BCK\", \"PK\", \"VK\"])\ngeocb.set_label(\"Quasi-Geology model\\n(Rock units classification)\", fontsize=16)\nax[0, 3].set_aspect(10)\n\n# gravity model\nmesh.plotSlice(\n density_model,\n normal=\"X\",\n ax=ax[1, 0],\n clim=[-1, 0],\n ind=indx,\n pcolorOpts={\"cmap\": \"Blues_r\"},\n)\nmesh.plotSlice(\n density_model,\n normal=\"Y\",\n ax=ax[1, 1],\n clim=[-1, 0],\n ind=indy,\n pcolorOpts={\"cmap\": \"Blues_r\"},\n)\ndenplot = mesh.plotSlice(\n density_model,\n normal=\"Z\",\n ax=ax[1, 2],\n clim=[-1, 0],\n ind=indz,\n pcolorOpts={\"cmap\": \"Blues_r\"},\n)\ndencb = plt.colorbar(denplot[0], cax=ax[1, 3])\ndencb.set_label(\"Density contrast\\nmodel (g/cc)\", fontsize=16)\nax[1, 3].set_aspect(10)\n\n# magnetic model\nmesh.plotSlice(\n magsus_model,\n normal=\"X\",\n ax=ax[2, 0],\n clim=[0, 0.025],\n ind=indx,\n pcolorOpts={\"cmap\": \"Reds\"},\n)\nmesh.plotSlice(\n magsus_model,\n normal=\"Y\",\n ax=ax[2, 1],\n clim=[0, 0.025],\n ind=indy,\n pcolorOpts={\"cmap\": \"Reds\"},\n)\nsusplot = mesh.plotSlice(\n magsus_model,\n normal=\"Z\",\n ax=ax[2, 2],\n clim=[0, 0.025],\n ind=indz,\n pcolorOpts={\"cmap\": \"Reds\"},\n)\nsuscb = plt.colorbar(susplot[0], cax=ax[2, 
3])\nsuscb.set_label(\"Magnetic susceptibility\\nmodel (SI)\", fontsize=16)\nax[2, 3].set_aspect(10)\n\n# overlay true geology model for comparison\nindslicexplot = mesh.gridCC[:, 0] == mesh.vectorCCx[indx]\nindsliceyplot = mesh.gridCC[:, 1] == mesh.vectorCCy[indy]\nindslicezplot = mesh.gridCC[:, 2] == mesh.vectorCCz[indz]\nfor i in range(3):\n for j, (plane, indd) in enumerate(\n zip([[1, 2], [0, 2], [0, 1]], [indslicexplot, indsliceyplot, indslicezplot])\n ):\n utils.plot2Ddata(\n mesh.gridCC[indd][:, plane],\n true_geology[indd],\n nx=100,\n ny=100,\n contourOpts={\"alpha\": 0},\n clim=[0, 2],\n ax=ax[i, j],\n level=True,\n ncontour=2,\n levelOpts={\"colors\": \"grey\", \"linewidths\": 2, \"linestyles\": \"--\"},\n method=\"nearest\",\n )\n\n# plot the locations of the cross-sections\nfor i in range(3):\n ax[i, 0].plot(\n mesh.vectorCCy[indy] * np.ones(2), [-300, 500], c=\"k\", linestyle=\"dotted\"\n )\n ax[i, 0].plot(\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n mesh.vectorCCz[indz] * np.ones(2),\n c=\"k\",\n linestyle=\"dotted\",\n )\n ax[i, 0].set_xlim(\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n )\n\n ax[i, 1].plot(\n mesh.vectorCCx[indx] * np.ones(2), [-300, 500], c=\"k\", linestyle=\"dotted\"\n )\n ax[i, 1].plot(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n mesh.vectorCCz[indz] * np.ones(2),\n c=\"k\",\n linestyle=\"dotted\",\n )\n ax[i, 1].set_xlim(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n )\n\n ax[i, 2].plot(\n mesh.vectorCCx[indx] * np.ones(2),\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n c=\"k\",\n linestyle=\"dotted\",\n )\n ax[i, 2].plot(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n mesh.vectorCCy[indy] * np.ones(2),\n c=\"k\",\n linestyle=\"dotted\",\n )\n ax[i, 2].set_xlim(\n [\n data_mag.survey.receiver_locations[:, 0].min(),\n data_mag.survey.receiver_locations[:, 0].max(),\n ],\n )\n ax[i, 2].set_ylim(\n [\n data_mag.survey.receiver_locations[:, 1].min(),\n data_mag.survey.receiver_locations[:, 1].max(),\n ],\n )\n\nplt.tight_layout()\nplt.show()\n\n# Plot the 2D GMM\nfig = plt.figure(figsize=(10, 10))\nax0 = plt.subplot2grid((4, 4), (3, 1), colspan=3)\nax1 = plt.subplot2grid((4, 4), (0, 1), colspan=3, rowspan=3)\nax2 = plt.subplot2grid((4, 4), (0, 0), rowspan=3)\nax = [ax0, ax1, ax2]\nreg.objfcts[0].gmm.plot_pdf(flag2d=True, ax=ax, padding=0.5)\nax[0].set_xlabel(\"Density contrast [g/cc]\")\nax[0].set_ylim([0, 5])\nax[2].set_xlim([0, 50])\nax[2].set_ylabel(\"magnetic Susceptibility [SI]\")\nax[1].scatter(\n density_model[actv],\n magsus_model[actv],\n c=quasi_geology_model[actv],\n cmap=\"inferno_r\",\n edgecolors=\"k\",\n label=\"recovered PGI model\",\n alpha=0.5,\n)\nax[1].legend()\nax[0].hist(density_model[actv], density=True, bins=50)\nax[2].hist(magsus_model[actv], density=True, bins=50, orientation=\"horizontal\")\nplt.show()\n",
"from __future__ import print_function\nimport unittest\nimport numpy as np\nimport discretize\nfrom SimPEG import (\n maps,\n data_misfit,\n regularization,\n inversion,\n optimization,\n inverse_problem,\n tests,\n utils,\n)\nfrom SimPEG.utils import mkvc\nfrom SimPEG.electromagnetics import resistivity as dc\nfrom pymatsolver import Pardiso\nimport shutil\n\nnp.random.seed(40)\n\nTOL = 1e-5\nFLR = 1e-20 # \"zero\", so if residual below this --> pass regardless of order\n\n\nclass DCProblemTestsCC(unittest.TestCase):\n def setUp(self):\n\n aSpacing = 2.5\n nElecs = 5\n\n surveySize = nElecs * aSpacing - aSpacing\n cs = surveySize / nElecs / 4\n\n mesh = discretize.TensorMesh(\n [\n [(cs, 10, -1.3), (cs, surveySize / cs), (cs, 10, 1.3)],\n [(cs, 3, -1.3), (cs, 3, 1.3)],\n # [(cs, 5, -1.3), (cs, 10)]\n ],\n \"CN\",\n )\n\n source_list = dc.utils.WennerSrcList(nElecs, aSpacing, in2D=True)\n survey = dc.survey.Survey(source_list)\n simulation = dc.simulation.Simulation3DCellCentered(\n mesh=mesh, survey=survey, rhoMap=maps.IdentityMap(mesh)\n )\n\n mSynth = np.ones(mesh.nC)\n dobs = simulation.make_synthetic_data(mSynth, add_noise=True)\n\n # Now set up the problem to do some minimization\n dmis = data_misfit.L2DataMisfit(simulation=simulation, data=dobs)\n reg = regularization.Tikhonov(mesh)\n opt = optimization.InexactGaussNewton(\n maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6\n )\n invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)\n inv = inversion.BaseInversion(invProb)\n\n self.inv = inv\n self.reg = reg\n self.p = simulation\n self.mesh = mesh\n self.m0 = mSynth\n self.survey = survey\n self.dmis = dmis\n self.dobs = dobs\n\n def test_misfit(self):\n passed = tests.checkDerivative(\n lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],\n self.m0,\n plotIt=False,\n num=3,\n )\n self.assertTrue(passed)\n\n def test_adjoint(self):\n # Adjoint Test\n # u = np.random.rand(self.mesh.nC*self.survey.nSrc)\n v = np.random.rand(self.mesh.nC)\n w = np.random.rand(mkvc(self.dobs).shape[0])\n wtJv = w.dot(self.p.Jvec(self.m0, v))\n vtJtw = v.dot(self.p.Jtvec(self.m0, w))\n passed = np.abs(wtJv - vtJtw) < 1e-10\n print(\"Adjoint Test\", np.abs(wtJv - vtJtw), passed)\n self.assertTrue(passed)\n\n def test_dataObj(self):\n passed = tests.checkDerivative(\n lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=6\n )\n self.assertTrue(passed)\n\n\nclass DCProblemTestsCC_fields(unittest.TestCase):\n def setUp(self):\n cs = 10\n nc = 20\n npad = 10\n mesh = discretize.CylMesh(\n [\n [(cs, nc), (cs, npad, 1.3)],\n np.r_[2 * np.pi],\n [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)],\n ]\n )\n\n mesh.x0 = np.r_[0.0, 0.0, -mesh.hz[: npad + nc].sum()]\n\n # receivers\n rx_x = np.linspace(10, 200, 20)\n rx_z = np.r_[-5]\n rx_locs = utils.ndgrid([rx_x, np.r_[0], rx_z])\n rx_list = [dc.receivers.BaseRx(rx_locs, projField=\"e\", orientation=\"x\")]\n\n # sources\n src_a = np.r_[0.0, 0.0, -5.0]\n src_b = np.r_[55.0, 0.0, -5.0]\n\n src_list = [dc.sources.Dipole(rx_list, location_a=src_a, location_b=src_b)]\n\n self.mesh = mesh\n self.survey = dc.survey.Survey(src_list)\n self.sigma_map = maps.ExpMap(mesh) * maps.InjectActiveCells(\n mesh, mesh.gridCC[:, 2] <= 0, np.log(1e-8)\n )\n self.prob = dc.simulation.Simulation3DCellCentered(\n mesh=mesh,\n survey=self.survey,\n sigmaMap=self.sigma_map,\n solver=Pardiso,\n bc_type=\"Dirichlet\",\n )\n\n def test_e_deriv(self):\n x0 = -1 + 1e-1 * np.random.rand(self.sigma_map.nP)\n\n def fun(x):\n return 
self.prob.dpred(x), lambda x: self.prob.Jvec(x0, x)\n\n return tests.checkDerivative(fun, x0, num=3, plotIt=False)\n\n def test_e_adjoint(self):\n print(\"Adjoint Test for e\")\n\n m = -1 + 1e-1 * np.random.rand(self.sigma_map.nP)\n u = self.prob.fields(m)\n # u = u[self.survey.source_list,'e']\n\n v = np.random.rand(self.survey.nD)\n w = np.random.rand(self.sigma_map.nP)\n\n vJw = v.dot(self.prob.Jvec(m, w, u))\n wJtv = w.dot(self.prob.Jtvec(m, v, u))\n tol = np.max([TOL * (10 ** int(np.log10(np.abs(vJw)))), FLR])\n print(\n \"vJw: {:1.2e}, wJTv: {:1.2e}, tol: {:1.0e}, passed: {}\\n\".format(\n vJw, wJtv, vJw - wJtv, tol, np.abs(vJw - wJtv) < tol\n )\n )\n return np.abs(vJw - wJtv) < tol\n\n\nclass DCProblemTestsN(unittest.TestCase):\n def setUp(self):\n\n aSpacing = 2.5\n nElecs = 10\n\n surveySize = nElecs * aSpacing - aSpacing\n cs = surveySize / nElecs / 4\n\n mesh = discretize.TensorMesh(\n [\n [(cs, 10, -1.3), (cs, surveySize / cs), (cs, 10, 1.3)],\n [(cs, 3, -1.3), (cs, 3, 1.3)],\n # [(cs, 5, -1.3), (cs, 10)]\n ],\n \"CN\",\n )\n\n source_list = dc.utils.WennerSrcList(nElecs, aSpacing, in2D=True)\n survey = dc.survey.Survey(source_list)\n simulation = dc.simulation.Simulation3DNodal(\n mesh=mesh, survey=survey, rhoMap=maps.IdentityMap(mesh)\n )\n\n mSynth = np.ones(mesh.nC)\n dobs = simulation.make_synthetic_data(mSynth, add_noise=True)\n\n # Now set up the problem to do some minimization\n dmis = data_misfit.L2DataMisfit(simulation=simulation, data=dobs)\n reg = regularization.Tikhonov(mesh)\n opt = optimization.InexactGaussNewton(\n maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6\n )\n invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)\n inv = inversion.BaseInversion(invProb)\n\n self.inv = inv\n self.reg = reg\n self.p = simulation\n self.mesh = mesh\n self.m0 = mSynth\n self.survey = survey\n self.dmis = dmis\n self.dobs = dobs\n\n def test_misfit(self):\n passed = tests.checkDerivative(\n lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],\n self.m0,\n plotIt=False,\n num=3,\n )\n self.assertTrue(passed)\n\n def test_adjoint(self):\n # Adjoint Test\n # u = np.random.rand(self.mesh.nC*self.survey.nSrc)\n v = np.random.rand(self.mesh.nC)\n w = np.random.rand(mkvc(self.dobs).shape[0])\n wtJv = w.dot(self.p.Jvec(self.m0, v))\n vtJtw = v.dot(self.p.Jtvec(self.m0, w))\n passed = np.abs(wtJv - vtJtw) < 1e-8\n print(\"Adjoint Test\", np.abs(wtJv - vtJtw), passed)\n self.assertTrue(passed)\n\n def test_dataObj(self):\n passed = tests.checkDerivative(\n lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=3\n )\n self.assertTrue(passed)\n\n\nclass DCProblemTestsN_Robin(unittest.TestCase):\n def setUp(self):\n\n aSpacing = 2.5\n nElecs = 10\n\n surveySize = nElecs * aSpacing - aSpacing\n cs = surveySize / nElecs / 4\n\n mesh = discretize.TensorMesh(\n [\n [(cs, 10, -1.3), (cs, surveySize / cs), (cs, 10, 1.3)],\n [(cs, 3, -1.3), (cs, 3, 1.3)],\n # [(cs, 5, -1.3), (cs, 10)]\n ],\n \"CN\",\n )\n\n source_list = dc.utils.WennerSrcList(nElecs, aSpacing, in2D=True)\n survey = dc.survey.Survey(source_list)\n simulation = dc.simulation.Simulation3DNodal(\n mesh=mesh, survey=survey, rhoMap=maps.IdentityMap(mesh), bc_type=\"Robin\"\n )\n\n mSynth = np.ones(mesh.nC)\n dobs = simulation.make_synthetic_data(mSynth, add_noise=True)\n\n # Now set up the problem to do some minimization\n dmis = data_misfit.L2DataMisfit(simulation=simulation, data=dobs)\n reg = regularization.Tikhonov(mesh)\n opt = optimization.InexactGaussNewton(\n 
maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6\n )\n invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)\n inv = inversion.BaseInversion(invProb)\n\n self.inv = inv\n self.reg = reg\n self.p = simulation\n self.mesh = mesh\n self.m0 = mSynth\n self.survey = survey\n self.dmis = dmis\n self.dobs = dobs\n\n def test_misfit(self):\n passed = tests.checkDerivative(\n lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],\n self.m0,\n plotIt=False,\n num=3,\n )\n self.assertTrue(passed)\n\n def test_adjoint(self):\n # Adjoint Test\n # u = np.random.rand(self.mesh.nC*self.survey.nSrc)\n v = np.random.rand(self.mesh.nC)\n w = np.random.rand(mkvc(self.dobs).shape[0])\n wtJv = w.dot(self.p.Jvec(self.m0, v))\n vtJtw = v.dot(self.p.Jtvec(self.m0, w))\n passed = np.abs(wtJv - vtJtw) < 1e-8\n print(\"Adjoint Test\", np.abs(wtJv - vtJtw), passed)\n self.assertTrue(passed)\n\n def test_dataObj(self):\n passed = tests.checkDerivative(\n lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=3\n )\n self.assertTrue(passed)\n\n\nclass DCProblemTestsCC_storeJ(unittest.TestCase):\n def setUp(self):\n\n aSpacing = 2.5\n nElecs = 5\n\n surveySize = nElecs * aSpacing - aSpacing\n cs = surveySize / nElecs / 4\n\n mesh = discretize.TensorMesh(\n [\n [(cs, 10, -1.3), (cs, surveySize / cs), (cs, 10, 1.3)],\n [(cs, 3, -1.3), (cs, 3, 1.3)],\n # [(cs, 5, -1.3), (cs, 10)]\n ],\n \"CN\",\n )\n\n source_list = dc.utils.WennerSrcList(nElecs, aSpacing, in2D=True)\n survey = dc.survey.Survey(source_list)\n simulation = dc.simulation.Simulation3DCellCentered(\n mesh=mesh, survey=survey, rhoMap=maps.IdentityMap(mesh), storeJ=True\n )\n\n mSynth = np.ones(mesh.nC)\n dobs = simulation.make_synthetic_data(mSynth, add_noise=True)\n\n # Now set up the problem to do some minimization\n dmis = data_misfit.L2DataMisfit(simulation=simulation, data=dobs)\n reg = regularization.Tikhonov(mesh)\n opt = optimization.InexactGaussNewton(\n maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6\n )\n invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)\n inv = inversion.BaseInversion(invProb)\n\n self.inv = inv\n self.reg = reg\n self.p = simulation\n self.mesh = mesh\n self.m0 = mSynth\n self.survey = survey\n self.dmis = dmis\n self.dobs = dobs\n\n def test_misfit(self):\n passed = tests.checkDerivative(\n lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],\n self.m0,\n plotIt=False,\n num=3,\n )\n self.assertTrue(passed)\n\n def test_adjoint(self):\n # Adjoint Test\n # u = np.random.rand(self.mesh.nC*self.survey.nSrc)\n v = np.random.rand(self.mesh.nC)\n w = np.random.rand(mkvc(self.dobs).shape[0])\n wtJv = w.dot(self.p.Jvec(self.m0, v))\n vtJtw = v.dot(self.p.Jtvec(self.m0, w))\n passed = np.abs(wtJv - vtJtw) < 1e-10\n print(\"Adjoint Test\", np.abs(wtJv - vtJtw), passed)\n self.assertTrue(passed)\n\n def test_dataObj(self):\n passed = tests.checkDerivative(\n lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=4\n )\n self.assertTrue(passed)\n\n def tearDown(self):\n # Clean up the working directory\n try:\n shutil.rmtree(self.p.sensitivity_path)\n except FileNotFoundError:\n pass\n\n\nclass DCProblemTestsN_storeJ(unittest.TestCase):\n def setUp(self):\n\n aSpacing = 2.5\n nElecs = 10\n\n surveySize = nElecs * aSpacing - aSpacing\n cs = surveySize / nElecs / 4\n\n mesh = discretize.TensorMesh(\n [\n [(cs, 10, -1.3), (cs, surveySize / cs), (cs, 10, 1.3)],\n [(cs, 3, -1.3), (cs, 3, 1.3)],\n # [(cs, 5, -1.3), 
(cs, 10)]\n ],\n \"CN\",\n )\n\n source_list = dc.utils.WennerSrcList(nElecs, aSpacing, in2D=True)\n survey = dc.survey.Survey(source_list)\n simulation = dc.simulation.Simulation3DNodal(\n mesh=mesh, survey=survey, rhoMap=maps.IdentityMap(mesh), storeJ=True\n )\n\n mSynth = np.ones(mesh.nC)\n dobs = simulation.make_synthetic_data(mSynth, add_noise=True)\n\n # Now set up the problem to do some minimization\n dmis = data_misfit.L2DataMisfit(simulation=simulation, data=dobs)\n reg = regularization.Tikhonov(mesh)\n opt = optimization.InexactGaussNewton(\n maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6\n )\n invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)\n inv = inversion.BaseInversion(invProb)\n\n self.inv = inv\n self.reg = reg\n self.p = simulation\n self.mesh = mesh\n self.m0 = mSynth\n self.survey = survey\n self.dmis = dmis\n self.dobs = dobs\n\n def test_misfit(self):\n passed = tests.checkDerivative(\n lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],\n self.m0,\n plotIt=False,\n num=3,\n )\n self.assertTrue(passed)\n\n def test_adjoint(self):\n # Adjoint Test\n # u = np.random.rand(self.mesh.nC*self.survey.nSrc)\n v = np.random.rand(self.mesh.nC)\n w = np.random.rand(mkvc(self.dobs).shape[0])\n wtJv = w.dot(self.p.Jvec(self.m0, v))\n vtJtw = v.dot(self.p.Jtvec(self.m0, w))\n passed = np.abs(wtJv - vtJtw) < 1e-8\n print(\"Adjoint Test\", np.abs(wtJv - vtJtw), passed)\n self.assertTrue(passed)\n\n def test_dataObj(self):\n passed = tests.checkDerivative(\n lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=3\n )\n self.assertTrue(passed)\n\n def tearDown(self):\n # Clean up the working directory\n try:\n shutil.rmtree(self.p.sensitivity_path)\n except FileNotFoundError:\n pass\n\n\nclass DCProblemTestsN_storeJ_Robin(unittest.TestCase):\n def setUp(self):\n\n aSpacing = 2.5\n nElecs = 10\n\n surveySize = nElecs * aSpacing - aSpacing\n cs = surveySize / nElecs / 4\n\n mesh = discretize.TensorMesh(\n [\n [(cs, 10, -1.3), (cs, surveySize / cs), (cs, 10, 1.3)],\n [(cs, 3, -1.3), (cs, 3, 1.3)],\n # [(cs, 5, -1.3), (cs, 10)]\n ],\n \"CN\",\n )\n\n source_list = dc.utils.WennerSrcList(nElecs, aSpacing, in2D=True)\n survey = dc.survey.Survey(source_list)\n simulation = dc.simulation.Simulation3DNodal(\n mesh=mesh,\n survey=survey,\n rhoMap=maps.IdentityMap(mesh),\n storeJ=True,\n bc_type=\"Robin\",\n )\n\n mSynth = np.ones(mesh.nC)\n dobs = simulation.make_synthetic_data(mSynth, add_noise=True)\n\n # Now set up the problem to do some minimization\n dmis = data_misfit.L2DataMisfit(simulation=simulation, data=dobs)\n reg = regularization.Tikhonov(mesh)\n opt = optimization.InexactGaussNewton(\n maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6\n )\n invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)\n inv = inversion.BaseInversion(invProb)\n\n self.inv = inv\n self.reg = reg\n self.p = simulation\n self.mesh = mesh\n self.m0 = mSynth\n self.survey = survey\n self.dmis = dmis\n self.dobs = dobs\n\n def test_misfit(self):\n passed = tests.checkDerivative(\n lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],\n self.m0,\n plotIt=False,\n num=3,\n )\n self.assertTrue(passed)\n\n def test_adjoint(self):\n # Adjoint Test\n # u = np.random.rand(self.mesh.nC*self.survey.nSrc)\n v = np.random.rand(self.mesh.nC)\n w = np.random.rand(mkvc(self.dobs).shape[0])\n wtJv = w.dot(self.p.Jvec(self.m0, v))\n vtJtw = v.dot(self.p.Jtvec(self.m0, w))\n passed = np.abs(wtJv - vtJtw) < 
1e-8\n print(\"Adjoint Test\", np.abs(wtJv - vtJtw), passed)\n self.assertTrue(passed)\n\n def test_dataObj(self):\n passed = tests.checkDerivative(\n lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=3\n )\n self.assertTrue(passed)\n\n def tearDown(self):\n # Clean up the working directory\n try:\n shutil.rmtree(self.p.sensitivity_path)\n except FileNotFoundError:\n pass\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import numpy as np\nimport scipy.sparse as sp\nimport warnings\nimport properties\n\nfrom .base import BaseRegularization, BaseComboRegularization\nfrom .. import utils\n\n\nclass BaseSparse(BaseRegularization):\n \"\"\"\n Base class for building up the components of the Sparse Regularization\n \"\"\"\n\n def __init__(self, mesh, **kwargs):\n self._stashedR = None\n super(BaseSparse, self).__init__(mesh=mesh, **kwargs)\n\n model = properties.Array(\"current model\", dtype=float)\n\n epsilon = properties.Float(\n \"Threshold value for the model norm\", default=1e-3, required=True\n )\n\n norm = properties.Array(\"norm used\", dtype=float)\n\n space = properties.String(\"By default inherit the objctive\", default=\"linear\")\n\n gradientType = properties.String(\"type of gradient\", default=\"components\")\n\n scale = properties.Array(\"General nob for scaling\", dtype=float,)\n\n # Give the option to scale or not\n scaledIRLS = properties.Bool(\"Scale the gradients of the IRLS norms\", default=True)\n\n @properties.validator(\"scale\")\n def _validate_scale(self, change):\n if change[\"value\"] is not None:\n # todo: residual size? we need to know the expected end shape\n if self._nC_residual != \"*\":\n assert (\n len(change[\"value\"]) == self._nC_residual\n ), \"scale must be length {} not {}\".format(\n self._nC_residual, len(change[\"value\"])\n )\n\n @property\n def stashedR(self):\n return self._stashedR\n\n @stashedR.setter\n def stashedR(self, value):\n self._stashedR = value\n\n\nclass SparseSmall(BaseSparse):\n \"\"\"\n Sparse smallness regularization\n\n **Inputs**\n\n :param int norm: norm on the smallness\n \"\"\"\n\n _multiplier_pair = \"alpha_s\"\n\n def __init__(self, mesh, **kwargs):\n super(SparseSmall, self).__init__(mesh=mesh, **kwargs)\n\n # Give the option to scale or not\n scaledIRLS = properties.Bool(\"Scale the gradients of the IRLS norms\", default=True)\n\n @property\n def f_m(self):\n\n return self.mapping * self._delta_m(self.model)\n\n @property\n def W(self):\n if getattr(self, \"model\", None) is None:\n R = utils.speye(self.mapping.shape[0])\n else:\n r = self.R(self.f_m)\n R = utils.sdiag(r)\n\n if self.scale is None:\n self.scale = np.ones(self.mapping.shape[0])\n\n weights = self.scale * self.regmesh.vol\n\n if self.cell_weights is not None:\n weights *= self.cell_weights\n\n return utils.sdiag((weights ** 0.5)) * R\n\n def R(self, f_m):\n # if R is stashed, return that instead\n if getattr(self, \"stashedR\") is not None:\n return self.stashedR\n\n # Default\n eta = np.ones_like(f_m)\n\n if self.scaledIRLS:\n # Eta scaling is important for mix-norms...do not mess with it\n # Scale on l2-norm gradient: f_m.max()\n maxVal = np.ones_like(f_m) * np.abs(f_m).max()\n\n # Compute theoritical maximum gradients for p < 1\n maxVal[self.norm < 1] = self.epsilon / np.sqrt(\n 1.0 - self.norm[self.norm < 1]\n )\n maxGrad = maxVal / (maxVal ** 2.0 + self.epsilon ** 2.0) ** (\n 1.0 - self.norm / 2.0\n )\n # Scaling factor\n eta[maxGrad != 0] = np.abs(f_m).max() / maxGrad[maxGrad != 0]\n\n # Scaled IRLS weights\n r = (eta / (f_m ** 2.0 + self.epsilon ** 2.0) ** (1.0 - self.norm / 2.0)) ** 0.5\n\n self.stashedR = r # stash on the first calculation\n return r\n\n @utils.timeIt\n def deriv(self, m):\n \"\"\"\n\n The regularization is:\n\n .. math::\n\n R(m) = \\\\frac{1}{2}\\mathbf{(m-m_\\\\text{ref})^\\\\top W^\\\\top\n W(m-m_\\\\text{ref})}\n\n So the derivative is straight forward:\n\n .. 
math::\n\n R(m) = \\mathbf{W^\\\\top W (m-m_\\\\text{ref})}\n\n \"\"\"\n\n mD = self.mapping.deriv(self._delta_m(m))\n r = self.W * (self.mapping * (self._delta_m(m)))\n return mD.T * (self.W.T * r)\n\n\nclass SparseDeriv(BaseSparse):\n \"\"\"\n Base Class for sparse regularization on first spatial derivatives\n \"\"\"\n\n def __init__(self, mesh, orientation=\"x\", **kwargs):\n self.orientation = orientation\n super(SparseDeriv, self).__init__(mesh=mesh, **kwargs)\n\n mrefInSmooth = properties.Bool(\n \"include mref in the smoothness calculation?\", default=False\n )\n\n # Give the option to scale or not\n scaledIRLS = properties.Bool(\"Scale the gradients of the IRLS norms\", default=True)\n\n @utils.timeIt\n def __call__(self, m):\n \"\"\"\n We use a weighted 2-norm objective function\n\n .. math::\n\n r(m) = \\\\frac{1}{2}\n \"\"\"\n if self.mrefInSmooth:\n\n f_m = self._delta_m(m)\n\n else:\n f_m = m\n if self.scale is None:\n self.scale = np.ones(self.mapping.shape[0])\n\n if self.space == \"spherical\":\n ave_cc_f = getattr(self.regmesh, \"aveCC2F{}\".format(self.orientation))\n\n if getattr(self, \"model\", None) is None:\n R = utils.speye(self.cellDiffStencil.shape[0])\n\n else:\n r = self.R(self.f_m)\n R = utils.sdiag(r)\n\n weights = self.scale * self.regmesh.vol\n\n if self.cell_weights is not None:\n weights *= self.cell_weights\n\n W = utils.sdiag((ave_cc_f * weights ** 0.5)) * R\n\n theta = self.cellDiffStencil * (self.mapping * f_m)\n dm_dx = utils.mat_utils.coterminal(theta)\n r = W * dm_dx\n\n else:\n r = self.W * (self.mapping * f_m)\n\n return 0.5 * r.dot(r)\n\n def R(self, f_m):\n # if R is stashed, return that instead\n if getattr(self, \"stashedR\") is not None:\n return self.stashedR\n\n # Default\n eta = np.ones_like(f_m)\n\n if self.scaledIRLS:\n # Eta scaling is important for mix-norms...do not mess with it\n # Scale on l2-norm gradient: f_m.max()\n maxVal = np.ones_like(f_m) * np.abs(f_m).max()\n\n # Compute theoritical maximum gradients for p < 1\n maxVal[self.norm < 1] = self.epsilon / np.sqrt(\n 1.0 - self.norm[self.norm < 1]\n )\n maxGrad = maxVal / (\n maxVal ** 2.0 + (self.epsilon * self.length_scales) ** 2.0\n ) ** (1.0 - self.norm / 2.0)\n\n # Scaling Factor\n eta[maxGrad != 0] = np.abs(f_m).max() / maxGrad[maxGrad != 0]\n\n # Scaled-IRLS weights\n r = (\n eta\n / (f_m ** 2.0 + (self.epsilon * self.length_scales) ** 2.0)\n ** (1.0 - self.norm / 2.0)\n ) ** 0.5\n self.stashedR = r # stash on the first calculation\n return r\n\n @utils.timeIt\n def deriv(self, m):\n \"\"\"\n\n The regularization is:\n\n .. math::\n\n R(m) = \\\\frac{1}{2}\\mathbf{(m-m_\\\\text{ref})^\\\\top W^\\\\top\n W(m-m_\\\\text{ref})}\n\n So the derivative is straight forward:\n\n .. 
math::\n\n R(m) = \\mathbf{W^\\\\top W (m-m_\\\\text{ref})}\n\n \"\"\"\n\n if self.mrefInSmooth:\n\n model = self._delta_m(m)\n\n else:\n model = m\n if self.scale is None:\n self.scale = np.ones(self.mapping.shape[0])\n\n if self.space == \"spherical\":\n ave_cc_f = getattr(self.regmesh, \"aveCC2F{}\".format(self.orientation))\n\n if getattr(self, \"model\", None) is None:\n R = utils.speye(self.cellDiffStencil.shape[0])\n\n else:\n r = self.R(self.f_m)\n R = utils.sdiag(r)\n\n weights = self.scale * self.regmesh.vol\n\n if self.cell_weights is not None:\n weights *= self.cell_weights\n\n W = utils.sdiag((ave_cc_f * weights ** 0.5)) * R\n theta = self.cellDiffStencil * (self.mapping * model)\n dm_dx = utils.mat_utils.coterminal(theta)\n r = W * dm_dx\n\n else:\n r = self.W * (self.mapping * model)\n\n mD = self.mapping.deriv(model)\n return mD.T * (self.W.T * r)\n\n @property\n def _multiplier_pair(self):\n return \"alpha_{orientation}\".format(orientation=self.orientation)\n\n @property\n def f_m(self):\n\n if self.mrefInSmooth:\n\n f_m = self._delta_m(self.model)\n\n else:\n f_m = self.model\n\n if self.space == \"spherical\":\n theta = self.cellDiffStencil * (self.mapping * f_m)\n dm_dx = utils.mat_utils.coterminal(theta)\n\n else:\n\n if self.gradientType == \"total\":\n ave_cc_f = getattr(self.regmesh, \"aveCC2F{}\".format(self.orientation))\n\n dm_dx = np.abs(\n self.regmesh.aveFx2CC\n * self.regmesh.cellDiffxStencil\n * (self.mapping * f_m)\n )\n\n if self.regmesh.dim > 1:\n\n dm_dx += np.abs(\n self.regmesh.aveFy2CC\n * self.regmesh.cellDiffyStencil\n * (self.mapping * f_m)\n )\n\n if self.regmesh.dim > 2:\n\n dm_dx += np.abs(\n self.regmesh.aveFz2CC\n * self.regmesh.cellDiffzStencil\n * (self.mapping * f_m)\n )\n\n dm_dx = ave_cc_f * dm_dx\n\n else:\n dm_dx = self.cellDiffStencil * (self.mapping * f_m)\n\n return dm_dx\n\n @property\n def cellDiffStencil(self):\n return utils.sdiag(self.length_scales) * getattr(\n self.regmesh, \"cellDiff{}Stencil\".format(self.orientation)\n )\n\n @property\n def W(self):\n\n ave_cc_f = getattr(self.regmesh, \"aveCC2F{}\".format(self.orientation))\n\n if getattr(self, \"model\", None) is None:\n R = utils.speye(self.cellDiffStencil.shape[0])\n\n else:\n r = self.R(self.f_m)\n R = utils.sdiag(r)\n if self.scale is None:\n self.scale = np.ones(self.mapping.shape[0])\n\n weights = self.scale * self.regmesh.vol\n\n if self.cell_weights is not None:\n weights *= self.cell_weights\n\n return utils.sdiag((ave_cc_f * weights ** 0.5)) * R * self.cellDiffStencil\n\n @property\n def length_scales(self):\n \"\"\"\n Normalized cell based weighting\n\n \"\"\"\n ave_cc_f = getattr(self.regmesh, \"aveCC2F{}\".format(self.orientation))\n\n if getattr(self, \"_length_scales\", None) is None:\n index = \"xyz\".index(self.orientation)\n\n length_scales = ave_cc_f * (\n self.regmesh.Pac.T * self.regmesh.mesh.h_gridded[:, index]\n )\n\n self._length_scales = length_scales.min() / length_scales\n\n return self._length_scales\n\n @length_scales.setter\n def length_scales(self, value):\n self._length_scales = value\n\n\nclass Sparse(BaseComboRegularization):\n \"\"\"\n The regularization is:\n\n .. math::\n\n R(m) = \\\\frac{1}{2}\\mathbf{(m-m_\\\\text{ref})^\\\\top W^\\\\top R^\\\\top R\n W(m-m_\\\\text{ref})}\n\n where the IRLS weight\n\n .. math::\n\n R = \\eta TO FINISH LATER!!!\n\n So the derivative is straight forward:\n\n .. 
math::\n\n R(m) = \\mathbf{W^\\\\top R^\\\\top R W (m-m_\\\\text{ref})}\n\n The IRLS weights are recomputed after each beta solves.\n It is strongly recommended to do a few Gauss-Newton iterations\n before updating.\n \"\"\"\n\n def __init__(\n self, mesh, alpha_s=1.0, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs\n ):\n\n objfcts = [\n SparseSmall(mesh=mesh, **kwargs),\n SparseDeriv(mesh=mesh, orientation=\"x\", **kwargs),\n ]\n\n if mesh.dim > 1:\n objfcts.append(SparseDeriv(mesh=mesh, orientation=\"y\", **kwargs))\n\n if mesh.dim > 2:\n objfcts.append(SparseDeriv(mesh=mesh, orientation=\"z\", **kwargs))\n\n super(Sparse, self).__init__(\n mesh=mesh,\n objfcts=objfcts,\n alpha_s=alpha_s,\n alpha_x=alpha_x,\n alpha_y=alpha_y,\n alpha_z=alpha_z,\n **kwargs\n )\n\n # Utils.setKwargs(self, **kwargs)\n\n # Properties\n norms = properties.Array(\n \"Norms used to create the sparse regularization\",\n default=np.c_[2.0, 2.0, 2.0, 2.0],\n shape={(\"*\", \"*\")},\n )\n\n eps_p = properties.Float(\"Threshold value for the model norm\", required=True)\n\n eps_q = properties.Float(\n \"Threshold value for the model gradient norm\", required=True\n )\n\n model = properties.Array(\"current model\", dtype=float)\n\n space = properties.String(\"type of model\", default=\"linear\")\n\n gradientType = properties.String(\"type of gradient\", default=\"components\")\n\n scales = properties.Array(\n \"General nob for scaling\", default=np.c_[1.0, 1.0, 1.0, 1.0], shape={(\"*\", \"*\")}\n )\n # Give the option to scale or not\n scaledIRLS = properties.Bool(\"Scale the gradients of the IRLS norms\", default=True)\n # Save the l2 result during the IRLS\n l2model = None\n\n @properties.validator(\"norms\")\n def _validate_norms(self, change):\n if change[\"value\"].shape[0] == 1:\n change[\"value\"] = np.kron(\n np.ones((self.regmesh.Pac.shape[1], 1)), change[\"value\"]\n )\n elif change[\"value\"].shape[0] > 1:\n assert change[\"value\"].shape[0] == self.regmesh.Pac.shape[1], (\n \"Vector of norms must be the size\"\n \" of active model parameters ({})\"\n \"The provided vector has length \"\n \"{}\".format(self.regmesh.Pac.shape[0], len(change[\"value\"]))\n )\n\n # Observers\n @properties.observer(\"norms\")\n def _mirror_norms_to_objfcts(self, change):\n\n self.objfcts[0].norm = change[\"value\"][:, 0]\n for i, objfct in enumerate(self.objfcts[1:]):\n ave_cc_f = getattr(objfct.regmesh, \"aveCC2F{}\".format(objfct.orientation))\n objfct.norm = ave_cc_f * change[\"value\"][:, i + 1]\n\n @properties.observer(\"model\")\n def _mirror_model_to_objfcts(self, change):\n for objfct in self.objfcts:\n objfct.model = change[\"value\"]\n\n @properties.observer(\"eps_p\")\n def _mirror_eps_p_to_smallness(self, change):\n for objfct in self.objfcts:\n if isinstance(objfct, SparseSmall):\n objfct.epsilon = change[\"value\"]\n\n @properties.observer(\"eps_q\")\n def _mirror_eps_q_to_derivs(self, change):\n for objfct in self.objfcts:\n if isinstance(objfct, SparseDeriv):\n objfct.epsilon = change[\"value\"]\n\n @properties.observer(\"space\")\n def _mirror_space_to_objfcts(self, change):\n for objfct in self.objfcts:\n objfct.space = change[\"value\"]\n\n @properties.observer(\"gradientType\")\n def _mirror_gradientType_to_objfcts(self, change):\n for objfct in self.objfcts:\n objfct.gradientType = change[\"value\"]\n\n @properties.observer(\"scaledIRLS\")\n def _mirror_scaledIRLS_to_objfcts(self, change):\n for objfct in self.objfcts:\n objfct.scaledIRLS = change[\"value\"]\n\n @properties.validator(\"scales\")\n def 
_validate_scales(self, change):\n if change[\"value\"].shape[0] == 1:\n change[\"value\"] = np.kron(\n np.ones((self.regmesh.Pac.shape[1], 1)), change[\"value\"]\n )\n elif change[\"value\"].shape[0] > 1:\n assert change[\"value\"].shape[0] == self.regmesh.Pac.shape[1], (\n \"Vector of scales must be the size\"\n \" of active model parameters ({})\"\n \"The provided vector has length \"\n \"{}\".format(self.regmesh.Pac.shape[0], len(change[\"value\"]))\n )\n\n # Observers\n @properties.observer(\"scales\")\n def _mirror_scale_to_objfcts(self, change):\n for i, objfct in enumerate(self.objfcts):\n objfct.scale = change[\"value\"][:, i]\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"numpy.sum",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.genfromtxt",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.random.randn",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
],
[
"numpy.log",
"numpy.abs",
"numpy.linspace",
"numpy.random.seed",
"numpy.ones",
"numpy.random.rand"
],
[
"numpy.abs",
"numpy.ones_like",
"numpy.sqrt",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
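Every `test_adjoint` in the SimPEG test file above checks the same identity: for a linear map J, `w.dot(Jvec(m, v))` must agree with `v.dot(Jtvec(m, w))` up to round-off. A minimal sketch of that check with a random dense matrix standing in for the sensitivity; the shapes, seed, and tolerance below are illustrative rather than SimPEG's actual operators:

```python
import numpy as np

rng = np.random.default_rng(40)
J = rng.standard_normal((25, 40))   # stand-in "sensitivity", nD x nC
v = rng.standard_normal(40)         # model-space vector
w = rng.standard_normal(25)         # data-space vector

wtJv = w.dot(J @ v)                 # w . (J v)
vtJtw = v.dot(J.T @ w)              # v . (J^T w)
passed = abs(wtJv - vtJtw) < 1e-10
print("Adjoint Test", abs(wtJv - vtJtw), passed)
assert passed
```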
nicococo/ClusterSvdd | [
"2f61c187a3197c807b239202b72d9c84cb46400c"
] | [
"ClusterSVDD/svdd_primal_sgd.py"
] | [
"__author__ = 'nicococo'\nimport numpy as np\n\nfrom numba import autojit\n\n\nclass SvddPrimalSGD(object):\n \"\"\" Primal subgradient descent solver for the support vector data description (SVDD).\n Author: Nico Goernitz, TU Berlin, 2015\n \"\"\"\n PRECISION = 10**-3 # important: effects the threshold, support vectors and speed!\n nu = 0.95\t # (scalar) the regularization constant > 0\n c = None # (vecor) center of the hypersphere\n radius2 = 0.0 # (scalar) the optimized threshold (rho)\n pobj = 0.0 # (scalar) primal objective after training\n\n def __init__(self, nu):\n self.nu = nu\n print('Creating new primal SVDD with nu={0}.'.format(nu))\n\n @autojit\n def fit(self, X, max_iter=20000, prec=1e-6, rate=0.01):\n if X.shape[1] < 1:\n print('Invalid training data.')\n return -1, -1\n self.c, self.radius2, self.pobj, iter = fit_extern(X, self.nu, max_iter, prec, rate)\n print('Iter={2}: obj={0} T={1}'.format(self.pobj, self.radius2, iter+1))\n return self.c, self.radius2\n\n def get_radius(self):\n return self.radius2\n\n def predict(self, X):\n # X : (dims x samples)\n dist = self.c.T.dot(self.c) - 2.*self.c.T.dot(X) + np.sum(X*X, axis=0)\n return dist - self.radius2\n\n\n@autojit(nopython=True)\ndef fit_extern(X, nu, max_iter, prec, rate):\n \"\"\" Subgradient descent solver for primal SVDD.\n Optimized for 'numba'\n \"\"\"\n (dims, samples) = X.shape\n\n # number of training examples\n reg = 1./(np.float64(samples)*nu)\n\n # center of mass\n c = np.zeros(dims, dtype=np.float64)\n # np.sum(X*X, axis=0)\n sum_XX = np.zeros(samples)\n for s in range(samples):\n foo = 0.0\n for d in range(dims):\n foo += X[d, s]*X[d, s]\n c[d] += X[d, s] / np.float64(samples)\n sum_XX[s] = foo\n # print np.sum(np.abs(c-np.mean(X, axis=1)))\n\n dot_2cX = np.zeros(samples, dtype=np.float64)\n for s in range(samples):\n dot_2cX[s] = 2.0 * np.sum(c*X[:, s])\n dist = np.sum(c*c) - dot_2cX + sum_XX\n\n T = 0.4 * np.max(dist) * (1.0-nu) # starting heuristic T\n # if nu exceeds 1.0, then T^* is always 0 and c can\n # be computed analytically (as center-of-mass, mean)\n if nu >= 1.0:\n return c, 0.0, 0.0, 0\n\n is_converged = False\n best_c = c\n best_radius2 = T\n obj_best = np.float64(1e20)\n\n obj_bak = -100.\n iter = 0\n\n # gradient step for center\n dc = np.zeros(dims, dtype=np.float64)\n inds = np.zeros(samples, dtype=np.int64)\n while not is_converged and iter < max_iter:\n # print iter\n for s in range(samples):\n dot_2cX[s] = 2.0 * np.sum(c*X[:, s])\n\n # calculate the distances of the center to each datapoint\n dist = np.sum(c*c) - dot_2cX + sum_XX\n inds_size = 0\n for s in range(samples):\n if dist[s]-T >= 1e-12:\n inds[inds_size] = s\n inds_size += 1\n # we need at least 1 entry, hence lower T to the maximum entry\n if inds_size == 0:\n inds_size = 1\n inds[0] = np.argmax(dist)\n T = dist[inds[0]]\n\n # real objective value given the current center c and threshold T\n ds = 0.0\n for s in range(inds_size):\n ds += dist[inds[s]] - T\n obj = T + reg*ds\n\n # this is subgradient, hence need to store the best solution so far\n if obj_best >= obj:\n best_c = c\n best_radius2 = T\n obj_best = obj\n\n # stop, if progress is too slow\n if obj > 0.:\n if np.abs((obj-obj_bak)/obj) < prec:\n is_converged = True\n continue\n obj_bak = obj\n\n # stepsize should be not more than 0.1 % of the maximum value encountered in dist\n max_change = rate * np.max(dist) / np.float(iter+1)*10.\n\n # gradient step for threshold\n dT = 1.0 - reg*np.float(inds_size)\n T -= np.sign(dT) * max_change\n\n # gradient step for center\n 
norm_dc = 0.0\n for d in range(dims):\n dc[d] = 0.0\n for s in range(inds_size):\n dc[d] += 2.*reg*(c[d] - X[d, inds[s]])\n norm_dc += dc[d]*dc[d]\n norm_dc = np.sqrt(norm_dc)\n\n if np.abs(norm_dc) < 1e-12:\n norm_dc = 1.0\n\n for d in range(dims):\n c[d] -= dc[d]/norm_dc * max_change\n iter += 1\n\n return best_c, best_radius2, obj_best, iter\n"
] | [
[
"numpy.sqrt",
"numpy.abs",
"numpy.sign",
"numpy.max",
"numpy.argmax",
"numpy.float64",
"numpy.zeros",
"numpy.sum",
"numpy.float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
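The SVDD solver above scores a point by its squared distance to the learned center minus the squared radius, with the data stored as (dims x samples). A minimal sketch of that decision rule; the center, radius, and data here are made up, whereas in the solver they come from `fit`:

```python
import numpy as np

c = np.array([0.5, -0.2])     # hypothetical center of the hypersphere
radius2 = 0.8                 # hypothetical squared threshold
X = np.random.randn(2, 5)     # data as (dims x samples), as in predict()

# ||x - c||^2 = c.c - 2 c.x + x.x, evaluated column-wise as in the solver
dist = c.T.dot(c) - 2.0 * c.T.dot(X) + np.sum(X * X, axis=0)
scores = dist - radius2       # > 0 means outside the sphere (anomaly)
print(scores > 0)
```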
jhhugo/DeepCTR | [
"12012b06097a4ad69d68e61989b16d2d6f02d741"
] | [
"deepctr/models/ccpm.py"
] | [
"# -*- coding:utf-8 -*-\n\"\"\"\n\nAuthor:\n Weichen Shen,[email protected]\n\nReference:\n [1] Liu Q, Yu F, Wu S, et al. A convolutional click prediction model[C]//Proceedings of the 24th ACM International on Conference on Information and Knowledge Management. ACM, 2015: 1743-1746.\n (http://ir.ia.ac.cn/bitstream/173211/12337/1/A%20Convolutional%20Click%20Prediction%20Model.pdf)\n\n\"\"\"\nimport tensorflow as tf\n\nfrom ..feature_column import build_input_features, get_linear_logit, input_from_feature_columns\nfrom ..layers.core import DNN, PredictionLayer\n\nfrom ..layers.sequence import KMaxPooling\nfrom ..layers.utils import concat_func, add_func\n\n\ndef CCPM(linear_feature_columns, dnn_feature_columns, conv_kernel_width=(6, 5), conv_filters=(4, 4),\n dnn_hidden_units=(256,), l2_reg_linear=1e-5, l2_reg_embedding=1e-5, l2_reg_dnn=0, dnn_dropout=0,\n seed=1024, task='binary'):\n \"\"\"Instantiates the Convolutional Click Prediction Model architecture.\n\n :param linear_feature_columns: An iterable containing all the features used by linear part of the model.\n :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.\n :param conv_kernel_width: list,list of positive integer or empty list,the width of filter in each conv layer.\n :param conv_filters: list,list of positive integer or empty list,the number of filters in each conv layer.\n :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN.\n :param l2_reg_linear: float. L2 regularizer strength applied to linear part\n :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector\n :param l2_reg_dnn: float. L2 regularizer strength applied to DNN\n :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.\n :param init_std: float,to use as the initialize std of embedding vector\n :param task: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n :return: A Keras model instance.\n \"\"\"\n\n if len(conv_kernel_width) != len(conv_filters):\n raise ValueError(\n \"conv_kernel_width must have same element with conv_filters\")\n\n features = build_input_features(\n linear_feature_columns + dnn_feature_columns)\n inputs_list = list(features.values())\n\n linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed,\n l2_reg=l2_reg_linear)\n\n sparse_embedding_list, _ = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding,\n seed, support_dense=False)\n\n n = len(sparse_embedding_list)\n l = len(conv_filters)\n\n conv_input = concat_func(sparse_embedding_list, axis=1)\n pooling_result = tf.keras.layers.Lambda(\n lambda x: tf.expand_dims(x, axis=3))(conv_input)\n\n for i in range(1, l + 1):\n filters = conv_filters[i - 1]\n width = conv_kernel_width[i - 1]\n k = max(1, int((1 - pow(i / l, l - i)) * n)) if i < l else 3\n\n conv_result = tf.keras.layers.Conv2D(filters=filters, kernel_size=(width, 1), strides=(1, 1), padding='same',\n activation='tanh', use_bias=True, )(pooling_result)\n pooling_result = KMaxPooling(\n k=min(k, int(conv_result.shape[1])), axis=1)(conv_result)\n\n flatten_result = tf.keras.layers.Flatten()(pooling_result)\n dnn_out = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout)(flatten_result)\n dnn_logit = tf.keras.layers.Dense(1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)\n\n final_logit = add_func([dnn_logit, linear_logit])\n\n output = 
PredictionLayer(task)(final_logit)\n model = tf.keras.models.Model(inputs=inputs_list, outputs=output)\n return model\n"
] | [
[
"tensorflow.keras.initializers.glorot_normal",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.expand_dims",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
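In the CCPM entry above, the `KMaxPooling` width for conv layer i (out of l layers over n embedded fields) is `max(1, int((1 - (i/l)**(l-i)) * n))`, with the last layer fixed at k = 3; the model additionally caps k at the current feature length via `min(k, conv_result.shape[1])`. A minimal sketch of that schedule with illustrative field and layer counts:

```python
def kmax_schedule(n_fields, n_layers):
    # k per layer (1-based index); the final layer always keeps 3 features.
    ks = []
    for i in range(1, n_layers + 1):
        if i < n_layers:
            k = max(1, int((1 - pow(i / n_layers, n_layers - i)) * n_fields))
        else:
            k = 3
        ks.append(k)
    return ks

print(kmax_schedule(n_fields=26, n_layers=2))  # [13, 3]
print(kmax_schedule(n_fields=26, n_layers=3))  # [23, 8, 3]
```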
UoA-eResearch/dynamic_network_graph | [
"350a22a40dd7425eb08a688651df13af8826ea52"
] | [
"stress_test.py"
] | [
"#!/usr/bin/env python3\n\nimport asyncio\nimport websockets\nimport json\nimport random\nimport time\nimport numpy as np\n\nURI = \"wss://api-proxy.auckland-cer.cloud.edu.au/dynamic_network_graph\"\n#URI = \"ws://api-proxy.auckland-cer.cloud.edu.au:6789\"\n#URI = \"ws://localhost:6789\"\nSESSION_ID = \"STRESS_TEST\"\nconnections = []\n\nasync def read_all(websocket):\n try:\n while True:\n await asyncio.wait_for(websocket.recv(), 0)\n except:\n return\n\nasync def test():\n start = time.time()\n websocket = await websockets.connect(URI)\n connections.append(websocket)\n await websocket.send(json.dumps({\n \"action\": \"connect\",\n \"session_id\": SESSION_ID\n }))\n await websocket.send(json.dumps({\n \"session_id\": SESSION_ID,\n \"action\": \"upsert_entry\",\n \"entry\": {\n \"id\": random.randint(0, 100),\n \"donor\": random.randint(0, 100),\n \"resourceType\": \"$\",\n \"recipient\": random.randint(0, 100)\n }\n }))\n return time.time() - start\n\nasync def run_n_tests(n):\n results = await asyncio.gather(*[test() for i in range(n)])\n return results\n\nasync def main():\n print(\"n_clients,t,wall_time\")\n start = time.time()\n for i in range(100):\n result = await run_n_tests(15)\n result = np.mean(result)\n print(f\"{len(connections)},{result},{time.time() - start}\")\n for ws in connections:\n await read_all(ws)\n\nasyncio.get_event_loop().run_until_complete(main())"
] | [
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
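The stress_test.py row above times many concurrent websocket connect/send round trips and averages them with numpy.mean. The snippet below is a minimal sketch of that same gather-and-average pattern with the network work replaced by asyncio.sleep so it runs without a server; every name in it is illustrative, not part of the original script.

import asyncio
import time
import numpy as np

async def one_task():
    # stand-in for the connect + send work done in stress_test.py
    start = time.time()
    await asyncio.sleep(0.01)
    return time.time() - start

async def run_batch(n):
    # run n coroutines concurrently and average their wall-clock times
    return np.mean(await asyncio.gather(*[one_task() for _ in range(n)]))

print(asyncio.run(run_batch(15)))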
pfinashx/openvino | [
"1d417e888b508415510fb0a92e4a9264cf8bdef7",
"1d417e888b508415510fb0a92e4a9264cf8bdef7",
"ac2e639ff8f9a607c3c682a4c4e165c238eb817f",
"1d417e888b508415510fb0a92e4a9264cf8bdef7",
"1d417e888b508415510fb0a92e4a9264cf8bdef7",
"1d417e888b508415510fb0a92e4a9264cf8bdef7"
] | [
"tests/layer_tests/onnx_tests/test_mean_variance_normalization.py",
"tests/layer_tests/onnx_tests/test_image_scaler.py",
"src/bindings/python/src/openvino/runtime/utils/input_validation.py",
"tests/layer_tests/onnx_tests/test_identity.py",
"tests/layer_tests/onnx_tests/test_unsqueeze.py",
"tests/layer_tests/tensorflow_tests/test_tf_BiasAdd.py"
] | [
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport pytest\n\nfrom common.onnx_layer_test_class import OnnxRuntimeLayerTest\n\n\nclass TestMeanVarianceNormalization(OnnxRuntimeLayerTest):\n def _prepare_input(self, inputs_dict):\n for input in inputs_dict.keys():\n inputs_dict[input] = np.random.randn(*inputs_dict[input]).astype(np.float32)\n return inputs_dict\n\n def create_net(self, shape, axes, ir_version):\n \"\"\"\n ONNX net IR net\n\n Input->MeanVarianceNormalization->Output => Input->MVN\n \"\"\"\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)\n\n node_def = onnx.helper.make_node(\n 'MeanVarianceNormalization',\n inputs=['input'],\n outputs=['output'],\n axes=axes\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_def],\n 'test_model',\n [input],\n [output]\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_model')\n\n #\n # Create reference IR net\n # Please, specify 'type': 'Input' for input node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net\n\n test_data = [\n dict(shape=[7, 2, 3, 5], axes=[2, 3]),\n dict(shape=[7, 2, 3, 5], axes=[1, 2, 3]),\n dict(shape=[7, 2, 3, 5, 11], axes=[2, 3, 4]),\n dict(shape=[7, 2, 3, 5, 11], axes=[1, 2, 3, 4])\n ]\n\n @pytest.mark.parametrize(\"params\", test_data)\n @pytest.mark.nightly\n def test_mvn(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n",
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport pytest\n\nfrom common.onnx_layer_test_class import Caffe2OnnxLayerTest\n\n\nclass TestImageScaler(Caffe2OnnxLayerTest):\n def create_net(self, shape, scale, ir_version):\n \"\"\"\n ONNX net IR net\n\n Input->ImageScaler->Output => Input->ScaleShift(Power)\n\n \"\"\"\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)\n\n bias = np.random.randint(-10, 10, shape[1]).astype(np.float)\n\n node_def = onnx.helper.make_node(\n 'ImageScaler',\n inputs=['input'],\n outputs=['output'],\n bias=bias,\n scale=scale\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_def],\n 'test_model',\n [input],\n [output],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_model')\n\n #\n # Create reference IR net\n #\n\n ref_net = None\n\n return onnx_net, ref_net\n\n def create_net_const(self, shape, scale, precision, ir_version):\n \"\"\"\n ONNX net IR net\n\n Input->Concat(+scaled const)->Output => Input->Concat(+const)\n\n \"\"\"\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n\n concat_axis = 0\n output_shape = shape.copy()\n output_shape[concat_axis] *= 2\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)\n\n constant = np.random.randint(-127, 127, shape).astype(np.float)\n bias = np.random.randint(-10, 10, shape[1]).astype(np.float)\n\n node_const_def = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['const1'],\n value=helper.make_tensor(\n name='const_tensor',\n data_type=TensorProto.FLOAT,\n dims=constant.shape,\n vals=constant.flatten(),\n ),\n )\n\n node_def = onnx.helper.make_node(\n 'ImageScaler',\n inputs=['const1'],\n outputs=['scale'],\n bias=bias,\n scale=scale\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=['input', 'scale'],\n outputs=['output'],\n axis=concat_axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_const_def, node_def, node_concat_def],\n 'test_model',\n [input],\n [output],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_model')\n\n #\n # Create reference IR net\n #\n ir_const = constant * scale + np.expand_dims(np.expand_dims([bias], 2), 3)\n if precision == 'FP16':\n ir_const = ir_const.astype(np.float16)\n\n ref_net = None\n\n return onnx_net, ref_net\n\n test_data_precommit = [dict(shape=[2, 4, 6, 8], scale=4.5),\n dict(shape=[1, 1, 10, 12], scale=0.5)]\n\n test_data = [dict(shape=[1, 1, 10, 12], scale=0.5),\n dict(shape=[1, 3, 10, 12], scale=1.5),\n dict(shape=[6, 8, 10, 12], scale=4.5)]\n\n @pytest.mark.parametrize(\"params\", test_data_precommit)\n def test_image_scaler_precommit(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,\n temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data)\n @pytest.mark.nightly\n def test_image_scaler(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, 
ir_version,\n temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data_precommit)\n def test_image_scaler_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data)\n @pytest.mark.nightly\n def test_image_scaler_const(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n",
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Helper functions for validating user input.\"\"\"\n\nimport logging\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type\n\nimport numpy as np\n\nfrom openvino.runtime.exceptions import UserInputError\n\nlog = logging.getLogger(__name__)\n\n\ndef assert_list_of_ints(value_list: Iterable[int], message: str) -> None:\n \"\"\"Verify that the provided value is an iterable of integers.\"\"\"\n try:\n for value in value_list:\n if not isinstance(value, int):\n raise TypeError\n except TypeError:\n log.warning(message)\n raise UserInputError(message, value_list)\n\n\ndef _check_value(op_name, attr_key, value, val_type, cond=None):\n # type: (str, str, Any, Type, Optional[Callable[[Any], bool]]) -> bool\n \"\"\"Check whether provided value satisfies specified criteria.\n\n @param op_name: The operator name which attributes are checked.\n @param attr_key: The attribute name.\n @param value: The value to check.\n @param val_type: Required value type.\n @param cond: The optional function running additional checks.\n\n :raises UserInputError:\n @return True if attribute satisfies all criterias. Otherwise False.\n \"\"\"\n if not np.issubdtype(type(value), val_type):\n raise UserInputError(\n '{} operator attribute \"{}\" value must by of type {}.'.format(\n op_name, attr_key, val_type\n )\n )\n if cond is not None and not cond(value):\n raise UserInputError(\n '{} operator attribute \"{}\" value does not satisfy provided condition.'.format(\n op_name, attr_key\n )\n )\n return True\n\n\ndef check_valid_attribute(op_name, attr_dict, attr_key, val_type, cond=None, required=False):\n # type: (str, dict, str, Type, Optional[Callable[[Any], bool]], Optional[bool]) -> bool\n \"\"\"Check whether specified attribute satisfies given criteria.\n\n @param op_name: The operator name which attributes are checked.\n @param attr_dict: Dictionary containing key-value attributes to check.\n @param attr_key: Key value for validated attribute.\n @param val_type: Value type for validated attribute.\n @param cond: Any callable wich accept attribute value and returns True or False.\n @param required: Whether provided attribute key is not required. This mean it may be missing\n from provided dictionary.\n\n :raises UserInputError:\n\n @return True if attribute satisfies all criterias. Otherwise False.\n \"\"\"\n result = True\n\n if required and attr_key not in attr_dict:\n raise UserInputError(\n 'Provided dictionary is missing {} operator required attribute \"{}\"'.format(\n op_name, attr_key\n )\n )\n\n if attr_key not in attr_dict:\n return result\n\n attr_value = attr_dict[attr_key]\n\n if np.isscalar(attr_value):\n result = result and _check_value(op_name, attr_key, attr_value, val_type, cond)\n else:\n for v in attr_value:\n result = result and _check_value(op_name, attr_key, v, val_type, cond)\n\n return result\n\n\ndef check_valid_attributes(\n op_name, # type: str\n attributes, # type: Dict[str, Any]\n requirements, # type: List[Tuple[str, bool, Type, Optional[Callable]]]\n):\n # type: (...) -> bool\n \"\"\"Perform attributes validation according to specified type, value criteria.\n\n @param op_name: The operator name which attributes are checked.\n @param attributes: The dictionary with user provided attributes to check.\n @param requirements: The list of tuples describing attributes' requirements. 
The tuple should\n contain following values:\n (attr_name: str,\n is_required: bool,\n value_type: Type,\n value_condition: Callable)\n\n :raises UserInputError:\n @return True if all attributes satisfies criterias. Otherwise False.\n \"\"\"\n for attr, required, val_type, cond in requirements:\n check_valid_attribute(op_name, attributes, attr, val_type, cond, required)\n return True\n\n\ndef is_positive_value(x): # type: (Any) -> bool\n \"\"\"Determine whether the specified x is positive value.\n\n @param x: The value to check.\n\n @return True if the specified x is positive value, False otherwise.\n \"\"\"\n return x > 0\n\n\ndef is_non_negative_value(x): # type: (Any) -> bool\n \"\"\"Determine whether the specified x is non-negative value.\n\n @param x: The value to check.\n\n @return True if the specified x is non-negative value, False otherwise.\n \"\"\"\n return x >= 0\n",
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport pytest\n\nfrom common.layer_test_class import check_ir_version\nfrom common.onnx_layer_test_class import Caffe2OnnxLayerTest\nfrom unit_tests.utils.graph import build_graph\n\n\nclass TestIdentity(Caffe2OnnxLayerTest):\n def create_net(self, shape, ir_version):\n \"\"\"\n ONNX net IR net\n\n Input->Identity->Sigmoid->Output => Input->sigmoid\n\n \"\"\"\n\n #\n # Create ONNX model\n #\n\n from onnx import helper\n from onnx import TensorProto\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)\n\n node_def = helper.make_node(\n 'Identity',\n inputs=['input'],\n outputs=['identity']\n )\n\n sigmoid_def = helper.make_node(\n 'Sigmoid',\n inputs=['identity'],\n outputs=['output']\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_def, sigmoid_def],\n 'test_model',\n [input],\n [output],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_model')\n\n #\n # Create reference IR net\n #\n\n ref_net = None\n\n if check_ir_version(10, None, ir_version):\n nodes_attributes = {\n 'input': {'kind': 'op', 'type': 'Parameter'},\n 'input_data': {'shape': shape, 'kind': 'data'},\n 'sigmoid': {'kind': 'op', 'type': 'Sigmoid'},\n 'sigmoid_data': {'shape': shape, 'kind': 'data'},\n 'result': {'kind': 'op', 'type': 'Result'}\n }\n ref_net = build_graph(nodes_attributes,\n [('input', 'input_data'),\n ('input_data', 'sigmoid'),\n ('sigmoid', 'sigmoid_data'),\n ('sigmoid_data', 'result')\n ])\n\n return onnx_net, ref_net\n\n def create_net_const(self, shape, precision, ir_version):\n \"\"\"\n ONNX net IR net\n\n Input->Concat(+identity on const)->Output => Input->Concat(+const)\n\n \"\"\"\n\n #\n # Create ONNX model\n #\n\n from onnx import helper\n from onnx import TensorProto\n\n constant = np.random.randint(-127, 127, shape).astype(np.float)\n\n concat_axis = 0\n output_shape = shape.copy()\n output_shape[concat_axis] *= 2\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)\n\n node_const_def = helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['const1'],\n value=helper.make_tensor(\n name='const_tensor',\n data_type=TensorProto.FLOAT,\n dims=constant.shape,\n vals=constant.flatten(),\n ),\n )\n\n node_def = helper.make_node(\n 'Identity',\n inputs=['const1'],\n outputs=['identity']\n )\n\n node_concat_def = helper.make_node(\n 'Concat',\n inputs=['input', 'identity'],\n outputs=['output'],\n axis=concat_axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_const_def, node_def, node_concat_def],\n 'test_model',\n [input],\n [output],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_model')\n\n #\n # Create reference IR net\n #\n\n ref_net = None\n\n if check_ir_version(10, None, ir_version):\n\n nodes_attributes = {\n 'input': {'kind': 'op', 'type': 'Parameter'},\n 'input_data': {'shape': shape, 'kind': 'data'},\n 'input_const_data': {'kind': 'data', 'value': constant.flatten()},\n 'const': {'kind': 'op', 'type': 'Const'},\n 'const_data': {'shape': shape, 'kind': 'data'},\n 'concat': {'kind': 'op', 'type': 'Concat', 'axis': concat_axis},\n 'concat_data': {'shape': output_shape, 'kind': 'data'},\n 'result': {'kind': 'op', 
'type': 'Result'}\n }\n\n ref_net = build_graph(nodes_attributes,\n [('input', 'input_data'),\n ('input_const_data', 'const'),\n ('const', 'const_data'),\n ('input_data', 'concat'),\n ('const_data', 'concat'),\n ('concat', 'concat_data'),\n ('concat_data', 'result')\n ])\n\n return onnx_net, ref_net\n\n test_data = [dict(shape=[10, 12]),\n dict(shape=[8, 10, 12]),\n dict(shape=[6, 8, 10, 12]),\n dict(shape=[4, 6, 8, 10, 12])]\n\n @pytest.mark.parametrize(\"params\", test_data)\n @pytest.mark.nightly\n def test_identity(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,\n temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data)\n @pytest.mark.nightly\n def test_identity_const(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n",
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\n\nfrom common.onnx_layer_test_class import Caffe2OnnxLayerTest\n\n\nclass TestUnsqueeze(Caffe2OnnxLayerTest):\n def create_unsqueeze_net(self, axes, input_shape, output_shape, ir_version):\n \"\"\"\n ONNX net IR net\n\n Input->Unsqueeze(axes=0)->Output => Input->Reshape\n\n \"\"\"\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)\n\n node_squeeze_def = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['input'],\n outputs=['output'],\n axes=axes\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_squeeze_def],\n 'test_squeeze_model',\n [input],\n [output],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_squeeze_model')\n\n #\n # Create reference IR net\n # Please, specify 'type': 'Input' for input node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net\n\n def create_unsqueeze_net_const(self, axes, input_shape, output_shape, ir_version):\n \"\"\"\n ONNX net IR net\n\n Input->Concat(+unsqueezed const)->Output => Input->Concat(+const)\n\n \"\"\"\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n import numpy as np\n\n concat_axis = 1\n concat_output_shape = output_shape.copy()\n concat_output_shape[concat_axis] *= 2\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, output_shape)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)\n\n const_number = np.prod(input_shape)\n constant = np.random.randint(-127, 127, const_number).astype(np.float)\n constant = np.reshape(constant, input_shape)\n\n node_const_def = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['const1'],\n value=helper.make_tensor(\n name='const_tensor',\n data_type=TensorProto.FLOAT,\n dims=constant.shape,\n vals=constant.flatten(),\n ),\n )\n\n node_squeeze_def = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['const1'],\n outputs=['unsqueeze1'],\n axes=axes\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=['input', 'unsqueeze1'],\n outputs=['output'],\n axis=concat_axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_const_def, node_squeeze_def, node_concat_def],\n 'test_unsqueeze_model',\n [input],\n [output],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_unsqueeze_model')\n\n #\n # Create reference IR net\n # Please, specify 'type': 'Input' for input node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n ref_net = None\n\n return onnx_net, ref_net\n\n test_data_5D = [\n dict(axes=[0], input_shape=[2, 3, 10, 10], output_shape=[1, 2, 3, 10, 10]),\n dict(axes=[1], input_shape=[2, 3, 10, 10], output_shape=[2, 1, 3, 10, 10]),\n dict(axes=[2], input_shape=[2, 3, 10, 10], output_shape=[2, 3, 1, 10, 10]),\n dict(axes=[3], input_shape=[2, 3, 10, 10], output_shape=[2, 3, 10, 1, 10]),\n dict(axes=[4], input_shape=[2, 3, 10, 10], output_shape=[2, 3, 10, 10, 1]),\n dict(axes=[0, 1], input_shape=[3, 10, 10], output_shape=[1, 1, 3, 10, 10]),\n dict(axes=[0, 2], input_shape=[3, 10, 10], output_shape=[1, 3, 1, 10, 10]),\n 
dict(axes=[0, 3], input_shape=[3, 10, 10], output_shape=[1, 3, 10, 1, 10]),\n dict(axes=[0, 4], input_shape=[3, 10, 10], output_shape=[1, 3, 10, 10, 1]),\n dict(axes=[1, 2], input_shape=[3, 10, 10], output_shape=[3, 1, 1, 10, 10]),\n dict(axes=[1, 3], input_shape=[3, 10, 10], output_shape=[3, 1, 10, 1, 10]),\n dict(axes=[1, 4], input_shape=[3, 10, 10], output_shape=[3, 1, 10, 10, 1]),\n dict(axes=[2, 3], input_shape=[3, 10, 10], output_shape=[3, 10, 1, 1, 10]),\n dict(axes=[2, 4], input_shape=[3, 10, 10], output_shape=[3, 10, 1, 10, 1]),\n dict(axes=[3, 4], input_shape=[3, 10, 10], output_shape=[3, 10, 10, 1, 1]),\n dict(axes=[0, 1, 2], input_shape=[10, 10], output_shape=[1, 1, 1, 10, 10]),\n dict(axes=[0, 1, 3], input_shape=[10, 10], output_shape=[1, 1, 10, 1, 10]),\n dict(axes=[0, 1, 4], input_shape=[10, 10], output_shape=[1, 1, 10, 10, 1]),\n dict(axes=[0, 2, 3], input_shape=[10, 10], output_shape=[1, 10, 1, 1, 10]),\n dict(axes=[0, 2, 4], input_shape=[10, 10], output_shape=[1, 10, 1, 10, 1]),\n dict(axes=[0, 3, 4], input_shape=[10, 10], output_shape=[1, 10, 10, 1, 1]),\n dict(axes=[1, 2, 3], input_shape=[10, 10], output_shape=[10, 1, 1, 1, 10]),\n dict(axes=[1, 2, 4], input_shape=[10, 10], output_shape=[10, 1, 1, 10, 1]),\n dict(axes=[1, 3, 4], input_shape=[10, 10], output_shape=[10, 1, 10, 1, 1]),\n dict(axes=[2, 3, 4], input_shape=[10, 10], output_shape=[10, 10, 1, 1, 1])]\n\n test_data_4D = [\n dict(axes=[0], input_shape=[3, 10, 10], output_shape=[1, 3, 10, 10]),\n dict(axes=[1], input_shape=[3, 10, 10], output_shape=[3, 1, 10, 10]),\n dict(axes=[2], input_shape=[3, 10, 10], output_shape=[3, 10, 1, 10]),\n dict(axes=[3], input_shape=[3, 10, 10], output_shape=[3, 10, 10, 1]),\n dict(axes=[3], input_shape=[3, 10, 10], output_shape=[3, 10, 10, 1]),\n dict(axes=[0, 1], input_shape=[10, 10], output_shape=[1, 1, 10, 10]),\n dict(axes=[0, 2], input_shape=[10, 10], output_shape=[1, 10, 1, 10]),\n dict(axes=[0, 3], input_shape=[10, 10], output_shape=[1, 10, 10, 1]),\n dict(axes=[1, 2], input_shape=[10, 10], output_shape=[10, 1, 1, 10]),\n dict(axes=[1, 3], input_shape=[10, 10], output_shape=[10, 1, 10, 1]),\n dict(axes=[2, 3], input_shape=[10, 10], output_shape=[10, 10, 1, 1])]\n\n test_data_3D = [\n dict(axes=[0], input_shape=[10, 10], output_shape=[1, 10, 10]),\n dict(axes=[1], input_shape=[10, 10], output_shape=[10, 1, 10]),\n dict(axes=[2], input_shape=[10, 10], output_shape=[10, 10, 1])]\n\n @pytest.mark.parametrize(\"params\", test_data_5D)\n @pytest.mark.nightly\n def test_unsqueeze_5D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,\n temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data_4D)\n @pytest.mark.nightly\n def test_unsqueeze_4D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,\n temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data_3D)\n @pytest.mark.nightly\n def test_unsqueeze_3D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,\n temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data_5D)\n @pytest.mark.nightly\n def test_unsqueeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), 
ie_device, precision, ir_version,\n temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data_4D)\n @pytest.mark.nightly\n def test_unsqueeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,\n temp_dir=temp_dir)\n\n @pytest.mark.parametrize(\"params\", test_data_3D)\n @pytest.mark.nightly\n def test_unsqueeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,\n temp_dir=temp_dir)\n",
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\n\nfrom common.tf_layer_test_class import CommonTFLayerTest\nfrom common.utils.tf_utils import permute_nchw_to_nhwc\n\n\nclass TestBiasAdd(CommonTFLayerTest):\n def create_bias_add_placeholder_const_net(self, shape, ir_version, use_new_frontend):\n \"\"\"\n Tensorflow net IR net\n\n Placeholder->BiasAdd => Placeholder->Power or ScaleShift\n / /\n Const-------/ Const-------/\n\n \"\"\"\n\n #\n # Create Tensorflow model\n #\n\n import tensorflow as tf\n import numpy as np\n\n tf.compat.v1.reset_default_graph()\n\n # Create the graph and model\n with tf.compat.v1.Session() as sess:\n tf_x_shape = shape.copy()\n\n tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)\n tf_y_shape = tf_x_shape[-1:]\n\n x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')\n constant_value = np.random.randint(0, 1, tf_y_shape).astype(np.float32)\n if (constant_value == 0).all():\n # Avoid elimination of the layer from IR\n constant_value = constant_value + 1\n y = tf.constant(constant_value)\n\n tf.nn.bias_add(x, y, name=\"Operation\")\n\n tf.compat.v1.global_variables_initializer()\n tf_net = sess.graph_def\n\n #\n # Create reference IR net\n # Please, specify 'type': 'Input' for input node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return tf_net, ref_net\n\n def create_bias_add_2_consts_net(self, shape, ir_version, use_new_frontend):\n \"\"\"\n Tensorflow net IR net\n\n Const->BiasAdd-->Concat => Const---->Concat\n / / /\n Const--/ / Placeholder-/\n /\n Placeholder---/\n\n \"\"\"\n\n #\n # Create Tensorflow model\n #\n\n import tensorflow as tf\n import numpy as np\n\n tf.compat.v1.reset_default_graph()\n\n tf_concat_axis = -1\n\n # Create the graph and model\n with tf.compat.v1.Session() as sess:\n tf_x_shape = shape.copy()\n\n tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)\n tf_y_shape = tf_x_shape[-1:]\n\n constant_value_x = np.random.randint(-256, 256, tf_x_shape).astype(np.float32)\n x = tf.constant(constant_value_x)\n constant_value_y = np.random.randint(-256, 256, tf_y_shape).astype(np.float32)\n y = tf.constant(constant_value_y)\n\n add = tf.nn.bias_add(x, y, name=\"Operation\")\n\n placeholder = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input') # Input_1 in graph_def\n\n concat = tf.concat([placeholder, add], axis=tf_concat_axis, name='Operation')\n\n tf.compat.v1.global_variables_initializer()\n tf_net = sess.graph_def\n\n #\n # Create reference IR net\n # Please, specify 'type': 'Input' for input node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return tf_net, ref_net\n\n test_data_2D = [\n dict(shape=[1, 1]),\n dict(shape=[1, 224])\n ]\n\n @pytest.mark.parametrize(\"params\", test_data_2D)\n @pytest.mark.nightly\n def test_bias_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):\n self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),\n ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)\n\n @pytest.mark.parametrize(\"params\", test_data_2D)\n @pytest.mark.nightly\n def test_bias_add_2_consts_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):\n self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),\n ie_device, precision, 
ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)\n\n test_data_3D = [\n pytest.param(dict(shape=[1, 1, 224]), marks=pytest.mark.xfail(reason=\"*-19053\")),\n pytest.param(dict(shape=[1, 3, 224]), marks=pytest.mark.xfail(reason=\"*-19053\"))\n ]\n\n @pytest.mark.parametrize(\"params\", test_data_3D)\n @pytest.mark.nightly\n def test_bias_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):\n self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),\n ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)\n\n @pytest.mark.parametrize(\"params\", test_data_3D)\n @pytest.mark.nightly\n def test_bias_add_2_consts_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):\n self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),\n ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)\n\n test_data_4D = [\n dict(shape=[1, 1, 100, 224]),\n dict(shape=[1, 3, 100, 224])\n ]\n\n @pytest.mark.parametrize(\"params\", test_data_4D)\n @pytest.mark.nightly\n @pytest.mark.precommit\n def test_bias_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):\n self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),\n ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)\n\n @pytest.mark.parametrize(\"params\", test_data_4D)\n @pytest.mark.nightly\n def test_bias_add_2_consts_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):\n self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),\n ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)\n\n test_data_5D = [\n dict(shape=[1, 1, 50, 100, 224]),\n dict(shape=[1, 3, 220, 222, 224])\n ]\n\n @pytest.mark.parametrize(\"params\", test_data_5D)\n @pytest.mark.nightly\n @pytest.mark.precommit\n def test_bias_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):\n self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),\n ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)\n\n @pytest.mark.parametrize(\"params\", test_data_5D)\n @pytest.mark.nightly\n def test_bias_add_2_consts_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):\n self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),\n ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)\n"
] | [
[
"numpy.random.randn"
],
[
"numpy.expand_dims",
"numpy.random.randint"
],
[
"numpy.isscalar"
],
[
"numpy.random.randint"
],
[
"numpy.reshape",
"numpy.prod",
"numpy.random.randint"
],
[
"tensorflow.nn.bias_add",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.reset_default_graph",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
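All of the OpenVINO ONNX layer tests above build their models the same way: declare tensor value infos, one or more nodes, a graph, then a model. Below is a minimal self-contained sketch of that pattern; it assumes the onnx package is installed, and the Identity op and shape are arbitrary examples rather than any specific test case.

import onnx
from onnx import helper, TensorProto

shape = [1, 3, 10, 10]
inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
out = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)
node = helper.make_node('Identity', inputs=['input'], outputs=['output'])
graph = helper.make_graph([node], 'test_model', [inp], [out])
model = helper.make_model(graph, producer_name='test_model')
onnx.checker.check_model(model)  # structural sanity check only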
renyixiang/xmind_to_testcase | [
"25f3a5377e67138fc6707c0a14dcf6ed8501c845"
] | [
"webtool/tow_csvfile_compare.py"
] | [
"# _*_ coding:utf-8 _*_\n\n'''\ncsv文件的合并和去重\n主要是针对测试用例增加使用此脚本\n'''\nimport pandas as pd\nimport glob\n#输出文件\noutputfile = '/Users/huaan720/Downloads/百度网盘/xmind2testcase-master/docs/case_csvfile/new.csv'\n#合并csv的文件夹\ncsv_list = glob.glob('/Users/huaan720/Downloads/百度网盘/xmind2testcase-master/docs/case_csvfile/*.csv')\nprint(u'共发现%s个CSV文件' % len(csv_list))\nprint(u'正在处理............')\n\ndef hebing():\n for inputfile in csv_list:\n f = open(inputfile,encoding='gbk')\n data = pd.read_csv(f)\n data.to_csv(outputfile, mode='a', index=False, header=None)\n print('完成合并')\n\ndef quchong(file):\n df = pd.read_csv(file, header=0)\n datalist = df.drop_duplicates()\n datalist.to_csv(file)\n print('完成去重')\n\nif __name__ == '__main__':\n hebing()\n quchong(outputfile)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
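The tow_csvfile_compare.py row above merges CSVs by appending them one by one and then de-duplicating the output file. As a hedged aside, the same result can usually be obtained in a single pass with pandas.concat plus drop_duplicates, sketched below with placeholder paths; this is an alternative technique, not the original script's exact behaviour.

import glob
import pandas as pd

# read every CSV in the folder, stack them, drop duplicate rows, write once
frames = [pd.read_csv(p, encoding='gbk') for p in glob.glob('case_csvfile/*.csv')]
merged = pd.concat(frames, ignore_index=True).drop_duplicates()
merged.to_csv('case_csvfile/new.csv', index=False)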
mchapman87501/mars_perseverance_images | [
"9d138ffba25fcb039051cda724e15e994153d90c"
] | [
"tools/band_finder/src/band_finder/image_matcher.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nimage_matcher adjusts image contrast based on two image samples.\nCopyright 2021, Mitch Chapman All rights reserved\n\"\"\"\n\nimport numpy as np\n\n\nclass ChannelAdjuster:\n def __init__(self, src_sample, target_sample, channel, vmin, vmax):\n src = src_sample.astype(np.float64)\n targ = target_sample.astype(np.float64)\n\n src_values = src[:, :, channel].flatten()\n targ_values = targ[:, :, channel].flatten()\n\n samples = dict()\n for s, t in zip(src_values, targ_values):\n samples.setdefault(s, []).append(t)\n\n # Default the left and right edges to the channel extreme values.\n value_map = {\n vmin: vmin,\n vmax: vmax\n }\n for s, tvals in samples.items():\n value_map[s] = np.mean(tvals)\n\n ordered_src = sorted(value_map.keys())\n ordered_targ = [value_map[src] for src in ordered_src]\n\n self._osrc = ordered_src\n self._otarg = ordered_targ\n self._channel = channel\n\n def adjust(self, image_data):\n values = image_data[:, :, self._channel]\n new_values = np.interp(values, self._osrc, self._otarg)\n image_data[:, :, self._channel] = new_values\n\n\nclass ImageMatcher:\n \"\"\"\n ImageMatcher tries to make a source image match\n the appearance of a target image.\n\n It uses samples of the source and target image, that presumably depict the\n same scene, to characterize the mapping from source to target.\n It does this poorly, by considering image color components separately.\n \"\"\"\n\n def __init__(self, src_sample, target_sample):\n \"\"\"Create an instance.\n src_sample and target_sample are numpy image_data.\n Both show the same scene, but with potentially different colors -\n intensity, saturation, etc.\n\n Args:\n src_sample (array): A numpy image\n target_sample (array): A numpy image, depicting\n the same scene as src_sample but with\n possibly different color ranges\n \"\"\"\n src = src_sample.astype(np.float64)\n targ = target_sample.astype(np.float64)\n\n # Assume Lab channels.\n # TODO let caller specify this, perhaps via a class method.\n # This same information is encoded in find_pano.\n chan_info = [\n [0, 0.0, 100.0],\n [1, -127.0, 128.0],\n [2, -128.0, 127.0]\n ]\n self._adjusters = [\n ChannelAdjuster(src, targ, channel, vmin, vmax)\n for channel, vmin, vmax in chan_info\n ]\n\n def adjusted(self, src_image):\n \"\"\"Get a copy of a source image, adjusted to\n match self's target_sample.\n\n Note: the result's value data may not be bounded to, e.g., 0..255.0\n\n Args:\n image (array): numpy image array\n\n Returns:\n array: the adjusted image array\n \"\"\"\n result = src_image.copy()\n for adjuster in self._adjusters:\n adjuster.adjust(result)\n return result\n"
] | [
[
"numpy.mean",
"numpy.interp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
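ChannelAdjuster in the image_matcher.py row above boils down to a sorted value lookup table applied with numpy.interp. The fragment below is a minimal sketch of that mapping on made-up numbers, only to show how np.interp remaps every pixel of a channel at once; the sample values are illustrative, not taken from the module.

import numpy as np

# Sorted source sample values and the mean target value observed for each of them,
# mirroring the table ChannelAdjuster builds (numbers here are made up).
src_points = np.array([0.0, 40.0, 100.0])
targ_points = np.array([0.0, 55.0, 100.0])

channel = np.array([[10.0, 40.0], [70.0, 100.0]])
print(np.interp(channel, src_points, targ_points))  # piecewise-linear remap of the whole channel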
NeversayEverLin/PyOCT | [
"d2c221142ebc3c13050ad26ea09ad9d031ddab31"
] | [
"PyOCT/misc.py"
] | [
"import os\nfrom h5py._hl.files import File \nimport numpy as np \nimport xml.etree.ElementTree as ET\nimport time\nfrom scipy.linalg import dft\nimport numpy.matlib \nimport matplotlib.pyplot as plt \nimport matplotlib \nfrom PyOCT import CAO \nimport re \nimport h5py\nfrom scipy.linalg.misc import norm \nfrom scipy.signal import fftconvolve\nimport matplotlib.patches as patches\nimport cv2 \nimport pickle\nfrom scipy import ndimage\nimport scipy.stats\nimport matplotlib.colors\nfrom matplotlib import cm \nimport math \ndef find_all_dataset(root_dir,saveFolder, saveOption='in'):\n \"\"\"\n Looking for all datasets under root_dir and create a saveFolder under root_dir for data save. \n : root_dir: root directory of all data files\n : saveFolder: name of folder where the data should be saved \n Return:\n : NumOfFile: total of raw data files \n : RawDataFileID: sorted raw data file ID\n : SettingsFileID: sorted settings file ID of corresponding raw data file \n : BkgndFileID: background data file \n : save_path: the path to save data \n : saveOption: 'in' or 'out', indicating save the processed files into current root directory with folder name as saveFolder ('in')\n : or save the processed files into an independent directory with saveFolder as a full directory path. \n \"\"\"\n if saveOption.lower() == 'in':\n save_path = os.path.join(root_dir,saveFolder) \n elif saveOption.lower() == 'out':\n save_path = saveFolder \n if not os.path.exists(save_path):\n os.mkdir(save_path)\n subfolders = os.listdir(root_dir)\n SettingsFileID = [] \n RawDataFileID = []\n BkgndFileID = []\n for item in subfolders:\n if item.endswith('_settings.xml'):\n SettingsFileID.append(item) \n if item.endswith('_raw.bin'):\n if 'bkgnd' not in item:\n RawDataFileID.append(item)\n else:\n BkgndFileID.append(item) \n # sort file name by numerical order\n pattern = re.compile(r'_\\d+_') \n pattern2 = re.compile(r'\\d+')\n RawDataFileID = sorted(RawDataFileID, key=lambda x:int(pattern2.findall(pattern.findall(x)[0])[0]))\n SettingsFileID = sorted(SettingsFileID , key=lambda x:int(pattern2.findall(pattern.findall(x)[0])[0]))\n NumOfFile = len(RawDataFileID) \n return NumOfFile, RawDataFileID, BkgndFileID, SettingsFileID, save_path\n\ndef ListAllDataFile(data_path,endsWith,startsWith=None,searchPattern=r\"_\\d+_\",searchPattern2=r\"\\d+\",returnNum=False):\n \"\"\"\n Serach for all data file under the condition of endsWith and return with a sorted results. \n The data file name can only have one number indicating the sequential order of file name, otherwise it might not right. \n searchPattern and searchPattern2 are intial and refined rearch target. 
\n \"\"\"\n if data_path.endswith(\"/\") or data_path.endswith(\"\\\\\"):\n data_path = data_path[:-1]\n dataID = []\n for dfile in os.listdir(data_path):\n if startsWith == None:\n if dfile.endswith(endsWith):\n dataID.append(dfile) \n else:\n if dfile.startswith(startsWith) and dfile.endswith(endsWith):\n dataID.append(dfile) \n searchPattern = re.compile(searchPattern)\n searchPattern2 = re.compile(searchPattern2)\n if searchPattern2 == None:\n dataID = sorted(dataID, key=lambda x:int(searchPattern.findall(x)[0])) \n else:\n dataID = sorted(dataID, key=lambda x:int(searchPattern2.findall(searchPattern.findall(x)[0])[0])) \n sortNum = []\n for x in dataID: \n sortNum.append(int(searchPattern2.findall(searchPattern.findall(x)[0])[0]))\n if returnNum:\n return dataID, np.asarray(sortNum)\n else:\n return dataID \n\ndef SaveData(save_path,FileName,inData,datatype='data',varName = 'OCTData'):\n \"\"\"\n Save data in the format of .hdf5\n : save_path: directory path where the data will be saved. \n : FileName: name of file name. Therefore, the file will be FileName.hdf5 \n : inData: input data. This should be an ndarray or Settings file. \n\n \"\"\"\n if save_path.endswith(\"/\") or save_path.endswith(\"\\\\\"):\n save_path = save_path[:-1]\n\n if datatype.lower() == 'data':\n if np.iscomplexobj(inData):\n DataFileSave = h5py.File(save_path+'/'+FileName+'.hdf5','w')\n DataFileSave.create_dataset(varName+'_real',shape=np.shape(inData),data=np.real(inData),compression=\"gzip\")\n DataFileSave.create_dataset(varName+'_imag',shape=np.shape(inData),data=np.imag(inData),compression=\"gzip\")\n DataFileSave.close()\n else:\n DataFileSave = h5py.File(save_path+'/'+FileName+'.hdf5','w')\n DataFileSave.create_dataset(varName,shape=np.shape(inData),data=inData,compression=\"gzip\")\n DataFileSave.close()\n elif datatype.lower() == 'settings':\n SettingsFile = h5py.File(save_path+'/'+FileName+'.hdf5','w')\n for k, v in inData.items():\n SettingsFile.create_dataset(k,data=v)\n SettingsFile.close() \n else:\n raise ValueError(\"Wrong data type!\") \ndef LoadSettings(path,FileName):\n \"\"\"\n Loading Settings file. \n path should NOT end with \"/\" or \"\\\\\".\n \"\"\"\n Settings = dict.fromkeys([], []) \n fid = h5py.File(path+'/'+FileName,'r')\n for key in fid.keys():\n Settings[key] = fid[key][()]\n return Settings \n\ndef mean2(x):\n y = np.sum(x) / np.size(x);\n return y\n\ndef corr2(a,b):\n \"\"\"Calculating correlation coefficient between two input 2D array\n with same definition to corr2() in MATLAB\n \"\"\"\n a = a - mean2(a)\n b = b - mean2(b)\n r = (a*b).sum() / np.sqrt((a*a).sum() * (b*b).sum())\n return np.abs(r)\n\n\ndef normxcorr2(template, image, mode=\"full\"):\n \"\"\"\n Input arrays should be floating point numbers.\n :param template: N-D array, of template or filter you are using for cross-correlation.\n Must be less or equal dimensions to image.\n Length of each dimension must be less than length of image.\n :param image: N-D array\n :param mode: Options, \"full\", \"valid\", \"same\"\n full (Default): The output of fftconvolve is the full discrete linear convolution of the inputs. \n Output size will be image size + 1/2 template size in each dimension.\n valid: The output consists only of those elements that do not rely on the zero-padding.\n same: The output is the same size as image, centered with respect to the ‘full’ output.\n :return: N-D array of same dimensions as image. 
Size depends on mode parameter.\n \"\"\"\n\n # If this happens, it is probably a mistake\n if np.ndim(template) > np.ndim(image) or \\\n len([i for i in range(np.ndim(template)) if template.shape[i] > image.shape[i]]) > 0:\n print(\"normxcorr2: TEMPLATE larger than IMG. Arguments may be swapped.\")\n\n template = template - np.mean(template)\n image = image - np.mean(image)\n\n a1 = np.ones(template.shape)\n # Faster to flip up down and left right then use fftconvolve instead of scipy's correlate\n ar = np.flipud(np.fliplr(template))\n out = fftconvolve(image, ar.conj(), mode=mode)\n \n image = fftconvolve(np.square(image), a1, mode=mode) - \\\n np.square(fftconvolve(image, a1, mode=mode)) / (np.prod(template.shape))\n\n # Remove small machine precision errors after subtraction\n image[np.where(image < 0)] = 0\n\n template = np.sum(np.square(template))\n out = out / np.sqrt(image * template)\n\n # Remove any divisions by 0 or very close to 0\n out[np.where(np.logical_not(np.isfinite(out)))] = 0\n \n return out\n\ndef Max2d(inData):\n return np.amax(inData), np.unravel_index(inData.argmax(),inData.shape) \n\ndef patternMatch(template,rootImage,cropIndex = None, showFit = False):\n \"\"\"Compare template image to rootImage and find the translation index required \n for makeing template image matched with rootImage. That's by moving (transX,transY) to ensure \n template image as much similar as to rootImage. Using normxcorr2() method which requires a small image region cropped from template.\n Therefore, cropIndex means the subimage of template used to deconvlve with rootImage. If cropIndex is None, then directly using template image as subimage.\n : template: to be compared, 2d numpy array as real. if cropIndex is None, both dimensions of template image must be smaller than rootImage. Using cropIndex must result in a smaller dimension of subimage compared to rootImage. \n : rootImage: basic image, 2d nump.array as real. It is best template and rootImage has the same \n : cropIndex: None as default, or (4,) list/array with [xmin,xmax,ymin,ymax]. 
\n : showFit: present fit results \n \"\"\"\n if cropIndex == None:\n CropImage = template \n centerofCropInTemplate = (0,0)\n else:\n cropIndex = np.asarray(cropIndex) \n CropImage = template[cropIndex[0]:cropIndex[1],cropIndex[2]:cropIndex[3]]\n centerofCropInTemplate = (int(np.ceil((cropIndex[1]+cropIndex[0])/2)), int(np.ceil((cropIndex[2]+cropIndex[3])/2)))\n cTmp = normxcorr2(CropImage,rootImage,mode='same') \n cMax, cPos = Max2d(cTmp) \n transX, transY = (cPos[0] - centerofCropInTemplate[0], cPos[1] - centerofCropInTemplate[1])\n if showFit:\n figC = plt.figure(figsize=(14,4))\n ax00 = plt.subplot2grid((1,3),(0,0),rowspan=1,colspan=1) \n ax00.set_title(\"Matching Corr Map\")\n ax01 = plt.subplot2grid((1,3),(0,1),rowspan=1,colspan=1) \n ax01.set_title(\"Root image\")\n ax02 = plt.subplot2grid((1,3),(0,2),rowspan=1,colspan=1) \n ax02.set_title(\"Template Image\") \n imax0 = ax00.imshow(cTmp,aspect='equal') \n ax01.imshow(rootImage,aspect='equal',cmap='gray')\n ax02.imshow(template,aspect='equal',cmap='gray') \n figC.colorbar(imax0,ax=ax00,orientation='vertical',fraction=0.05,aspect=50)\n\n figT = plt.figure(figsize=(5,5))\n axT = plt.subplot2grid((1,2),(0,0),rowspan=1,colspan=1) \n axT2 = plt.subplot2grid((1,2),(0,1),rowspan=1,colspan=1) \n axT.imshow(rootImage,cmap='gray',aspect='equal',interpolation = 'none')\n rect = patches.Rectangle((cPos[1]-np.shape(CropImage)[1]/2,cPos[0]-np.shape(CropImage)[0]/2), np.shape(CropImage)[1], np.shape(CropImage)[0], fill=False,linestyle='--',linewidth=2,edgecolor='tab:red')\n axT.add_patch(rect)\n axT2.imshow(CropImage,cmap='gray',aspect='equal',interpolation='none')\n\n return cTmp, cMax, transX, transY \n\n\ndef filter_bilateral( img_in, sigma_s, sigma_v, reg_constant=1e-8 ):\n \"\"\"Simple bilateral filtering of an input image\n\n Performs standard bilateral filtering of an input image. If padding is desired,\n img_in should be padded prior to calling\n\n Args:\n img_in (ndarray) monochrome input image\n sigma_s (float) spatial gaussian std. dev.\n sigma_v (float) value gaussian std. dev.\n reg_constant (float) optional regularization constant for pathalogical cases\n\n Returns:\n result (ndarray) output bilateral-filtered image\n\n Raises: \n ValueError whenever img_in is not a 2D float32 valued numpy.ndarray\n \"\"\"\n\n # check the input\n if not isinstance( img_in, numpy.ndarray ) or img_in.dtype != 'float32' or img_in.ndim != 2:\n raise ValueError('Expected a 2D numpy.ndarray with float32 elements')\n\n # make a simple Gaussian function taking the squared radius\n gaussian = lambda r2, sigma: (numpy.exp( -0.5*r2/sigma**2 )*3).astype(int)*1.0/3.0\n\n # define the window width to be the 3 time the spatial std. dev. to \n # be sure that most of the spatial kernel is actually captured\n win_width = int( 3*sigma_s+1 )\n\n # initialize the results and sum of weights to very small values for\n # numerical stability. not strictly necessary but helpful to avoid\n # wild values with pathological choices of parameters\n wgt_sum = numpy.ones( img_in.shape )*reg_constant\n result = img_in*reg_constant\n\n # accumulate the result by circularly shifting the image across the\n # window in the horizontal and vertical directions. 
within the inner\n # loop, calculate the two weights and accumulate the weight sum and \n # the unnormalized result image\n for shft_x in range(-win_width,win_width+1):\n for shft_y in range(-win_width,win_width+1):\n # compute the spatial weight\n w = gaussian( shft_x**2+shft_y**2, sigma_s )\n\n # shift by the offsets\n off = numpy.roll(img_in, [shft_y, shft_x], axis=[0,1] )\n\n # compute the value weight\n tw = w*gaussian( (off-img_in)**2, sigma_v )\n\n # accumulate the results\n result += off*tw\n wgt_sum += tw\n\n # normalize the result and return\n return result/wgt_sum\n\ndef FindVrange(enFace,VmaxBound=[0.999,1.0],VminBound=[0.01,0.05]):\n tmp = np.sort(enFace.flatten())\n sizeTmp = np.size(tmp) \n vmax = np.median(tmp[int(sizeTmp*VmaxBound[0]):int(sizeTmp*VmaxBound[1])]) \n vmin = np.median(tmp[int(sizeTmp*VminBound[0]):int(sizeTmp*VminBound[1])])\n OCTnorm = matplotlib.colors.Normalize(vmin=vmin,vmax=vmax) \n return [vmax, vmin,OCTnorm]\n\ndef truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):\n new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\n cmap(np.linspace(minval, maxval, n)))\n return new_cmap \n\ndef fillhole(input_image):\n '''\n input gray binary image get the filled image by floodfill method\n Note: only holes surrounded in the connected regions will be filled.\n :param input_image:\n :return:\n '''\n im_flood_fill = input_image.copy()\n h, w = input_image.shape[:2]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n im_flood_fill = im_flood_fill.astype(\"uint8\")\n cv2.floodFill(im_flood_fill, mask, (0, 0), 255)\n im_flood_fill_inv = cv2.bitwise_not(im_flood_fill)\n img_out = input_image | im_flood_fill_inv\n return img_out \n\ndef WriteIntoGif(path,fps,endsWith = '.png',saveFileName=None):\n import imageio \n from progress.bar import Bar\n if path.endswith(\"/\") or path.endswith(\"\\\\\"):\n path = path[:-1]\n pngFiles = []\n pngIndx = []\n for file in os.listdir(path):\n if file.endswith(endsWith):\n pngFiles.append(file) \n tmp = re.findall(r'\\d+',file)\n pngIndx.append(int(tmp[0]))\n pngFiles = np.asarray(pngFiles)\n pngFiles = pngFiles[np.argsort(pngIndx)]\n bar2 = Bar(' Writing into GIF', max=len(pngFiles))\n images = []\n\n for filename in pngFiles:\n bar2.next() \n images.append(imageio.imread(path+'/'+filename))\n if saveFileName:\n savename = saveFileName+\".gif\"\n else:\n savename = \"animation.gif\"\n imageio.mimsave(path+\"/\"+savename, images,fps=fps)\n\n\ndef patternMatch_fft(plane_xy_shift,plane_xy,showFit = False):\n \"\"\"\n pattern match in frequency domain by taking Fourier transform of input data\n here plane_xy_shift and plane_xy must has the same dimension. 
\n \"\"\" \n sx, sy = np.shape(plane_xy)\n fft_xy = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(plane_xy)))\n fft_xy_shift = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(plane_xy_shift)))\n cross_xy = np.abs(np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(fft_xy * np.conjugate(fft_xy_shift)))))\n max_corr_r, posxy = Max2d(cross_xy)\n shiftx, shifty = np.asarray(posxy)-[int(sx/2),int(sy/2)]\n\n if showFit:\n vmin = 2*np.amin(np.abs(plane_xy)**0.4)\n vmax = 0.7*np.amax(np.abs(plane_xy)**0.4)\n OCTnorm = matplotlib.colors.Normalize(vmin = vmin,vmax = vmax)\n fig = plt.figure(figsize=(5,3))\n ax00 = plt.subplot2grid((1,4),(0,0),rowspan=1,colspan=1) #xz\n ax01 = plt.subplot2grid((1,4),(0,1),rowspan=1,colspan=1)\n ax02 = plt.subplot2grid((1,4),(0,2),rowspan=1,colspan=1)\n ax03 = plt.subplot2grid((1,4),(0,3),rowspan=1,colspan=1)\n ax00.set_title(\"plane xy\")\n ax01.set_title(\"plane shift\")\n ax02.set_title(\"Fit res\")\n ax03.set_title(\"Re shift plane\")\n ax00.imshow(np.abs(plane_xy)**0.4,cmap='gray')\n ax01.imshow(np.abs(plane_xy_shift)**0.4,cmap='gray') \n ax02.imshow(np.abs(cross_xy))\n ax03.imshow(np.roll(np.abs(plane_xy_shift)**0.4,(shiftx,shifty),axis=(0,1)),cmap='gray')\n\n return [max_corr_r,shiftx,shifty]\n\ndef patternMatch_fft_scan(testVol,refPlane,cAxis,numAve=2,showFit=False):\n \"\"\"\n Do a pattern match by using fft method and also scan over a range along cAxis. \n refPlane: 2d dim array (alread median filtered)\n testVol: 3d dim array\n cAxis: along which axis of testVol to do plane-b-plane compare\n numAve: number of axis to averged over cAxis to do compare\n \"\"\"\n testVol = (testVol - np.mean(testVol))/np.std(testVol) \n refPlane = (refPlane - np.mean(refPlane))/np.std(refPlane) #normalize to reduce the effects of variant brightness on motion correction \n cAxisLen = np.shape(testVol)[cAxis] \n CorrR = 0 \n shiftx = 0\n shifty = 0\n for i in np.arange(numAve,cAxisLen-numAve,step=1,dtype=int):\n if cAxis == 0:\n tmpPlane = np.amax(testVol[i-numAve:i+numAve,:,:],axis=0)\n elif cAxis == 1:\n tmpPlane = np.amax(testVol[:,i-numAve:i+numAve,:],axis=1)\n elif cAxis == 2:\n tmpPlane = np.amax(testVol[:,:,i-numAve:i+numAve],axis=2)\n tmpPlane = ndimage.median_filter(tmpPlane,size=(3,3))\n tmpCorrR,tmpshiftx,tmpshifty = patternMatch_fft(tmpPlane,refPlane,showFit=showFit)\n if np.abs(tmpCorrR) > np.abs(CorrR):\n CorrR = tmpCorrR \n shiftx = tmpshiftx \n shifty = tmpshifty \n\n return [shiftx,shifty]\n\ndef patternMatch_fft_3d(testVol_raw,refVol_raw,testSurfPos,refSurfPos):\n \"\"\"\n Pattern match for motion correction in 3d. 
\n src_freq = np.fft.fftn(src_image_cpx)\n target_freq = np.fft.fftn(target_image_cpx)\n shape = src_freq.shape\n image_product = src_freq * target_freq.conj()\n cross_correlation = np.fft.ifftn(image_product)\n #cross_correlation = ifftn(image_product) # TODO CHECK why this line is different\n new_cross_corr = np.abs(cross_correlation)\n CCmax = cross_correlation.max()\n maxima = np.unravel_index(np.argmax(new_cross_corr), new_cross_corr.shape)\n midpoints = np.array([np.fix(axis_size//2) for axis_size in shape])\n shifts = np.array(maxima, dtype=np.float32)\n shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]\n \"\"\"\n zshift = int(refSurfPos-testSurfPos) \n testVol_raw = np.roll(testVol_raw,zshift,axis=0)\n testVol = testVol_raw[refSurfPos+20:refSurfPos+450,:,:]\n refVol = refVol_raw[refSurfPos+20:refSurfPos+450,:,:]\n testVol = (testVol - np.mean(testVol))/np.std(testVol) \n refVol = (refVol - np.mean(refVol))/np.std(refVol) \n test_freq = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(testVol))) \n ref_freq = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(refVol))) \n shape = test_freq.shape\n cross_correlation = np.abs(np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(ref_freq * test_freq.conj()))))\n CCmax = cross_correlation.max() \n maxima = np.unravel_index(np.argmax(cross_correlation), cross_correlation.shape)\n midpoints = np.array([np.fix(axis_size//2) for axis_size in shape]) \n shifts = np.array(maxima-midpoints, dtype=int)\n #shifts[0] = shifts[0]-int(testSurfPos-refSurfPos) \n testVol_raw = np.roll(testVol_raw,shifts,axis=(0,1,2)) \n if shifts[0] > 30:\n print(\"Warning: z axis shift larger than 30 pixels!\")\n if shifts[1] > 30:\n print(\"Warning: x axis shift larger than 30 pixels!\")\n if shifts[2] > 30:\n print(\"Warining: y axis shift larger than 30 pixels!\") \n return shifts, testVol_raw \n\ndef ListTXT(FileName,mode='w',data=None):\n if mode.lower() == 'w' or mode.lower() == \"write\":\n if data == None:\n raise ValueError(\"Please input DATA to be saved!\")\n else:\n with open(FileName,\"wb\") as fp2:\n pickle.dump(data,fp2)\n output = 1\n elif mode.lower() == \"r\" or mode.lower() == \"read\":\n with open(FileName, \"rb\") as fp: # Unpickling\n output = pickle.load(fp)\n else:\n raise ValueError(\"mode should only be either write or read !\")\n \n return output\n\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = data.flatten() #1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, h\n\n\nclass MplColorHelper:\n def __init__(self, cmap_name, start_val, stop_val):\n self.cmap_name = cmap_name\n self.cmap = plt.get_cmap(cmap_name)\n self.norm = matplotlib.colors.Normalize(vmin=start_val, vmax=stop_val)\n self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)\n print(\"color clim is {}\".format(self.scalarMap.get_clim()))\n def get_rgb(self, val,alpha=1.0):\n return self.scalarMap.to_rgba(val,alpha=alpha) #,bytes=True)\n\n\n\ndef get_prime_factors(number):\n prime_factors = []\n\n while number % 2 == 0:\n prime_factors.append(2)\n number = number / 2\n for i in range(3, int(math.sqrt(number)) + 1, 2):\n while number % i == 0:\n prime_factors.append(int(i))\n number = number / i\n if number > 2:\n prime_factors.append(int(number))\n return prime_factors\n"
] | [
[
"numpy.amax",
"numpy.imag",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"matplotlib.pyplot.get_cmap",
"numpy.mean",
"numpy.iscomplexobj",
"numpy.fix",
"numpy.where",
"matplotlib.pyplot.subplot2grid",
"numpy.roll",
"numpy.conjugate",
"numpy.square",
"numpy.fliplr",
"numpy.arange",
"scipy.ndimage.median_filter",
"numpy.ceil",
"numpy.size",
"numpy.std",
"numpy.argmax",
"matplotlib.cm.ScalarMappable",
"numpy.fft.ifftshift",
"numpy.real",
"numpy.zeros",
"matplotlib.pyplot.figure",
"scipy.signal.fftconvolve",
"numpy.ndim",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.isfinite",
"matplotlib.colors.Normalize",
"numpy.ones",
"numpy.shape",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
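patternMatch_fft in the PyOCT misc.py row above estimates an in-plane displacement from the peak of an FFT-based cross-correlation. The snippet below reproduces that idea on a synthetic image with plain numpy so the peak-to-shift bookkeeping can be checked in isolation; it is a simplified sketch, not the PyOCT function itself.

import numpy as np

ref = np.zeros((64, 64))
ref[20:30, 12:22] = 1.0                       # a bright block
shifted = np.roll(ref, (5, -3), axis=(0, 1))  # displaced copy

# cross-correlation via the Fourier domain; its peak encodes the displacement
corr = np.abs(np.fft.ifft2(np.fft.fft2(ref) * np.conj(np.fft.fft2(shifted))))
dy, dx = np.unravel_index(np.argmax(corr), corr.shape)
# wrap indices past half the image size back to negative shifts
dy = dy - 64 if dy > 32 else dy
dx = dx - 64 if dx > 32 else dx
print(dy, dx)  # -5 3: roll 'shifted' by (-5, 3) to realign it with 'ref'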
stephane-eisen/pyleecan | [
"1faedde4b24acc6361fa1fdd4e980eaec4ca3a62",
"8444b8131c9eff11a616da8277fb1f280c8f70e5"
] | [
"pyleecan/Methods/Machine/LamSlotMulti/plot.py",
"pyleecan/GUI/Dxf/DXF_Hole.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom matplotlib.patches import Patch\nfrom matplotlib.pyplot import axis, legend\n\nfrom ....Functions.init_fig import init_fig\nfrom ....definitions import config_dict\n\nROTOR_COLOR = config_dict[\"PLOT\"][\"COLOR_DICT\"][\"ROTOR_COLOR\"]\nSTATOR_COLOR = config_dict[\"PLOT\"][\"COLOR_DICT\"][\"STATOR_COLOR\"]\n\n\ndef plot(\n self,\n fig=None,\n ax=None,\n is_lam_only=False,\n sym=1,\n alpha=0,\n delta=0,\n is_edge_only=False,\n is_display=True,\n is_show_fig=True,\n):\n \"\"\"Plot the Lamination with empty Slots in a matplotlib fig\n\n Parameters\n ----------\n self : LamSlotMulti\n A LamSlotMulti object\n fig : Matplotlib.figure.Figure\n existing figure to use if None create a new one\n ax : Matplotlib.axes.Axes object\n Axis on which to plot the data\n is_lam_only: bool\n True to plot only the lamination\n sym : int\n Symmetry factor (1= full machine, 2= half of the machine...)\n alpha : float\n Angle for rotation [rad]\n delta : complex\n Complex value for translation\n is_edge_only: bool\n To plot transparent Patches\n is_display : bool\n False to return the patches\n is_show_fig : bool\n To call show at the end of the method\n Returns\n -------\n patches : list\n List of Patches\n \"\"\"\n\n if self.is_stator:\n lam_color = STATOR_COLOR\n else:\n lam_color = ROTOR_COLOR\n\n (fig, axes, patch_leg, label_leg) = init_fig(fig=fig, ax=ax, shape=\"rectangle\")\n\n surf_list = self.build_geometry(sym=sym, alpha=alpha, delta=delta)\n patches = list()\n for surf in surf_list:\n if \"Lamination\" in surf.label:\n patches.extend(surf.get_patches(color=lam_color, is_edge_only=is_edge_only))\n else:\n patches.extend(surf.get_patches(is_edge_only=is_edge_only))\n # Display the result\n if is_display:\n (fig, axes, patch_leg, label_leg) = init_fig(fig)\n axes.set_xlabel(\"(m)\")\n axes.set_ylabel(\"(m)\")\n for patch in patches:\n axes.add_patch(patch)\n\n # Axis Setup\n axes.axis(\"equal\")\n\n # The Lamination is centered in the figure\n Lim = self.Rext * 1.5\n axes.set_xlim(-Lim, Lim)\n axes.set_ylim(-Lim, Lim)\n\n # Add the legend\n if not is_edge_only:\n if self.is_stator:\n patch_leg.append(Patch(color=STATOR_COLOR))\n label_leg.append(\"Stator\")\n axes.set_title(\"Stator with empty slot\")\n else:\n patch_leg.append(Patch(color=ROTOR_COLOR))\n label_leg.append(\"Rotor\")\n axes.set_title(\"Rotor with empty slot\")\n\n legend(patch_leg, label_leg)\n if is_show_fig:\n fig.show()\n else:\n return patches\n",
"from logging import getLogger\nfrom os.path import dirname, isfile\nimport matplotlib.pyplot as plt\nfrom ezdxf import readfile\nfrom numpy import angle as np_angle\nfrom numpy import array, pi, argmax, argmin\nfrom numpy import max as np_max, min as np_min\nfrom PySide2.QtCore import QUrl, Qt\nfrom PySide2.QtGui import QIcon, QPixmap, QDesktopServices\nfrom PySide2.QtWidgets import (\n QComboBox,\n QDialog,\n QFileDialog,\n QMessageBox,\n QPushButton,\n QHeaderView,\n)\n\nfrom ...Classes.HoleUD import HoleUD\nfrom ...Classes.Magnet import Magnet\nfrom ...Classes.SurfLine import SurfLine\nfrom ...GUI.Dxf.dxf_to_pyleecan_list import dxf_to_pyleecan_list\nfrom ...GUI.Resources import pixmap_dict\nfrom ...GUI.Tools.MPLCanvas import MPLCanvas2\nfrom ...GUI.Tools.FloatEdit import FloatEdit\nfrom ...GUI import gui_option\nfrom ...loggers import GUI_LOG_NAME\nfrom .Ui_DXF_Hole import Ui_DXF_Hole\n\n# Column index for table\n\nDEL_COL = 0\nHL_COL = 1\nTYPE_COL = 2\nREF_COL = 3\nOFF_COL = 4\n\nICON_SIZE = 24\n# Unselected, selected, selected-bottom-mag\nCOLOR_LIST = [\"k\", \"r\", \"c\"]\nZ_TOL = 1e-4 # Point comparison tolerance\n\n\nclass DXF_Hole(Ui_DXF_Hole, QDialog):\n \"\"\"Dialog to create HoleUD objects from DXF files\"\"\"\n\n def __init__(self, dxf_path=None, Zh=None, Lmag=None, lam=None):\n \"\"\"Initialize the Dialog\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n dxf_path : str\n Path to a dxf file to read\n \"\"\"\n # Widget setup\n QDialog.__init__(self)\n self.setupUi(self)\n\n # Icon preparation\n self.delete_icon = QPixmap(pixmap_dict[\"cross\"])\n self.delete_icon.scaled(ICON_SIZE, ICON_SIZE, Qt.KeepAspectRatio)\n self.highlight_icon = QPixmap(pixmap_dict[\"search\"])\n self.highlight_icon.scaled(ICON_SIZE, ICON_SIZE, Qt.KeepAspectRatio)\n\n # Tutorial video link\n self.url = \"https://pyleecan.org/videos.html#feature-tutorials\"\n self.b_tuto.setEnabled(True)\n\n # Set units\n self.lf_mag_len.unit = \"m\"\n wid_list = [\n self.unit_mag_len,\n ]\n for wid in wid_list:\n wid.setText(\"[\" + gui_option.unit.get_m_name() + \"]\")\n\n # Initialize the graph\n self.init_graph()\n\n # Not used yet\n self.lf_axe_angle.hide()\n self.in_axe_angle.hide()\n self.unit_axe_angle.hide()\n\n # Set default values\n if Zh is not None:\n self.si_Zh.setValue(Zh)\n if Lmag is not None:\n self.lf_mag_len.setValue(Lmag)\n if lam is None:\n self.lam = lam\n else:\n self.lam = lam.copy()\n\n # Init properties\n self.line_list = list() # List of line from DXF\n self.selected_list = list() # List of currently selected lines\n self.surf_list = list() # List of defined surfaces\n self.Zcenter = 0 # For translate offset\n\n # Set DXF edit widget\n self.lf_center_x.setValue(0)\n self.lf_center_y.setValue(0)\n self.lf_scaling.validator().setBottom(0)\n self.lf_scaling.setValue(1)\n\n # Load the DXF file if provided\n self.dxf_path = dxf_path\n if dxf_path is not None and isfile(dxf_path):\n self.open_document()\n\n # Setup Path selector for DXF files\n self.w_path_selector.obj = self\n self.w_path_selector.param_name = \"dxf_path\"\n self.w_path_selector.verbose_name = \"DXF File\"\n self.w_path_selector.extension = \"DXF file (*.dxf)\"\n self.w_path_selector.set_path_txt(self.dxf_path)\n self.w_path_selector.update()\n\n # Set table column width\n header = self.w_surface_list.horizontalHeader()\n header.setSectionResizeMode(DEL_COL, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(HL_COL, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(TYPE_COL, 
QHeaderView.ResizeToContents)\n header.setSectionResizeMode(REF_COL, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(OFF_COL, QHeaderView.ResizeToContents)\n\n # Connect signals to slot\n self.w_path_selector.pathChanged.connect(self.open_document)\n self.b_save.pressed.connect(self.save)\n self.b_plot.pressed.connect(self.plot)\n self.b_reset.pressed.connect(self.update_graph)\n self.b_cancel.pressed.connect(self.remove_selection)\n self.b_tuto.pressed.connect(self.open_tuto)\n self.lf_center_x.editingFinished.connect(self.set_center)\n self.lf_center_y.editingFinished.connect(self.set_center)\n\n # Display the GUI\n self.show()\n\n def open_document(self):\n \"\"\"Open a new dxf in the viewer\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n \"\"\"\n\n getLogger(GUI_LOG_NAME).debug(\"Reading dxf file: \" + self.dxf_path)\n # Read the DXF file\n try:\n document = readfile(self.dxf_path)\n modelspace = document.modelspace()\n # Convert DXF to pyleecan objects\n self.line_list = dxf_to_pyleecan_list(modelspace)\n # Display\n # selected line: 0: unselected, 1:selected, 2: selected bottom magnet\n self.selected_list = [0 for line in self.line_list]\n self.surf_list = list()\n self.w_surface_list.setRowCount(0)\n self.update_graph()\n except Exception as e:\n QMessageBox().critical(\n self,\n self.tr(\"Error\"),\n self.tr(\"Error while reading dxf file:\\n\" + str(e)),\n )\n\n def init_graph(self):\n \"\"\"Initialize the viewer\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n \"\"\"\n fig, axes = self.w_viewer.fig, self.w_viewer.axes\n axes.set_axis_off()\n\n # Setup interaction with graph\n def select_line(event):\n \"\"\"Function to select/unselect the closest line from click\"\"\"\n X = event.xdata # X position of the click\n Y = event.ydata # Y position of the click\n # Get closer pyleecan object\n Z = X + 1j * Y\n min_dist = float(\"inf\")\n closest_id = -1\n for ii, line in enumerate(self.line_list):\n line_dist = line.comp_distance(Z)\n if line_dist < min_dist:\n closest_id = ii\n min_dist = line_dist\n # Select/unselect line\n if self.selected_list[closest_id] == 0: # Unselected to selected\n self.selected_list[closest_id] = 1\n elif self.selected_list[closest_id] == 1: # Selected to selected bottom mag\n if 2 in self.selected_list:\n current_bot_mag = self.selected_list.index(2)\n # Only one selected bottom mag line at the time\n point_list = array(self.line_list[current_bot_mag].discretize(20))\n self.selected_list[current_bot_mag] = 1\n axes.plot(point_list.real, point_list.imag, COLOR_LIST[1], zorder=2)\n self.selected_list[closest_id] = 2\n elif self.selected_list[closest_id] == 2:\n # selected bottom mag to Unselected\n self.selected_list[closest_id] = 0\n # Change line color\n point_list = array(self.line_list[closest_id].discretize(20))\n color = COLOR_LIST[self.selected_list[closest_id]]\n axes.plot(point_list.real, point_list.imag, color, zorder=2)\n self.w_viewer.draw()\n\n # Check if the surface is complete\n if self.check_selection():\n self.add_surface()\n\n def zoom(event):\n \"\"\"Function to zoom/unzoom according the mouse wheel\"\"\"\n\n base_scale = 0.3 # Scaling factor\n # get the current x and y limits\n ax = self.w_viewer.axes\n cur_xlim = ax.get_xlim()\n cur_ylim = ax.get_ylim()\n cur_xrange = (cur_xlim[1] - cur_xlim[0]) * 0.5\n cur_yrange = (cur_ylim[1] - cur_ylim[0]) * 0.5\n xdata = event.xdata # get event x location\n ydata = event.ydata # get event y location\n if event.button == \"down\":\n # deal with zoom 
in\n scale_factor = 1 / base_scale\n elif event.button == \"up\":\n # deal with zoom out\n scale_factor = base_scale\n else:\n # deal with something that should never happen\n scale_factor = 1\n # set new limits\n ax.set_xlim(\n [xdata - cur_xrange * scale_factor, xdata + cur_xrange * scale_factor]\n )\n ax.set_ylim(\n [ydata - cur_yrange * scale_factor, ydata + cur_yrange * scale_factor]\n )\n self.w_viewer.draw() # force re-draw\n\n # Connect the function\n self.w_viewer.mpl_connect(\"button_press_event\", select_line)\n self.w_viewer.mpl_connect(\"scroll_event\", zoom)\n\n # Axis cleanup\n axes.axis(\"equal\")\n axes.set_axis_off()\n\n def set_center(self):\n \"\"\"Update the position of the center\"\"\"\n self.Zcenter = self.lf_center_x.value() + 1j * self.lf_center_y.value()\n self.update_graph()\n\n def update_graph(self):\n \"\"\"Clean and redraw all the lines in viewer\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n \"\"\"\n fig, axes = self.w_viewer.fig, self.w_viewer.axes\n axes.clear()\n axes.set_axis_off()\n\n # Draw the lines in the correct color\n for ii, line in enumerate(self.line_list):\n point_list = array(line.discretize(20))\n color = COLOR_LIST[self.selected_list[ii]]\n axes.plot(point_list.real, point_list.imag, color, zorder=1)\n # Add lamination center\n axes.plot(self.Zcenter.real, self.Zcenter.imag, \"rx\", zorder=0)\n axes.text(self.Zcenter.real, self.Zcenter.imag, \"O\")\n\n self.w_viewer.draw()\n\n def check_selection(self):\n \"\"\"Check if every line in the selection form a surface\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n\n Returns\n -------\n is_surf : bool\n True if it forms a surface\n \"\"\"\n\n # Create list of begin and end point for all lines\n point_list = list()\n for ii, line in enumerate(self.line_list):\n if self.selected_list[ii]:\n point_list.append(line.get_begin())\n point_list.append(line.get_end())\n\n # Check with a tolerance if every point is twice in the list\n if len(point_list) == 0:\n return False\n\n for p1 in point_list:\n count = 0\n for p2 in point_list:\n if abs(p1 - p2) < Z_TOL:\n count += 1\n if count != 2:\n return False\n\n return True\n\n def add_surface(self):\n \"\"\"Validate the selection and create a surface object\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n \"\"\"\n\n # Get all the selected lines\n line_list = list()\n index_list = list()\n for ii, line in enumerate(self.line_list):\n if self.selected_list[ii]:\n index_list.append(str(ii))\n line_list.append(line.copy())\n # Sort the lines (begin = end)\n curve_list = list()\n curve_list.append(line_list.pop())\n while len(line_list) > 0:\n end = curve_list[-1].get_end()\n for ii in range(len(line_list)):\n if abs(line_list[ii].get_begin() - end) < Z_TOL:\n break\n if abs(line_list[ii].get_end() - end) < Z_TOL:\n line_list[ii].reverse()\n break\n curve_list.append(line_list.pop(ii))\n # Create the Surface object\n self.surf_list.append(SurfLine(line_list=curve_list))\n self.surf_list[-1].comp_point_ref(is_set=True)\n\n # Add a line in the Table\n nrows = self.w_surface_list.rowCount()\n self.w_surface_list.setRowCount(nrows + 1)\n # Adding Surface Type combobox\n combobox = QComboBox()\n combobox.addItems([\"Hole\", \"Magnet\"])\n self.w_surface_list.setCellWidget(\n nrows,\n TYPE_COL,\n combobox,\n )\n if 2 in self.selected_list:\n combobox.setCurrentIndex(1) # Magnet\n combobox.currentIndexChanged.connect(self.enable_magnetization)\n\n # Adding Delete button\n del_button = QPushButton(\"\")\n 
del_button.setIcon(QIcon(self.delete_icon))\n del_button.pressed.connect(self.delete_surface)\n self.w_surface_list.setCellWidget(\n nrows,\n DEL_COL,\n del_button,\n )\n\n # Adding Highlight button\n HL_button = QPushButton(\"\")\n HL_button.setIcon(QIcon(self.highlight_icon))\n HL_button.pressed.connect(self.highlight_surface)\n self.w_surface_list.setCellWidget(\n nrows,\n HL_COL,\n HL_button,\n )\n\n # Add reference combobox\n combobox = QComboBox()\n combobox.addItems(index_list)\n self.w_surface_list.setCellWidget(\n nrows,\n REF_COL,\n combobox,\n )\n if 2 in self.selected_list:\n combobox.setCurrentIndex(\n index_list.index(str(self.selected_list.index(2)))\n ) #\n else:\n combobox.setEnabled(False)\n\n # Add Offset FloatEdit\n lf_off = FloatEdit()\n lf_off.validator().setBottom(-360)\n lf_off.validator().setTop(360)\n lf_off.setValue(0)\n # lf_off.setText(\"0\")\n lf_off.setEnabled(2 in self.selected_list)\n self.w_surface_list.setCellWidget(\n nrows,\n OFF_COL,\n lf_off,\n )\n\n # Remove selection to start new one\n self.remove_selection()\n\n def enable_magnetization(self):\n \"\"\"Enable/Disable the combobox/float edit for magnetization according to type\"\"\"\n for ii in range(self.w_surface_list.rowCount()):\n if self.w_surface_list.cellWidget(ii, TYPE_COL).currentIndex() == 0:\n self.w_surface_list.cellWidget(ii, REF_COL).setEnabled(False)\n self.w_surface_list.cellWidget(ii, OFF_COL).setEnabled(False)\n else:\n self.w_surface_list.cellWidget(ii, REF_COL).setEnabled(True)\n self.w_surface_list.cellWidget(ii, OFF_COL).setEnabled(True)\n\n def remove_selection(self):\n # Remove selection\n self.selected_list = [0 for line in self.line_list]\n self.update_graph()\n\n def get_hole(self):\n \"\"\"Generate the HoleUD object corresponding to the selected surfaces\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n\n Returns\n -------\n hole : HoleUD\n User defined hole according to selected surfaces\n \"\"\"\n\n if self.lf_scaling.value() == 0: # Avoid error\n self.lf_scaling.setValue(1)\n hole = HoleUD(surf_list=[])\n bottom_list = list()\n offset_list = list()\n # Set labels\n Nmag = 0\n for ii in range(self.w_surface_list.rowCount()):\n hole.surf_list.append(self.surf_list[ii].copy())\n hole.surf_list[ii].scale(self.lf_scaling.value())\n if self.w_surface_list.cellWidget(ii, TYPE_COL).currentIndex() == 0:\n hole.surf_list[ii].label = \"Hole\"\n else:\n hole.surf_list[ii].label = \"HoleMagnet\"\n Nmag += 1\n bottom_list.append(\n self.line_list[\n int(self.w_surface_list.cellWidget(ii, REF_COL).currentText())\n ]\n )\n offset_list.append(self.w_surface_list.cellWidget(ii, OFF_COL).value())\n # Create magnet objects\n hole.magnet_dict = dict()\n for ii in range(Nmag):\n hole.magnet_dict[\"magnet_\" + str(ii)] = Magnet(type_magnetization=1)\n\n # Sort the surfaces\n angles = [np_angle(surf.point_ref) for surf in hole.surf_list]\n idx = sorted(range(len(angles)), key=lambda k: angles[k])\n surf_list_sorted = [hole.surf_list[ii] for ii in idx]\n bottom_list_sorted = [bottom_list[ii] for ii in idx]\n offset_list_sorted = [offset_list[ii] for ii in idx]\n hole.surf_list = surf_list_sorted\n\n # Translate\n if self.Zcenter != 0:\n for surf in hole.surf_list:\n surf.translate(-self.Zcenter * self.lf_scaling.value())\n\n # Rotation\n Zref = sum([surf.point_ref for surf in hole.surf_list])\n for surf in hole.surf_list:\n surf.rotate(-1 * np_angle(Zref))\n\n # Magnetization dict\n mag_dict = dict()\n Nmag = 0\n for ii in range(len(hole.surf_list)):\n if \"Magnet\" in 
hole.surf_list[ii].label:\n line = bottom_list_sorted[ii].copy()\n line.rotate(-1 * np_angle(Zref))\n mag_dict[\"magnet_\" + str(Nmag)] = line.comp_normal()\n mag_dict[\"magnet_\" + str(Nmag)] += offset_list_sorted[ii] * pi / 180\n Nmag += 1\n hole.magnetization_dict_offset = mag_dict\n\n # Set metadata\n hole.Zh = self.si_Zh.value()\n for magnet in hole.magnet_dict.values():\n magnet.Lmag = self.lf_mag_len.value()\n\n # Remove all materials => To be set in GUI\n hole.mat_void = None\n for magnet in hole.magnet_dict.values():\n magnet.mat_type = None\n\n # Sort Hole then magnets\n # (for plot when Magnets are inside Hole surface)\n mag_list = list()\n hole_list = list()\n for surf in hole.surf_list:\n if \"HoleMagnet\" in surf.label:\n mag_list.append(surf)\n else:\n hole_list.append(surf)\n hole.surf_list = hole_list + mag_list\n\n # Correct hole ref_point (when Magnets are inside Hole surface)\n for surf in hole.surf_list:\n if \"HoleMagnet\" not in surf.label:\n line_list = surf.get_lines()\n # Get middle list\n middle_array = array([line.get_middle() for line in line_list])\n # Get the extrema line on the top or bottom of the hole\n if np_min(middle_array.imag) > 0 and np_max(middle_array.imag) > 0:\n start_idx = argmax(middle_array.imag)\n else:\n start_idx = argmin(middle_array.imag)\n # Get the two lines middle besides the extrema line middle\n if start_idx == 0:\n ref_mid = [middle_array[-1], middle_array[0], middle_array[1]]\n elif start_idx == len(line_list) - 1:\n ref_mid = [middle_array[-2], middle_array[-1], middle_array[0]]\n else:\n ref_mid = [\n middle_array[start_idx - 1],\n middle_array[start_idx],\n middle_array[start_idx + 1],\n ]\n # Barycenter of these middles as new reference\n surf.point_ref = sum(ref_mid) / 3\n\n return hole\n\n def plot(self):\n \"\"\"Plot the current state of the hole\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n \"\"\"\n hole = self.get_hole()\n if self.lam is None:\n hole.plot(is_add_arrow=True)\n else:\n fig, (ax1, ax2) = plt.subplots(1, 2)\n hole.plot(fig=fig, ax=ax1, is_add_arrow=True, is_add_ref=False)\n self.lam.hole = [hole]\n self.lam.plot(fig=fig, ax=ax2)\n\n def delete_surface(self):\n \"\"\"Delete a selected surface\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n \"\"\"\n nrow = self.w_surface_list.currentRow()\n self.surf_list.pop(nrow)\n self.w_surface_list.removeRow(nrow)\n\n def highlight_surface(self):\n \"\"\"Highlight a surface to find it on the viewer\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n \"\"\"\n self.selected_list = [0 for line in self.line_list]\n surf = self.surf_list[self.w_surface_list.currentRow()]\n # Find the index of the surface line in self.line_list\n for surf_line in surf.line_list:\n mid = surf_line.get_middle()\n for ii, line in enumerate(self.line_list):\n if abs(mid - line.get_middle()) < Z_TOL:\n self.selected_list[ii] = 1\n self.w_viewer.axes.text(\n mid.real,\n mid.imag,\n str(ii),\n # fontsize=fontsize,\n )\n self.update_graph()\n # Add Label\n for ii in range(len(self.selected_list)):\n if self.selected_list[ii] == 1:\n Zmid = self.line_list[ii].get_middle()\n self.w_viewer.axes.text(\n Zmid.real,\n Zmid.imag,\n str(ii),\n # fontsize=fontsize,\n )\n\n def save(self):\n \"\"\"Save the HoleUD object in a json file\n\n Parameters\n ----------\n self : DXF_Hole\n a DXF_Hole object\n \"\"\"\n\n hole = self.get_hole()\n\n save_file_path = QFileDialog.getSaveFileName(\n self, self.tr(\"Save file\"), dirname(self.dxf_path), \"Json 
(*.json)\"\n )[0]\n if save_file_path not in [\"\", \".json\", None]:\n self.save_path = save_file_path\n try:\n hole.save(save_file_path)\n self.accept()\n except Exception as e:\n QMessageBox().critical(\n self,\n self.tr(\"Error\"),\n self.tr(\"Error while saving hole json file:\\n\" + str(e)),\n )\n\n def open_tuto(self):\n \"\"\"Open the tutorial video in a web browser\"\"\"\n QDesktopServices.openUrl(QUrl(self.url))\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch"
],
[
"numpy.min",
"matplotlib.pyplot.subplots",
"numpy.max",
"numpy.argmax",
"numpy.argmin",
"numpy.angle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
charliememory/detectron2 | [
"a2a6220068e73c616ee4c84cb52ea023c0203fa0",
"a2a6220068e73c616ee4c84cb52ea023c0203fa0",
"a2a6220068e73c616ee4c84cb52ea023c0203fa0"
] | [
"projects/DensePose_wrong/densepose/modeling/condinst/iuv_head.py",
"projects/DensePose/densepose/densepose_coco_evaluation.py",
"projects/DensePose/densepose/modeling/condinst/iuv_multiscale_head.py"
] | [
"from typing import Dict\nimport math\n\nimport torch\nfrom torch import nn\n\nfrom fvcore.nn import sigmoid_focal_loss_jit\nfrom detectron2.layers import ShapeSpec\n\n# from adet.layers import conv_with_kaiming_uniform\n# from adet.utils.comm import aligned_bilinear\nfrom densepose.layers import conv_with_kaiming_uniform\nfrom densepose.utils.comm import compute_locations, aligned_bilinear\n# from .. import (\n# build_densepose_data_filter,\n# build_densepose_head,\n# build_densepose_losses,\n# build_densepose_predictor,\n# densepose_inference,\n# )\nimport pdb\n\nINF = 100000000\n\n\ndef build_iuv_head(cfg):\n # return GlobalIUVHeadAfterMaskBranch(cfg)\n return CoordGlobalIUVHeadAfterMaskBranch(cfg)\n\n\nclass CoordGlobalIUVHeadAfterMaskBranch(nn.Module):\n def __init__(self, cfg, disable_rel_coords=False):\n super().__init__()\n self.num_outputs = cfg.MODEL.CONDINST.IUVHead.OUT_CHANNELS\n norm = cfg.MODEL.CONDINST.IUVHead.NORM\n num_convs = cfg.MODEL.CONDINST.IUVHead.NUM_CONVS\n channels = cfg.MODEL.CONDINST.IUVHead.CHANNELS\n soi = cfg.MODEL.FCOS.SIZES_OF_INTEREST\n self.register_buffer(\"sizes_of_interest\", torch.tensor(soi + [soi[-1] * 2]))\n self.in_channels = channels + 2\n self.iuv_out_stride = cfg.MODEL.CONDINST.MASK_OUT_STRIDE\n self.disable_rel_coords = disable_rel_coords\n\n conv_block = conv_with_kaiming_uniform(norm, activation=True)\n\n tower = []\n tower.append(conv_block(\n self.in_channels, channels, 3, 1\n ))\n for i in range(1,num_convs):\n tower.append(conv_block(\n channels, channels, 3, 1\n ))\n tower.append(nn.Conv2d(\n channels, max(self.num_outputs, 1), 1\n ))\n self.add_module('tower', nn.Sequential(*tower))\n\n # self.densepose_losses = build_densepose_losses(cfg)\n\n def forward(self, s_logits, iuv_feats, iuv_feat_stride, instances):\n\n locations = compute_locations(\n iuv_feats.size(2), iuv_feats.size(3),\n stride=iuv_feat_stride, device=iuv_feats.device\n )\n # n_inst = len(instances)\n\n im_inds = instances.im_inds\n\n N, _, H, W = iuv_feats.size()\n rel_coord = torch.zeros([N,2,H,W], device=iuv_feats.device).to(dtype=iuv_feats.dtype)\n\n if not self.disable_rel_coords: \n instance_locations = instances.locations\n relative_coords = instance_locations.reshape(-1, 1, 2) - locations.reshape(1, -1, 2)\n relative_coords = relative_coords.permute(0, 2, 1).float()\n soi = self.sizes_of_interest.float()[instances.fpn_levels]\n relative_coords = relative_coords / soi.reshape(-1, 1, 1)\n relative_coords = relative_coords.to(dtype=iuv_feats.dtype)\n # rel_coord_list = []\n for idx in range(N):\n if idx in im_inds:\n cc = relative_coords[im_inds==idx,].reshape(-1, 2, H, W)\n # assert s_logits.shape[1]==1\n ss = s_logits[im_inds==idx,-1:]\n # coord = torch.sum(cc*ss, dim=0, keepdim=True) \\\n # / (torch.sum(ss, dim=0, keepdim=True)+1e-7)\n coord = torch.mean(cc*ss, dim=0, keepdim=True) \n rel_coord[idx:idx+1] = coord #.reshape(1, 2, H, W)\n # pdb.set_trace()\n # import imageio\n # imageio.imwrite(\"tmp/cc.png\",cc[0,0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/ss.png\",ss[0,0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/cc_ss.png\",(cc*ss)[0,0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/ss_sum.png\",torch.sum(ss, dim=0, keepdim=True)[0,0].detach().cpu().numpy())\n # imageio.imwrite(\"tmp/coord_mean.png\",coord[0,0].detach().cpu().numpy())\n # rel_coord_list.append(rel_coord)\n # pdb.set_trace()\n iuv_head_inputs = torch.cat([rel_coord, iuv_feats], dim=1) \n else:\n iuv_head_inputs = iuv_feats\n\n\n\n\n\n iuv_logit = 
self.tower(iuv_head_inputs)\n\n assert iuv_feat_stride >= self.iuv_out_stride\n assert iuv_feat_stride % self.iuv_out_stride == 0\n iuv_logit = aligned_bilinear(iuv_logit, int(iuv_feat_stride / self.iuv_out_stride))\n\n return iuv_logit\n\n\nclass GlobalIUVHeadAfterMaskBranch(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.num_outputs = cfg.MODEL.CONDINST.IUVHead.OUT_CHANNELS\n norm = cfg.MODEL.CONDINST.IUVHead.NORM\n num_convs = cfg.MODEL.CONDINST.IUVHead.NUM_CONVS\n channels = cfg.MODEL.CONDINST.IUVHead.CHANNELS\n self.iuv_out_stride = cfg.MODEL.CONDINST.MASK_OUT_STRIDE\n\n conv_block = conv_with_kaiming_uniform(norm, activation=True)\n\n tower = []\n for i in range(num_convs):\n tower.append(conv_block(\n channels, channels, 3, 1\n ))\n tower.append(nn.Conv2d(\n channels, max(self.num_outputs, 1), 1\n ))\n self.add_module('tower', nn.Sequential(*tower))\n\n # self.densepose_losses = build_densepose_losses(cfg)\n\n def forward(self, iuv_feats, iuv_feat_stride, instances=None):\n iuv_logit = self.tower(iuv_feats)\n\n assert iuv_feat_stride >= self.iuv_out_stride\n assert iuv_feat_stride % self.iuv_out_stride == 0\n iuv_logit = aligned_bilinear(iuv_logit, int(iuv_feat_stride / self.iuv_out_stride))\n\n return iuv_logit\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n# This is a modified version of cocoeval.py where we also have the densepose evaluation.\n\n__author__ = \"tsungyi\"\n\nimport copy, pdb\nimport datetime\nimport logging\nimport numpy as np\nimport pickle\nimport time\nfrom collections import defaultdict\nfrom enum import Enum\nfrom typing import Any, Dict, Tuple\nimport scipy.spatial.distance as ssd\nfrom pycocotools import mask as maskUtils\nfrom scipy.io import loadmat\nfrom scipy.ndimage import zoom as spzoom\n\nfrom detectron2.utils.file_io import PathManager\n\nfrom densepose.structures.mesh import create_mesh\n\nfrom .data.structures import DensePoseDataRelative\n\nlogger = logging.getLogger(__name__)\n\n\nclass DensePoseEvalMode(str, Enum):\n # use both masks and geodesic distances (GPS * IOU) to compute scores\n GPSM = \"gpsm\"\n # use only geodesic distances (GPS) to compute scores\n GPS = \"gps\"\n # use only masks (IOU) to compute scores\n IOU = \"iou\"\n\n\nclass DensePoseDataMode(str, Enum):\n # use estimated IUV data (default mode)\n IUV_DT = \"iuvdt\"\n # use ground truth IUV data\n IUV_GT = \"iuvgt\"\n # use ground truth labels I and set UV to 0\n I_GT_UV_0 = \"igtuv0\"\n # use ground truth labels I and estimated UV coordinates\n I_GT_UV_DT = \"igtuvdt\"\n # use estimated labels I and set UV to 0\n I_DT_UV_0 = \"idtuv0\"\n\n\nclass DensePoseCocoEval(object):\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] 
A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'densepose'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(\n self,\n cocoGt=None,\n cocoDt=None,\n iouType: str = \"densepose\",\n dpEvalMode: DensePoseEvalMode = DensePoseEvalMode.GPS,\n dpDataMode: DensePoseDataMode = DensePoseDataMode.IUV_DT,\n ):\n \"\"\"\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n \"\"\"\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self._dpEvalMode = dpEvalMode\n self._dpDataMode = dpDataMode\n self.params = {} # evaluation parameters\n self.evalImgs = defaultdict(list) # per-image per-category eval results [KxAxI]\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if cocoGt is not None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n self.ignoreThrBB = 0.7\n self.ignoreThrUV = 0.9\n\n def _loadGEval(self):\n smpl_subdiv_fpath = PathManager.get_local_path(\n \"https://dl.fbaipublicfiles.com/densepose/data/SMPL_subdiv.mat\"\n )\n pdist_transform_fpath = PathManager.get_local_path(\n \"https://dl.fbaipublicfiles.com/densepose/data/SMPL_SUBDIV_TRANSFORM.mat\"\n )\n pdist_matrix_fpath = PathManager.get_local_path(\n \"https://dl.fbaipublicfiles.com/densepose/data/Pdist_matrix.pkl\", timeout_sec=120\n )\n SMPL_subdiv = loadmat(smpl_subdiv_fpath)\n self.PDIST_transform = loadmat(pdist_transform_fpath)\n self.PDIST_transform = self.PDIST_transform[\"index\"].squeeze()\n UV = 
np.array([SMPL_subdiv[\"U_subdiv\"], SMPL_subdiv[\"V_subdiv\"]]).squeeze()\n ClosestVertInds = np.arange(UV.shape[1]) + 1\n self.Part_UVs = []\n self.Part_ClosestVertInds = []\n for i in np.arange(24):\n self.Part_UVs.append(UV[:, SMPL_subdiv[\"Part_ID_subdiv\"].squeeze() == (i + 1)])\n self.Part_ClosestVertInds.append(\n ClosestVertInds[SMPL_subdiv[\"Part_ID_subdiv\"].squeeze() == (i + 1)]\n )\n\n with open(pdist_matrix_fpath, \"rb\") as hFile:\n arrays = pickle.load(hFile, encoding=\"latin1\")\n self.Pdist_matrix = arrays[\"Pdist_matrix\"]\n self.Part_ids = np.array(SMPL_subdiv[\"Part_ID_subdiv\"].squeeze())\n # Mean geodesic distances for parts.\n self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150])\n # Coarse Part labels.\n self.CoarseParts = np.array(\n [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8]\n )\n\n def _prepare(self):\n \"\"\"\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n \"\"\"\n\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n # safeguard for invalid segmentation annotation;\n # annotations containing empty lists exist in the posetrack\n # dataset. This is not a correct segmentation annotation\n # in terms of COCO format; we need to deal with it somehow\n segm = ann[\"segmentation\"]\n if type(segm) == list and len(segm) == 0:\n ann[\"segmentation\"] = None\n continue\n rle = coco.annToRLE(ann)\n ann[\"segmentation\"] = rle\n\n def _getIgnoreRegion(iid, coco):\n img = coco.imgs[iid]\n\n if \"ignore_regions_x\" not in img.keys():\n return None\n\n if len(img[\"ignore_regions_x\"]) == 0:\n return None\n\n rgns_merged = [\n [v for xy in zip(region_x, region_y) for v in xy]\n for region_x, region_y in zip(img[\"ignore_regions_x\"], img[\"ignore_regions_y\"])\n ]\n rles = maskUtils.frPyObjects(rgns_merged, img[\"height\"], img[\"width\"])\n rle = maskUtils.merge(rles)\n return maskUtils.decode(rle)\n\n def _checkIgnore(dt, iregion):\n if iregion is None:\n return True\n\n bb = np.array(dt[\"bbox\"]).astype(np.int)\n x1, y1, x2, y2 = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]\n x2 = min([x2, iregion.shape[1]])\n y2 = min([y2, iregion.shape[0]])\n\n if bb[2] * bb[3] == 0:\n return False\n\n crop_iregion = iregion[y1:y2, x1:x2]\n\n if crop_iregion.sum() == 0:\n return True\n\n if \"densepose\" not in dt.keys(): # filtering boxes\n return crop_iregion.sum() / bb[2] / bb[3] < self.ignoreThrBB\n\n # filtering UVs\n ignoremask = np.require(crop_iregion, requirements=[\"F\"])\n mask = self._extract_mask(dt)\n uvmask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=[\"F\"])\n uvmask_ = maskUtils.encode(uvmask)\n ignoremask_ = maskUtils.encode(ignoremask)\n uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0]\n return uviou < self.ignoreThrUV\n\n p = self.params\n\n if p.useCats:\n gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n imns = self.cocoGt.loadImgs(p.imgIds)\n self.size_mapping = {}\n for im in imns:\n self.size_mapping[im[\"id\"]] = [im[\"height\"], im[\"width\"]]\n\n # if iouType == 'uv', add point gt annotations\n if p.iouType == \"densepose\":\n self._loadGEval()\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == \"segm\":\n 
_toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n\n # set ignore flag\n for gt in gts:\n gt[\"ignore\"] = gt[\"ignore\"] if \"ignore\" in gt else 0\n gt[\"ignore\"] = \"iscrowd\" in gt and gt[\"iscrowd\"]\n if p.iouType == \"keypoints\":\n gt[\"ignore\"] = (gt[\"num_keypoints\"] == 0) or gt[\"ignore\"]\n if p.iouType == \"densepose\":\n gt[\"ignore\"] = (\"dp_x\" in gt) == 0\n if p.iouType == \"segm\":\n gt[\"ignore\"] = gt[\"segmentation\"] is None\n\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self._igrgns = defaultdict(list)\n\n for gt in gts:\n iid = gt[\"image_id\"]\n if iid not in self._igrgns.keys():\n self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt)\n if _checkIgnore(gt, self._igrgns[iid]):\n self._gts[iid, gt[\"category_id\"]].append(gt)\n for dt in dts:\n iid = dt[\"image_id\"]\n if (iid not in self._igrgns) or _checkIgnore(dt, self._igrgns[iid]):\n self._dts[iid, dt[\"category_id\"]].append(dt)\n\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n \"\"\"\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n \"\"\"\n tic = time.time()\n logger.info(\"Running per image DensePose evaluation... {}\".format(self.params.iouType))\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if p.useSegm is not None:\n p.iouType = \"segm\" if p.useSegm == 1 else \"bbox\"\n logger.info(\"useSegm (deprecated) is not None. Running DensePose evaluation\")\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params = p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType in [\"segm\", \"bbox\"]:\n computeIoU = self.computeIoU\n elif p.iouType == \"keypoints\":\n computeIoU = self.computeOks\n elif p.iouType == \"densepose\":\n computeIoU = self.computeOgps\n if self._dpEvalMode == DensePoseEvalMode.GPSM:\n self.real_ious = {\n (imgId, catId): self.computeDPIoU(imgId, catId)\n for imgId in p.imgIds\n for catId in catIds\n }\n # pdb.set_trace()\n self.ious = {\n (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds\n }\n\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n self.evalImgs = [\n evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n logger.info(\"DensePose evaluation DONE (t={:0.2f}s).\".format(toc - tic))\n\n def getDensePoseMask(self, polys):\n maskGen = np.zeros([256, 256])\n stop = min(len(polys) + 1, 15)\n for i in range(1, stop):\n if polys[i - 1]:\n currentMask = maskUtils.decode(polys[i - 1])\n maskGen[currentMask > 0] = i\n return maskGen\n\n def _generate_rlemask_on_image(self, mask, imgId, data):\n bbox_xywh = np.array(data[\"bbox\"])\n x, y, w, h = bbox_xywh\n im_h, im_w = self.size_mapping[imgId]\n im_mask = np.zeros((im_h, im_w), dtype=np.uint8)\n if mask is not None:\n x0 = max(int(x), 0)\n x1 = min(int(x + w), im_w, int(x) + mask.shape[1])\n y0 = max(int(y), 0)\n y1 = min(int(y + h), im_h, int(y) + mask.shape[0])\n y = int(y)\n x = int(x)\n im_mask[y0:y1, x0:x1] = mask[y0 - y : y1 - y, x0 - x : x1 - x]\n im_mask = np.require(np.asarray(im_mask > 0), dtype=np.uint8, 
requirements=[\"F\"])\n rle_mask = maskUtils.encode(np.array(im_mask[:, :, np.newaxis], order=\"F\"))[0]\n return rle_mask\n\n def computeDPIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId, catId]\n dt = self._dts[imgId, catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]\n if len(gt) == 0 and len(dt) == 0:\n return []\n inds = np.argsort([-d[\"score\"] for d in dt], kind=\"mergesort\")\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt = dt[0 : p.maxDets[-1]]\n\n gtmasks = []\n for g in gt:\n if DensePoseDataRelative.S_KEY in g:\n # convert DensePose mask to a binary mask\n mask = np.minimum(self.getDensePoseMask(g[DensePoseDataRelative.S_KEY]), 1.0)\n _, _, w, h = g[\"bbox\"]\n scale_x = float(max(w, 1)) / mask.shape[1]\n scale_y = float(max(h, 1)) / mask.shape[0]\n mask = spzoom(mask, (scale_y, scale_x), order=1, prefilter=False)\n mask = np.array(mask > 0.5, dtype=np.uint8)\n rle_mask = self._generate_rlemask_on_image(mask, imgId, g)\n elif \"segmentation\" in g:\n segmentation = g[\"segmentation\"]\n if isinstance(segmentation, list) and segmentation:\n # polygons\n im_h, im_w = self.size_mapping[imgId]\n rles = maskUtils.frPyObjects(segmentation, im_h, im_w)\n rle_mask = maskUtils.merge(rles)\n elif isinstance(segmentation, dict):\n if isinstance(segmentation[\"counts\"], list):\n # uncompressed RLE\n im_h, im_w = self.size_mapping[imgId]\n rle_mask = maskUtils.frPyObjects(segmentation, im_h, im_w)\n else:\n # compressed RLE\n rle_mask = segmentation\n else:\n rle_mask = self._generate_rlemask_on_image(None, imgId, g)\n else:\n rle_mask = self._generate_rlemask_on_image(None, imgId, g)\n gtmasks.append(rle_mask)\n\n dtmasks = []\n for d in dt:\n mask = self._extract_mask(d)\n mask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=[\"F\"])\n rle_mask = self._generate_rlemask_on_image(mask, imgId, d)\n dtmasks.append(rle_mask)\n\n # # Debug: COCO RLE --> tensor\n # pdb.set_trace()\n # import pycocotools.mask as mask_util\n # import imageio\n # for ii in range(len(gtmasks)):\n # imageio.imwrite(\"tmp/gtmasks_{}.png\".format(ii), mask_util.decode(gtmasks[ii]).astype(np.float))\n # for ii in range(len(dtmasks)):\n # imageio.imwrite(\"tmp/dtmasks_{}.png\".format(ii), mask_util.decode(dtmasks[ii]).astype(np.float))\n\n # compute iou between each dt and gt region\n iscrowd = [int(o.get(\"iscrowd\", 0)) for o in gt]\n iousDP = maskUtils.iou(dtmasks, gtmasks, iscrowd)\n return iousDP\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId, catId]\n dt = self._dts[imgId, catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]\n if len(gt) == 0 and len(dt) == 0:\n return []\n inds = np.argsort([-d[\"score\"] for d in dt], kind=\"mergesort\")\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt = dt[0 : p.maxDets[-1]]\n\n if p.iouType == \"segm\":\n g = [g[\"segmentation\"] for g in gt if g[\"segmentation\"] is not None]\n d = [d[\"segmentation\"] for d in dt if d[\"segmentation\"] is not None]\n elif p.iouType == \"bbox\":\n g = [g[\"bbox\"] for g in gt]\n d = [d[\"bbox\"] for d in dt]\n else:\n raise Exception(\"unknown iouType for iou computation\")\n\n # compute iou between each dt and gt region\n iscrowd = [int(o.get(\"iscrowd\", 0)) for o in gt]\n ious = maskUtils.iou(d, g, iscrowd)\n\n # # Debug: COCO RLE --> 
tensor\n # import pycocotools.mask as mask_util\n # import imageio\n # for ii in range(len(g)):\n # imageio.imwrite(\"tmp/g_mask_{}.png\".format(ii), mask_util.decode(g[ii]).astype(np.float))\n # for ii in range(len(d)):\n # imageio.imwrite(\"tmp/d_mask_{}.png\".format(ii), mask_util.decode(d[ii]).astype(np.float))\n # pdb.set_trace()\n\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimension here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d[\"score\"] for d in dts], kind=\"mergesort\")\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0 : p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = (\n np.array(\n [\n 0.26,\n 0.25,\n 0.25,\n 0.35,\n 0.35,\n 0.79,\n 0.79,\n 0.72,\n 0.72,\n 0.62,\n 0.62,\n 1.07,\n 1.07,\n 0.87,\n 0.87,\n 0.89,\n 0.89,\n ]\n )\n / 10.0\n )\n vars = (sigmas * 2) ** 2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt[\"keypoints\"])\n xg = g[0::3]\n yg = g[1::3]\n vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt[\"bbox\"]\n x0 = bb[0] - bb[2]\n x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]\n y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt[\"keypoints\"])\n xd = d[0::3]\n yd = d[1::3]\n if k1 > 0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros(k)\n dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0)\n dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0)\n e = (dx ** 2 + dy ** 2) / vars / (gt[\"area\"] + np.spacing(1)) / 2\n if k1 > 0:\n e = e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def _extract_mask(self, dt: Dict[str, Any]) -> np.ndarray:\n if \"densepose\" in dt:\n densepose_results_quantized = dt[\"densepose\"]\n return densepose_results_quantized.labels_uv_uint8[0].numpy()\n elif \"cse_mask\" in dt:\n return dt[\"cse_mask\"]\n\n def _extract_iuv(\n self, densepose_data: np.ndarray, py: np.ndarray, px: np.ndarray, gt: Dict[str, Any]\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Extract arrays of I, U and V values at given points as numpy arrays\n given the data mode stored in self._dpDataMode\n \"\"\"\n if self._dpDataMode == DensePoseDataMode.IUV_DT:\n # estimated labels and UV (default)\n ipoints = densepose_data[0, py, px]\n upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255.\n vpoints = densepose_data[2, py, px] / 255.0\n elif self._dpDataMode == DensePoseDataMode.IUV_GT:\n # ground truth\n ipoints = np.array(gt[\"dp_I\"])\n upoints = np.array(gt[\"dp_U\"])\n vpoints = np.array(gt[\"dp_V\"])\n elif self._dpDataMode == DensePoseDataMode.I_GT_UV_0:\n # ground truth labels, UV = 0\n ipoints = np.array(gt[\"dp_I\"])\n upoints = upoints * 0.0\n vpoints = vpoints * 0.0\n elif self._dpDataMode == DensePoseDataMode.I_GT_UV_DT:\n # ground truth labels, estimated UV\n ipoints = np.array(gt[\"dp_I\"])\n upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255.\n vpoints = densepose_data[2, py, px] / 255.0\n elif self._dpDataMode == DensePoseDataMode.I_DT_UV_0:\n # estimated labels, UV = 0\n ipoints = densepose_data[0, py, px]\n upoints = upoints * 0.0\n vpoints = 
vpoints * 0.0\n else:\n raise ValueError(f\"Unknown data mode: {self._dpDataMode}\")\n return ipoints, upoints, vpoints\n\n def computeOgps(self, imgId, catId):\n p = self.params\n # dimension here should be Nxm\n g = self._gts[imgId, catId]\n d = self._dts[imgId, catId]\n inds = np.argsort([-d_[\"score\"] for d_ in d], kind=\"mergesort\")\n d = [d[i] for i in inds]\n if len(d) > p.maxDets[-1]:\n d = d[0 : p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(g) == 0 or len(d) == 0:\n return []\n ious = np.zeros((len(d), len(g)))\n # compute opgs between each detection and ground truth object\n # sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5\n # 1 # dist = 0.3m corresponds to ogps = 0.96\n # 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5)\n for j, gt in enumerate(g):\n if not gt[\"ignore\"]:\n g_ = gt[\"bbox\"]\n for i, dt in enumerate(d):\n #\n dy = int(dt[\"bbox\"][3])\n dx = int(dt[\"bbox\"][2])\n dp_x = np.array(gt[\"dp_x\"]) * g_[2] / 255.0\n dp_y = np.array(gt[\"dp_y\"]) * g_[3] / 255.0\n py = (dp_y + g_[1] - dt[\"bbox\"][1]).astype(np.int)\n px = (dp_x + g_[0] - dt[\"bbox\"][0]).astype(np.int)\n #\n pts = np.zeros(len(px))\n pts[px >= dx] = -1\n pts[py >= dy] = -1\n pts[px < 0] = -1\n pts[py < 0] = -1\n if len(pts) < 1:\n ogps = 0.0\n elif np.max(pts) == -1:\n ogps = 0.0\n else:\n px[pts == -1] = 0\n py[pts == -1] = 0\n # Find closest vertices in subsampled mesh.\n if \"densepose\" in dt:\n cVertsGT, ClosestVertsGTTransformed = self.findAllClosestVertsGT(gt)\n densepose_results_quantized = dt[\"densepose\"]\n ipoints, upoints, vpoints = self._extract_iuv(\n densepose_results_quantized.labels_uv_uint8.numpy(), py, px, gt\n )\n ipoints[pts == -1] = 0\n cVerts = self.findAllClosestVertsUV(upoints, vpoints, ipoints)\n # Get pairwise geodesic distances between gt and estimated mesh points.\n dist = self.getDistancesUV(ClosestVertsGTTransformed, cVerts)\n # Compute the Ogps measure.\n # Find the mean geodesic normalization distance for\n # each GT point, based on which part it is on.\n Current_Mean_Distances = self.Mean_Distances[\n self.CoarseParts[\n self.Part_ids[cVertsGT[cVertsGT > 0].astype(int) - 1]\n ]\n ]\n elif \"cse_indices\" in dt:\n cVertsGT = np.array(gt[\"dp_vertex\"])\n cse_mask, cse_indices = dt[\"cse_mask\"], dt[\"cse_indices\"]\n cVerts = self.findAllClosestVertsCSE(\n cse_indices[py, px],\n cse_mask[py, px],\n )\n # Get pairwise geodesic distances between gt and estimated mesh points.\n dist = self.getDistancesCSE(cVertsGT, cVerts, gt[\"ref_model\"])\n Current_Mean_Distances = self.Mean_Distances[\n self.CoarseParts[np.array(gt[\"dp_I\"], dtype=int)]\n ]\n # Compute gps\n ogps_values = np.exp(-(dist ** 2) / (2 * (Current_Mean_Distances ** 2)))\n #\n if len(dist) > 0:\n ogps = np.sum(ogps_values) / len(dist)\n ious[i, j] = ogps\n\n gbb = [gt[\"bbox\"] for gt in g]\n dbb = [dt[\"bbox\"] for dt in d]\n\n # compute iou between each dt and gt region\n iscrowd = [int(o.get(\"iscrowd\", 0)) for o in g]\n ious_bb = maskUtils.iou(dbb, gbb, iscrowd)\n return ious, ious_bb\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n \"\"\"\n perform evaluation for single category and image\n :return: dict (single image results)\n \"\"\"\n\n p = self.params\n if p.useCats:\n gt = self._gts[imgId, catId]\n dt = self._dts[imgId, catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]\n if len(gt) == 0 and len(dt) == 0:\n return None\n\n for g in gt:\n # 
g['_ignore'] = g['ignore']\n if g[\"ignore\"] or (g[\"area\"] < aRng[0] or g[\"area\"] > aRng[1]):\n g[\"_ignore\"] = True\n else:\n g[\"_ignore\"] = False\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g[\"_ignore\"] for g in gt], kind=\"mergesort\")\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d[\"score\"] for d in dt], kind=\"mergesort\")\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o.get(\"iscrowd\", 0)) for o in gt]\n # load computed ious\n if p.iouType == \"densepose\":\n # print('Checking the length', len(self.ious[imgId, catId]))\n # if len(self.ious[imgId, catId]) == 0:\n # print(self.ious[imgId, catId])\n ious = (\n self.ious[imgId, catId][0][:, gtind]\n if len(self.ious[imgId, catId]) > 0\n else self.ious[imgId, catId]\n )\n ioubs = (\n self.ious[imgId, catId][1][:, gtind]\n if len(self.ious[imgId, catId]) > 0\n else self.ious[imgId, catId]\n )\n if self._dpEvalMode == DensePoseEvalMode.GPSM:\n iousM = (\n self.real_ious[imgId, catId][:, gtind]\n if len(self.real_ious[imgId, catId]) > 0\n else self.real_ious[imgId, catId]\n )\n else:\n ious = (\n self.ious[imgId, catId][:, gtind]\n if len(self.ious[imgId, catId]) > 0\n else self.ious[imgId, catId]\n )\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T, G))\n dtm = np.zeros((T, D))\n gtIg = np.array([g[\"_ignore\"] for g in gt])\n dtIg = np.zeros((T, D))\n if np.all(gtIg) and p.iouType == \"densepose\":\n dtIg = np.logical_or(dtIg, True)\n\n if len(ious) > 0: # and not p.iouType == 'densepose':\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t, 1 - 1e-10])\n m = -1\n for gind, _g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind, gind] > 0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:\n break\n if p.iouType == \"densepose\":\n if self._dpEvalMode == DensePoseEvalMode.GPSM:\n new_iou = np.sqrt(iousM[dind, gind] * ious[dind, gind])\n elif self._dpEvalMode == DensePoseEvalMode.IOU:\n new_iou = iousM[dind, gind]\n elif self._dpEvalMode == DensePoseEvalMode.GPS:\n new_iou = ious[dind, gind]\n else:\n new_iou = ious[dind, gind]\n if new_iou < iou:\n continue\n if new_iou == 0.0:\n continue\n # if match successful and best so far, store appropriately\n iou = new_iou\n m = gind\n # if match made store id of match for both dt and gt\n if m == -1:\n continue\n dtIg[tind, dind] = gtIg[m]\n dtm[tind, dind] = gt[m][\"id\"]\n gtm[tind, m] = d[\"id\"]\n\n if p.iouType == \"densepose\":\n if not len(ioubs) == 0:\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n if dtm[tind, dind] == 0:\n ioub = 0.8\n m = -1\n for gind, _g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind, gind] > 0 and not iscrowd[gind]:\n continue\n # continue to next gt unless better match made\n if ioubs[dind, gind] < ioub:\n continue\n # if match successful and best so far, store appropriately\n ioub = ioubs[dind, gind]\n m = gind\n # if match made store id of match for both dt and gt\n if m > -1:\n dtIg[:, dind] = gtIg[m]\n if gtIg[m]:\n dtm[tind, dind] = gt[m][\"id\"]\n gtm[tind, m] = d[\"id\"]\n # set unmatched detections outside of area range to ignore\n a = np.array([d[\"area\"] < aRng[0] or d[\"area\"] > aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm 
== 0, np.repeat(a, T, 0)))\n # store results for given image and category\n # print('Done with the function', len(self.ious[imgId, catId]))\n return {\n \"image_id\": imgId,\n \"category_id\": catId,\n \"aRng\": aRng,\n \"maxDet\": maxDet,\n \"dtIds\": [d[\"id\"] for d in dt],\n \"gtIds\": [g[\"id\"] for g in gt],\n \"dtMatches\": dtm,\n \"gtMatches\": gtm,\n \"dtScores\": [d[\"score\"] for d in dt],\n \"gtIgnore\": gtIg,\n \"dtIgnore\": dtIg,\n }\n\n def accumulate(self, p=None):\n \"\"\"\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n \"\"\"\n logger.info(\"Accumulating evaluation results...\")\n tic = time.time()\n if not self.evalImgs:\n logger.info(\"Please run evaluate() first\")\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -(np.ones((T, R, K, A, M))) # -1 for the precision of absent categories\n recall = -(np.ones((T, K, A, M)))\n\n # create dictionary for future indexing\n logger.info(\"Categories: {}\".format(p.catIds))\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0 * A0 * I0\n for a, a0 in enumerate(a_list):\n Na = a0 * I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if e is not None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e[\"dtScores\"][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind=\"mergesort\")\n\n dtm = np.concatenate([e[\"dtMatches\"][:, 0:maxDet] for e in E], axis=1)[:, inds]\n dtIg = np.concatenate([e[\"dtIgnore\"][:, 0:maxDet] for e in E], axis=1)[:, inds]\n gtIg = np.concatenate([e[\"gtIgnore\"] for e in E])\n npig = np.count_nonzero(gtIg == 0)\n if npig == 0:\n continue\n tps = np.logical_and(dtm, np.logical_not(dtIg))\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp + tp + np.spacing(1))\n q = np.zeros((R,))\n\n if nd:\n recall[t, k, a, m] = rc[-1]\n else:\n recall[t, k, a, m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist()\n q = q.tolist()\n\n for i in range(nd - 1, 0, -1):\n if pr[i] > pr[i - 1]:\n pr[i - 1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side=\"left\")\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n except Exception:\n pass\n precision[t, :, k, a, m] = np.array(q)\n 
logger.info(\n \"Final: max precision {}, min precision {}\".format(np.max(precision), np.min(precision))\n )\n self.eval = {\n \"params\": p,\n \"counts\": [T, R, K, A, M],\n \"date\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"precision\": precision,\n \"recall\": recall,\n }\n toc = time.time()\n logger.info(\"DONE (t={:0.2f}s).\".format(toc - tic))\n\n def summarize(self):\n \"\"\"\n Compute and display summary metrics for evaluation results.\n Note this function can *only* be applied on the default parameter setting\n \"\"\"\n\n def _summarize(ap=1, iouThr=None, areaRng=\"all\", maxDets=100):\n p = self.params\n iStr = \" {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}\"\n titleStr = \"Average Precision\" if ap == 1 else \"Average Recall\"\n typeStr = \"(AP)\" if ap == 1 else \"(AR)\"\n measure = \"IoU\"\n if self.params.iouType == \"keypoints\":\n measure = \"OKS\"\n elif self.params.iouType == \"densepose\":\n measure = \"OGPS\"\n iouStr = (\n \"{:0.2f}:{:0.2f}\".format(p.iouThrs[0], p.iouThrs[-1])\n if iouThr is None\n else \"{:0.2f}\".format(iouThr)\n )\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval[\"precision\"]\n # IoU\n if iouThr is not None:\n t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0]\n s = s[t]\n s = s[:, :, :, aind, mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval[\"recall\"]\n if iouThr is not None:\n t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0]\n s = s[t]\n s = s[:, :, aind, mind]\n if len(s[s > -1]) == 0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s > -1])\n logger.info(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng=\"small\", maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng=\"medium\", maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng=\"large\", maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng=\"small\", maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng=\"medium\", maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng=\"large\", maxDets=self.params.maxDets[2])\n return stats\n\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=0.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=0.75)\n stats[3] = _summarize(1, maxDets=20, areaRng=\"medium\")\n stats[4] = _summarize(1, maxDets=20, areaRng=\"large\")\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=0.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=0.75)\n stats[8] = _summarize(0, maxDets=20, areaRng=\"medium\")\n stats[9] = _summarize(0, maxDets=20, areaRng=\"large\")\n return stats\n\n def _summarizeUvs():\n stats = [_summarize(1, maxDets=self.params.maxDets[0])]\n min_threshold = self.params.iouThrs.min()\n if min_threshold <= 0.201:\n stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.2)]\n if min_threshold 
<= 0.301:\n stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.3)]\n if min_threshold <= 0.401:\n stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.4)]\n stats += [\n _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5),\n _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75),\n _summarize(1, maxDets=self.params.maxDets[0], areaRng=\"medium\"),\n _summarize(1, maxDets=self.params.maxDets[0], areaRng=\"large\"),\n _summarize(0, maxDets=self.params.maxDets[0]),\n _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5),\n _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75),\n _summarize(0, maxDets=self.params.maxDets[0], areaRng=\"medium\"),\n _summarize(0, maxDets=self.params.maxDets[0], areaRng=\"large\"),\n ]\n return np.array(stats)\n\n def _summarizeUvsOld():\n stats = np.zeros((18,))\n stats[0] = _summarize(1, maxDets=self.params.maxDets[0])\n stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5)\n stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.55)\n stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.60)\n stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.65)\n stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.70)\n stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75)\n stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.80)\n stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.85)\n stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.90)\n stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.95)\n stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng=\"medium\")\n stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng=\"large\")\n stats[13] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5)\n stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75)\n stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng=\"medium\")\n stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng=\"large\")\n return stats\n\n if not self.eval:\n raise Exception(\"Please run accumulate() first\")\n iouType = self.params.iouType\n if iouType in [\"segm\", \"bbox\"]:\n summarize = _summarizeDets\n elif iouType in [\"keypoints\"]:\n summarize = _summarizeKps\n elif iouType in [\"densepose\"]:\n summarize = _summarizeUvs\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()\n\n # ================ functions for dense pose ==============================\n def findAllClosestVertsUV(self, U_points, V_points, Index_points):\n ClosestVerts = np.ones(Index_points.shape) * -1\n for i in np.arange(24):\n #\n if (i + 1) in Index_points:\n UVs = np.array(\n [U_points[Index_points == (i + 1)], V_points[Index_points == (i + 1)]]\n )\n Current_Part_UVs = self.Part_UVs[i]\n Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]\n D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze()\n ClosestVerts[Index_points == (i + 1)] = Current_Part_ClosestVertInds[\n np.argmin(D, axis=0)\n ]\n ClosestVertsTransformed = self.PDIST_transform[ClosestVerts.astype(int) - 1]\n ClosestVertsTransformed[ClosestVerts < 0] = 0\n return ClosestVertsTransformed\n\n def findAllClosestVertsCSE(self, cse_indices, mask):\n ClosestVerts = np.ones(cse_indices.shape) * -1\n ClosestVerts[mask == 1] = cse_indices[mask == 1]\n return ClosestVerts\n\n def findAllClosestVertsGT(self, gt):\n #\n I_gt = 
np.array(gt[\"dp_I\"])\n U_gt = np.array(gt[\"dp_U\"])\n V_gt = np.array(gt[\"dp_V\"])\n #\n # print(I_gt)\n #\n ClosestVertsGT = np.ones(I_gt.shape) * -1\n for i in np.arange(24):\n if (i + 1) in I_gt:\n UVs = np.array([U_gt[I_gt == (i + 1)], V_gt[I_gt == (i + 1)]])\n Current_Part_UVs = self.Part_UVs[i]\n Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]\n D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze()\n ClosestVertsGT[I_gt == (i + 1)] = Current_Part_ClosestVertInds[np.argmin(D, axis=0)]\n #\n ClosestVertsGTTransformed = self.PDIST_transform[ClosestVertsGT.astype(int) - 1]\n ClosestVertsGTTransformed[ClosestVertsGT < 0] = 0\n return ClosestVertsGT, ClosestVertsGTTransformed\n\n def getDistancesCSE(self, cVertsGT, cVerts, mesh_name):\n geodists_vertices = np.ones(len(cVertsGT)) * np.inf\n mask = (cVertsGT >= 0) * (cVerts >= 0)\n mesh = create_mesh(mesh_name, \"cpu\")\n geodists_vertices[mask] = mesh.geodists[cVertsGT[mask], cVerts[mask]]\n return geodists_vertices\n\n def getDistancesUV(self, cVertsGT, cVerts):\n #\n n = 27554\n dists = []\n for d in range(len(cVertsGT)):\n if cVertsGT[d] > 0:\n if cVerts[d] > 0:\n i = cVertsGT[d] - 1\n j = cVerts[d] - 1\n if j == i:\n dists.append(0)\n elif j > i:\n ccc = i\n i = j\n j = ccc\n i = n - i - 1\n j = n - j - 1\n k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1\n k = (n * n - n) / 2 - k - 1\n dists.append(self.Pdist_matrix[int(k)][0])\n else:\n i = n - i - 1\n j = n - j - 1\n k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1\n k = (n * n - n) / 2 - k - 1\n dists.append(self.Pdist_matrix[int(k)][0])\n else:\n dists.append(np.inf)\n return np.atleast_1d(np.array(dists).squeeze())\n\n\nclass Params:\n \"\"\"\n Params for coco evaluation api\n \"\"\"\n\n def setDetParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)\n self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)\n self.maxDets = [1, 10, 100]\n self.areaRng = [\n [0 ** 2, 1e5 ** 2],\n [0 ** 2, 32 ** 2],\n [32 ** 2, 96 ** 2],\n [96 ** 2, 1e5 ** 2],\n ]\n self.areaRngLbl = [\"all\", \"small\", \"medium\", \"large\"]\n self.useCats = 1\n\n def setKpParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. 
the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(0.5, 0.95, np.round((0.95 - 0.5) / 0.05) + 1, endpoint=True)\n self.recThrs = np.linspace(0.0, 1.00, np.round((1.00 - 0.0) / 0.01) + 1, endpoint=True)\n self.maxDets = [20]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = [\"all\", \"medium\", \"large\"]\n self.useCats = 1\n\n def setUvParams(self):\n self.imgIds = []\n self.catIds = []\n self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)\n self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)\n self.maxDets = [20]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = [\"all\", \"medium\", \"large\"]\n self.useCats = 1\n\n def __init__(self, iouType=\"segm\"):\n if iouType == \"segm\" or iouType == \"bbox\":\n self.setDetParams()\n elif iouType == \"keypoints\":\n self.setKpParams()\n elif iouType == \"densepose\":\n self.setUvParams()\n else:\n raise Exception(\"iouType not supported\")\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None\n",
"from typing import Dict\nimport math\n\nimport torch\nfrom torch import nn\n\nfrom fvcore.nn import sigmoid_focal_loss_jit\nfrom detectron2.layers import ShapeSpec\n\n# from adet.layers import conv_with_kaiming_uniform\n# from adet.utils.comm import aligned_bilinear\nfrom densepose.layers import conv_with_kaiming_uniform\nfrom densepose.utils.comm import compute_locations, aligned_bilinear\n# from .. import (\n# build_densepose_data_filter,\n# build_densepose_head,\n# build_densepose_losses,\n# build_densepose_predictor,\n# densepose_inference,\n# )\nfrom lambda_networks import LambdaLayer\nfrom .iuv_head import get_embedder\nimport pdb\n\nINF = 100000000\n\ndef build_iuv_multiscale_head(cfg):\n return CoordGlobalIUVMultiscaleHead(cfg)\n\n## Inspired by HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation\nclass CoordGlobalIUVMultiscaleHead(nn.Module):\n def __init__(self, cfg, use_rel_coords=True):\n super().__init__()\n self.num_outputs = cfg.MODEL.CONDINST.IUVHead.OUT_CHANNELS\n norm = cfg.MODEL.CONDINST.IUVHead.NORM\n num_convs = cfg.MODEL.CONDINST.IUVHead.NUM_CONVS\n num_lambda_layer = cfg.MODEL.CONDINST.IUVHead.NUM_LAMBDA_LAYER\n assert num_lambda_layer<=num_convs\n channels = cfg.MODEL.CONDINST.IUVHead.CHANNELS\n self.norm_feat = cfg.MODEL.CONDINST.IUVHead.NORM_FEATURES\n soi = cfg.MODEL.FCOS.SIZES_OF_INTEREST\n self.register_buffer(\"sizes_of_interest\", torch.tensor(soi + [soi[-1] * 2]))\n self.iuv_out_stride = cfg.MODEL.CONDINST.MASK_OUT_STRIDE\n self.use_rel_coords = cfg.MODEL.CONDINST.IUVHead.REL_COORDS\n self.use_abs_coords = cfg.MODEL.CONDINST.IUVHead.ABS_COORDS\n # pdb.set_trace()\n # if self.use_rel_coords:\n # self.in_channels = channels + 2\n # else:\n self.pos_emb_num_freqs = cfg.MODEL.CONDINST.IUVHead.POSE_EMBEDDING_NUM_FREQS\n self.use_pos_emb = self.pos_emb_num_freqs>0\n if self.use_pos_emb:\n self.position_embedder, self.position_emb_dim = get_embedder(multires=self.pos_emb_num_freqs, input_dims=2)\n self.in_channels = channels + self.position_emb_dim\n else:\n self.in_channels = channels + 2\n\n if self.use_abs_coords:\n if self.use_pos_emb:\n self.in_channels += self.position_emb_dim\n else:\n self.in_channels += 2\n\n\n conv_block = conv_with_kaiming_uniform(norm, activation=True)\n\n tower = []\n if num_lambda_layer>0:\n layer = LambdaLayer(\n dim = self.in_channels,\n dim_out = channels,\n r = 23, # the receptive field for relative positional encoding (23 x 23)\n dim_k = 16,\n heads = 4,\n dim_u = 4\n )\n tower.append(layer)\n else:\n tower.append(conv_block(\n self.in_channels, channels, 3, 1\n ))\n\n for i in range(1,num_convs-1):\n if i<num_lambda_layer:\n layer = LambdaLayer(\n dim = channels,\n dim_out = channels,\n r = 23, # the receptive field for relative positional encoding (23 x 23)\n dim_k = 16,\n heads = 4,\n dim_u = 4\n )\n tower.append(layer)\n else:\n tower.append(conv_block(\n channels, channels, 3, 1\n ))\n\n self.add_module('tower', nn.Sequential(*tower))\n\n self.mid_res_conv = conv_block(channels, channels, 3, 1)\n self.mid_res_out = nn.Conv2d(channels, self.num_outputs, 1)\n\n self.low_res_conv = conv_block(channels, channels, 3, 2)\n self.low_res_out = nn.Conv2d(channels, self.num_outputs, 1)\n\n deconv_block = conv_with_kaiming_uniform(norm, activation=True, use_deconv=True)\n self.high_res_conv = deconv_block(channels, channels, 3, 2)\n self.high_res_out = nn.Conv2d(channels, self.num_outputs, 1)\n\n # tower.append(nn.Conv2d(\n # channels, max(self.num_outputs, 1), 1\n # ))\n\n # self.densepose_losses = 
build_densepose_losses(cfg)\n\n # def forward(self, s_logits, iuv_feats, iuv_feat_stride, relative_coords, instances):\n\n\n # N, _, H, W = iuv_feats.size()\n # rel_coord = torch.zeros([N,2,H,W], device=iuv_feats.device).to(dtype=iuv_feats.dtype)\n # abs_coord = torch.zeros([N,2,H,W], device=iuv_feats.device).to(dtype=iuv_feats.dtype)\n\n # if self.use_rel_coords: \n # # locations = compute_locations(\n # # iuv_feats.size(2), iuv_feats.size(3),\n # # stride=iuv_feat_stride, device=iuv_feats.device\n # # )\n # # # n_inst = len(instances)\n\n # im_inds = instances.im_inds\n\n\n # # instance_locations = instances.locations\n # # relative_coords = instance_locations.reshape(-1, 1, 2) - locations.reshape(1, -1, 2)\n # # relative_coords = relative_coords.permute(0, 2, 1).float()\n # # soi = self.sizes_of_interest.float()[instances.fpn_levels]\n # # relative_coords = relative_coords / soi.reshape(-1, 1, 1)\n # # relative_coords = relative_coords.to(dtype=iuv_feats.dtype)\n # # rel_coord_list = []\n # for idx in range(N):\n # if idx in im_inds:\n # cc = relative_coords[im_inds==idx,].reshape(-1, 2, H, W)\n # # assert s_logits.shape[1]==1\n # ss = s_logits[im_inds==idx,-1:]\n # # coord = torch.sum(cc*ss, dim=0, keepdim=True) \\\n # # / (torch.sum(ss, dim=0, keepdim=True)+1e-7)\n # coord = torch.mean(cc*ss, dim=0, keepdim=True) \n # rel_coord[idx:idx+1] = coord #.reshape(1, 2, H, W)\n # # pdb.set_trace()\n # # import imageio\n # # imageio.imwrite(\"tmp/cc.png\",cc[0,0].detach().cpu().numpy())\n # # imageio.imwrite(\"tmp/ss.png\",ss[0,0].detach().cpu().numpy())\n # # imageio.imwrite(\"tmp/cc_ss.png\",(cc*ss)[0,0].detach().cpu().numpy())\n # # imageio.imwrite(\"tmp/ss_sum.png\",torch.sum(ss, dim=0, keepdim=True)[0,0].detach().cpu().numpy())\n # # imageio.imwrite(\"tmp/coord_mean.png\",coord[0,0].detach().cpu().numpy())\n # # rel_coord_list.append(rel_coord)\n # # assert self.norm_feat\n # if self.norm_feat:\n # # iuv_feats = iuv_feats/iuv_feats.max()*2.0 - 1.0\n # iuv_feats = iuv_feats/20.0\n\n # if self.use_pos_emb:\n # rel_coord = self.position_embedder(rel_coord)\n\n\n # iuv_head_inputs = torch.cat([rel_coord, iuv_feats], dim=1) \n # # else:\n # # iuv_head_inputs = iuv_feats\n\n # if self.use_abs_coords: \n # abs_coord = compute_grid(H, W, device=iuv_feats.device)\n # iuv_head_inputs = torch.cat([abs_coord, iuv_head_inputs], dim=1) \n\n\n def forward(self, s_logits, iuv_feats, iuv_feat_stride, rel_coord, instances):\n N, _, H, W = iuv_feats.size()\n\n if self.use_rel_coords: \n if self.use_pos_emb:\n rel_coord = self.position_embedder(rel_coord)\n else:\n rel_coord = torch.zeros([N,2,H,W], device=iuv_feats.device).to(dtype=iuv_feats.dtype)\n iuv_head_inputs = torch.cat([rel_coord, iuv_feats], dim=1) \n\n if self.use_abs_coords: \n abs_coord = compute_grid(H, W, device=iuv_feats.device)[None,...].repeat(N,1,1,1)\n if self.use_pos_emb:\n abs_coord = self.position_embedder(abs_coord)\n else:\n abs_coord = torch.zeros([N,2,H,W], device=iuv_feats.device).to(dtype=iuv_feats.dtype)\n iuv_head_inputs = torch.cat([abs_coord, iuv_head_inputs], dim=1)\n\n iuv_logit_mid_res = self.tower(iuv_head_inputs)\n iuv_logit_mid_res = self.mid_res_conv(iuv_logit_mid_res)\n iuv_logit_mid_out = self.mid_res_out(iuv_logit_mid_res)\n\n iuv_logit_low_res = self.low_res_conv(iuv_logit_mid_res)\n iuv_logit_low_out = self.low_res_out(iuv_logit_low_res)\n\n iuv_logit_high_res = self.high_res_conv(iuv_logit_mid_res)\n iuv_logit_high_out = self.high_res_out(iuv_logit_high_res)\n\n # assert iuv_feat_stride >= self.iuv_out_stride\n 
# assert iuv_feat_stride % self.iuv_out_stride == 0\n # iuv_logit = aligned_bilinear(iuv_logit, int(iuv_feat_stride / self.iuv_out_stride))\n\n return [iuv_logit_high_out, iuv_logit_mid_out, iuv_logit_low_out]\n\n\n\n\n\n"
] | [
[
"torch.nn.Sequential",
"torch.mean",
"torch.zeros",
"torch.cat",
"torch.tensor"
],
[
"numpy.sqrt",
"numpy.asarray",
"numpy.cumsum",
"numpy.all",
"numpy.max",
"numpy.round",
"numpy.concatenate",
"numpy.mean",
"numpy.argmin",
"numpy.searchsorted",
"numpy.exp",
"numpy.unique",
"numpy.arange",
"scipy.ndimage.zoom",
"scipy.io.loadmat",
"numpy.count_nonzero",
"numpy.repeat",
"numpy.zeros",
"numpy.logical_not",
"numpy.spacing",
"numpy.min",
"numpy.logical_or",
"numpy.require",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.ones"
],
[
"torch.nn.Sequential",
"torch.zeros",
"torch.cat",
"torch.nn.Conv2d",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
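The densepose evaluation code stored in the row above packs mesh geodesic distances into a one-dimensional `Pdist_matrix` and recovers the entry for a vertex pair through a fairly opaque chain of index arithmetic in `getDistancesUV`. The sketch below is not part of the dataset; it isolates that arithmetic under assumptions taken from the stored code (1-based vertex ids, `n` = number of mesh vertices) and checks it against the simpler lower-triangular closed form it appears to reduce to. The function names are ours.

```python
# Hypothetical sketch, not from the dataset row above: the pair-to-index
# arithmetic of getDistancesUV, isolated so it can be verified on a small n.

def densepose_pair_index(v1, v2, n):
    """Replicates the index chain from getDistancesUV for 1-based vertex ids v1 != v2."""
    i, j = v1 - 1, v2 - 1
    if j > i:                      # same swap as in the stored code
        i, j = j, i
    i = n - i - 1
    j = n - j - 1
    k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1
    k = (n * n - n) / 2 - k - 1
    return int(k)

def lower_triangular_index(v1, v2):
    """Closed form the arithmetic appears to reduce to: row-major index into the
    strictly lower triangle, k = hi*(hi-1)/2 + lo with 0-based hi > lo."""
    i, j = v1 - 1, v2 - 1
    hi, lo = max(i, j), min(i, j)
    return hi * (hi - 1) // 2 + lo

if __name__ == "__main__":
    n = 10  # small stand-in for the 27554 vertices hard-coded in the evaluator
    for a in range(1, n + 1):
        for b in range(1, n + 1):
            if a != b:
                assert densepose_pair_index(a, b, n) == lower_triangular_index(a, b)
    print("index formulas agree for all pairs, n =", n)
```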
tpawlowski/image_analytics | [
"60445177a45c81a2c9c389b2f85f0d49d561c211"
] | [
"neuroscience/scidb/stream_mask.py"
] | [
"#!/usr/bin/python\n\n#\n#DFZ 11/15/2016: it's hard to control the chunk size read from the\n# stream() interface, see run_mri_stream.output for a concrete idea.\n#\n\n#the following import block is for testing only\nimport dipy.core.gradients as dpg\nimport os.path as op\nfrom dipy.segment.mask import median_otsu\nimport nibabel as nib\nfrom dipy.denoise import nlmeans\nimport dipy.core.gradients as dpg\nfrom dipy.denoise.noise_estimate import estimate_sigma\nimport time\nimport sys\nimport numpy as np\nimport os\nfrom __builtin__ import float\n\n#SciDB handler\n#from scidbpy import connect\n#sdb = connect('http://localhost:8080')\n\n\ntm_start = time.time()\nsys.stderr.write(\"\\n\\n=====> DFZ DEBUG 3/2/2017: \" + time.ctime() + \" OMG I start again! \\n\")\n\nSUB_ID = 101107\n\nend_of_interaction = 0\nwhile (end_of_interaction != 1):\n header = sys.stdin.readline().rstrip()\n #declare the local denoised array\n if(header != \"0\"):\n sys.stderr.write(\"=====> DFZ 2/24/2017: header = \" + header + \"\\n\")\n #We receive a message from the SciDB instance:\n num_lines = int(header) #how many lines did we get?\n sys.stderr.write(\"=====> DFZ 1/25/2017: num_lines = \"+str(num_lines)+\"\\n\")\n #n_vol = num_lines / 145 / 174 / 145\n #sys.stderr.write(\"=====> DFZ 1/25/2017: n_vol = \"+str(n_vol)+\"(should equal to 288/4 )\\n\")\n\n #Collect all lines into a list:\n input_lines = []\n for i in range(0, num_lines):\n line = sys.stdin.readline().rstrip()\n try:\n f = float(line)\n except:\n f = 0.0\n input_lines.append(f)\n\n#################################################\n############## MRI Logic ########################\n#################################################\n\n #construct the values into a numpy array for MRI\n nparray = np.asarray(input_lines, dtype=np.float32)\n# sys.stderr.write(\"=====> DFZ DEBUG: convertion completed.\\n\")\n sys.stderr.write(\"=====> DFZ DEBUG 2/16/2017: nparray.shape = \" + str(nparray.size) + \"; len(input_lines) = \" + str(len(input_lines)) +\"\\n\")\n mean_b0 = np.reshape(nparray, (145, 174, 145)) #last param should reflect the chunk size\n sys.stderr.write(\"=====> DFZ DEBUG: data loading completed.\\n\")\n\n #masking\n DATA_LOC = \"/home/ubuntu/mri_data/101107/\"\n gtab = dpg.gradient_table(DATA_LOC + 'bvals', DATA_LOC + 'bvecs', b0_threshold=10)\n mask = median_otsu(mean_b0, 4, 2, False, vol_idx=np.where(gtab.b0s_mask), dilate=1)\n sys.stderr.write(\"mask: \\n\")\n sys.stderr.write(str(mask)) #TODO: write it back to SciDB\n\n\n # if you need interative results:\n print(2)\n print(\"Total lines: \" + str(num_lines))\n print(\"I'm tired ----> First line: \" + str(input_lines[0]))\n sys.stdout.flush()\n#This will appear in the scidb-sterr.log file:\n sys.stderr.write(time.ctime() + \"I finished a chunk with \"+ str(num_lines) + \" lines of text!\\n\")\n\n else:\n\n #If we receive \"0\", it means the SciDB instance has no more\n #Data to give us. Here we have the option of also responding with \"0\"\n #Or sending some other message (i.e. a global sum):\n end_of_interaction = 1\n print(\"1\")\n# print(\"KTHXBYE\")\n print(\"KTHXBYE: subject \" + str(SUB_ID) + \" done in \" + str(time.time() - tm_start) + \" seconds\")\n sys.stdout.flush()\n\n#ok = 0\n# So I cannot 'return' or 'print' even after 'return'; the following statements would cause errors\n#exit(0)\n# print \"Start at \" + str(time.ctime())\n"
] | [
[
"numpy.asarray",
"numpy.where",
"numpy.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
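The `stream_mask.py` worker in the preceding row talks to SciDB's stream() interface over stdin/stdout: it reads a header line giving a count, reads that many value lines, writes a reply prefixed by its own line count, and terminates when the header is "0". The skeleton below is a hypothetical minimal reconstruction of that framing, not code from the dataset; the helper name and the trivial reply are ours, and the count-then-lines reply format is assumed from how the stored script prints its output.

```python
# Hypothetical skeleton of the line-oriented exchange used by the stream worker.
import sys

def run_stream_worker(process_chunk):
    while True:
        header = sys.stdin.readline().rstrip()
        if header == "0":            # no more chunks: send a final one-line reply and stop
            print("1")
            print("done")
            sys.stdout.flush()
            break
        num_lines = int(header)      # how many value lines follow
        values = []
        for _ in range(num_lines):
            line = sys.stdin.readline().rstrip()
            try:
                values.append(float(line))
            except ValueError:
                values.append(0.0)   # same fallback the stored worker uses
        reply = process_chunk(values)
        print(len(reply))            # reply is framed the same way: count, then lines
        for row in reply:
            print(row)
        sys.stdout.flush()

if __name__ == "__main__":
    run_stream_worker(lambda vals: ["count=%d" % len(vals)])
```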
CyrilGarneau/COVID19-Model | [
"e8b7e459d0cfca580ded33fda05ebd6858e19c86"
] | [
"src/coronaHelper2.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 18 11:01:22 2020\n\n@author: twallema\nCopyright (c) 2020 by T.W. Alleman, BIOMATH, Ghent University. All Rights Reserved.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom random import choices\nimport scipy\nfrom scipy.integrate import odeint\nimport math\nimport models\nimport networkx\nfrom scipy import interpolate as inter\nfrom gekko import GEKKO\n\ndef sampleFromDistribution(filename,k):\n df = pd.read_csv(filename)\n x = df.iloc[:,0]\n y = df.iloc[:,1]\n return(np.asarray(choices(x, y, k = k)))\n\ndef runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospitalvect,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE,\n initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs):\n tN = simtime + 1\n if monteCarlo == True: \n n_samples = dcfvect.size\n S = np.zeros([tN,n_samples])\n E = np.zeros([tN,n_samples])\n SM = np.zeros([tN,n_samples])\n M = np.zeros([tN,n_samples])\n H = np.zeros([tN,n_samples])\n C = np.zeros([tN,n_samples])\n HH = np.zeros([tN,n_samples])\n CH = np.zeros([tN,n_samples])\n R = np.zeros([tN,n_samples])\n F = np.zeros([tN,n_samples])\n SQ = np.zeros([tN,n_samples])\n EQ = np.zeros([tN,n_samples])\n SMQ = np.zeros([tN,n_samples])\n MQ = np.zeros([tN,n_samples])\n RQ = np.zeros([tN,n_samples])\n i=0\n t = np.linspace(0,simtime,tN)\n for sigma in sigmavect:\n dcf = dcfvect[i]\n dcr = dcrvect[i]\n dhospital = dhospitalvect[i]\n sm = smvect[i]\n m = (1-sm)*0.81 \n h = (1-sm)*0.14 \n c = (1-sm)*0.05 \n # perform simulation\n if modelType == 'deterministic':\n if method == 'findInfected' or method == 'findTime' or method == 'none':\n model = models.SEIRSAgeModel(initN=initN,beta=beta,sigma=sigma,Nc=Nc,zeta=zeta,sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,\n totalTests=totalTests,psi_FP=psi_FP,psi_PP=psi_PP,dq=dq,\n initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,initMQ=initMQ,\n initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n elif method == 'findGovernmentResponse':\n extraTime = stoArgs['extraTime']\n measureTime = stoArgs['measureTime']\n initE = 1\n Nc0 = 11.2\n checkpoints = {\n 't': [measureTime+extraTime],\n 'Nc': [Nc]\n }\n model = models.SEIRSAgeModel(initN=initN,beta=beta,sigma=sigma,Nc=Nc0,zeta=zeta,sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,\n totalTests=totalTests,psi_FP=psi_FP,psi_PP=psi_PP,dq=dq,\n initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,initMQ=initMQ,\n initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n else:\n raise Exception('Suitable methods to run model are: none, findTime, findInfected, findGovernmentResponse. 
The provided method was: {}'.format(method))\n elif modelType == 'stochastic':\n if method == 'findInfected' or method == 'findTime' or method == 'none':\n model = models.SEIRSNetworkModel(G=stoArgs['G'],beta=beta,sigma=sigma,zeta=zeta,p=stoArgs['p'],sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,theta_S=theta_S,\n theta_E=theta_E,theta_SM=theta_SM,theta_M=theta_M,theta_R=theta_R,phi_S=phi_S,phi_E=phi_E,phi_SM=phi_SM,phi_R=phi_R,psi_FP=psi_FP,psi_PP=psi_PP,\n dq=dq,initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,\n initMQ=initMQ,initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n # output is not returned every single day, so the results must be interpolated\n x = y.tseries\n if x[-1] < simtime:\n x=np.append(x,simtime+1)\n y.numS=np.append(y.numS,y.numS[-1])\n y.numE=np.append(y.numE,y.numE[-1])\n y.numSM=np.append(y.numSM,y.numSM[-1])\n y.numM=np.append(y.numM,y.numM[-1])\n y.numH=np.append(y.numH,y.numH[-1])\n y.numC=np.append(y.numC,y.numC[-1])\n y.numHH=np.append(y.numHH,y.numHH[-1])\n y.numCH=np.append(y.numCH,y.numCH[-1])\n y.numR=np.append(y.numR,y.numR[-1])\n y.numF=np.append(y.numF,y.numF[-1])\n y.numSQ=np.append(y.numSQ,y.numSQ[-1])\n y.numEQ=np.append(y.numEQ,y.numEQ[-1])\n y.numSMQ=np.append(y.numSMQ,y.numSMQ[-1])\n y.numMQ=np.append(y.numMQ,y.numMQ[-1])\n y.numRQ=np.append(y.numRQ,y.numRQ[-1])\n\n # first variable\n inte = inter.interp1d(x,y.numS)\n y.numS = inte(t)\n\n inte = inter.interp1d(x,y.numE)\n y.numE = inte(t)\n\n inte = inter.interp1d(x,y.numSM)\n y.numSM = inte(t)\n\n inte = inter.interp1d(x,y.numM)\n y.numM = inte(t)\n\n inte = inter.interp1d(x,y.numH)\n y.numH = inte(t)\n\n inte = inter.interp1d(x,y.numC)\n y.numC = inte(t)\n\n inte = inter.interp1d(x,y.numHH)\n y.numHH = inte(t)\n\n inte = inter.interp1d(x,y.numCH)\n y.numCH = inte(t)\n\n inte = inter.interp1d(x,y.numR)\n y.numR = inte(t)\n\n inte = inter.interp1d(x,y.numF)\n y.numF = inte(t)\n\n inte = inter.interp1d(x,y.numSQ)\n y.numSQ = inte(t)\n\n inte = inter.interp1d(x,y.numEQ)\n y.numEQ = inte(t)\n\n inte = inter.interp1d(x,y.numSMQ)\n y.numSMQ = inte(t)\n\n inte = inter.interp1d(x,y.numMQ)\n y.numMQ = inte(t)\n\n inte = inter.interp1d(x,y.numRQ)\n y.numRQ = inte(t)\n\n elif method == 'findGovernmentResponse':\n extraTime = stoArgs['extraTime']\n measureTime = stoArgs['measureTime']\n initE = 1\n beta0 = 0.244\n checkpoints = {\n 't': [measureTime+extraTime],\n 'beta': [beta]\n }\n model = models.SEIRSNetworkModel(G=stoArgs['G'],beta=beta,sigma=sigma,zeta=zeta,p=stoArgs['p'],sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,theta_S=theta_S,\n theta_E=theta_E,theta_SM=theta_SM,theta_M=theta_M,theta_R=theta_R,phi_S=phi_S,phi_E=phi_E,phi_SM=phi_SM,phi_R=phi_R,psi_FP=psi_FP,psi_PP=psi_PP,\n dq=dq,initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,\n initMQ=initMQ,initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n # output is not returned every single day, so the results must be interpolated\n x = y.tseries\n if x[-1] < simtime:\n x=np.append(x,simtime+1)\n y.numS=np.append(y.numS,y.numS[-1])\n y.numE=np.append(y.numE,y.numE[-1])\n y.numSM=np.append(y.numSM,y.numSM[-1])\n y.numM=np.append(y.numM,y.numM[-1])\n y.numH=np.append(y.numH,y.numH[-1])\n 
y.numC=np.append(y.numC,y.numC[-1])\n y.numHH=np.append(y.numHH,y.numHH[-1])\n y.numCH=np.append(y.numCH,y.numCH[-1])\n y.numR=np.append(y.numR,y.numR[-1])\n y.numF=np.append(y.numF,y.numF[-1])\n y.numSQ=np.append(y.numSQ,y.numSQ[-1])\n y.numEQ=np.append(y.numEQ,y.numEQ[-1])\n y.numSMQ=np.append(y.numSMQ,y.numSMQ[-1])\n y.numMQ=np.append(y.numMQ,y.numMQ[-1])\n y.numRQ=np.append(y.numRQ,y.numRQ[-1])\n\n # first variable\n inte = inter.interp1d(x,y.numS)\n y.numS = inte(t)\n\n inte = inter.interp1d(x,y.numE)\n y.numE = inte(t)\n\n inte = inter.interp1d(x,y.numSM)\n y.numSM = inte(t)\n\n inte = inter.interp1d(x,y.numM)\n y.numM = inte(t)\n\n inte = inter.interp1d(x,y.numH)\n y.numH = inte(t)\n\n inte = inter.interp1d(x,y.numC)\n y.numC = inte(t)\n\n inte = inter.interp1d(x,y.numHH)\n y.numHH = inte(t)\n\n inte = inter.interp1d(x,y.numCH)\n y.numCH = inte(t)\n\n inte = inter.interp1d(x,y.numR)\n y.numR = inte(t)\n\n inte = inter.interp1d(x,y.numF)\n y.numF = inte(t)\n\n inte = inter.interp1d(x,y.numSQ)\n y.numSQ = inte(t)\n\n inte = inter.interp1d(x,y.numEQ)\n y.numEQ = inte(t)\n\n inte = inter.interp1d(x,y.numSMQ)\n y.numSMQ = inte(t)\n\n inte = inter.interp1d(x,y.numMQ)\n y.numMQ = inte(t)\n\n inte = inter.interp1d(x,y.numRQ)\n y.numRQ = inte(t) \n else:\n raise Exception('Suitable methods to run function dxdt are: none, findTime, findInfected, findGovernmentResponse. The provided method was: {}'.format(method))\n else:\n raise Exception('Modeltype is either deterministic or stochastic. The provided modeltype was: {}'.format(modelType))\n \n # extract results\n if modelType == \"deterministic\":\n S[:,i] = y.sumS.reshape(y.sumS.size,1)[:,0] \n E[:,i] = y.sumE.reshape(y.sumE.size,1)[:,0] \n SM[:,i] = y.sumSM.reshape(y.sumSM.size,1)[:,0] \n M[:,i] = y.sumM.reshape(y.sumM.size,1)[:,0] \n H[:,i] = y.sumH.reshape(y.sumH.size,1)[:,0] \n C[:,i] = y.sumC.reshape(y.sumC.size,1)[:,0] \n HH[:,i] = y.sumHH.reshape(y.sumHH.size,1)[:,0] \n CH[:,i] = y.sumCH.reshape(y.sumCH.size,1)[:,0] \n R[:,i] = y.sumR.reshape(y.sumR.size,1)[:,0] \n F[:,i] = y.sumF.reshape(y.sumF.size,1)[:,0] \n SQ[:,i] = y.sumSQ.reshape(y.sumSQ.size,1)[:,0] \n EQ[:,i] = y.sumEQ.reshape(y.sumEQ.size,1)[:,0] \n SMQ[:,i] = y.sumSMQ.reshape(y.sumSMQ.size,1)[:,0] \n MQ[:,i] = y.sumMQ.reshape(y.sumMQ.size,1)[:,0] \n RQ[:,i] = y.sumRQ.reshape(y.sumRQ.size,1)[:,0]\n\n elif modelType == \"stochastic\":\n S[:,i] = y.numS.reshape(y.numS.size,1)[:,0] \n E[:,i] = y.numE.reshape(y.numE.size,1)[:,0] \n SM[:,i] = y.numSM.reshape(y.numSM.size,1)[:,0] \n M[:,i] = y.numM.reshape(y.numM.size,1)[:,0] \n H[:,i] = y.numH.reshape(y.numH.size,1)[:,0] \n C[:,i] = y.numC.reshape(y.numC.size,1)[:,0] \n HH[:,i] = y.numHH.reshape(y.numHH.size,1)[:,0] \n CH[:,i] = y.numCH.reshape(y.numCH.size,1)[:,0] \n R[:,i] = y.numR.reshape(y.numR.size,1)[:,0] \n F[:,i] = y.numF.reshape(y.numF.size,1)[:,0] \n SQ[:,i] = y.numSQ.reshape(y.numSQ.size,1)[:,0] \n EQ[:,i] = y.numEQ.reshape(y.numEQ.size,1)[:,0] \n SMQ[:,i] = y.numSMQ.reshape(y.numSMQ.size,1)[:,0] \n MQ[:,i] = y.numMQ.reshape(y.numMQ.size,1)[:,0] \n RQ[:,i] = y.numRQ.reshape(y.numRQ.size,1)[:,0] \n i = i + 1\n else:\n S = np.zeros([tN,1])\n E = np.zeros([tN,1])\n SM = np.zeros([tN,1])\n M = np.zeros([tN,1])\n H = np.zeros([tN,1])\n C = np.zeros([tN,1])\n HH = np.zeros([tN,1])\n CH = np.zeros([tN,1])\n R = np.zeros([tN,1])\n F = np.zeros([tN,1])\n SQ = np.zeros([tN,1])\n EQ = np.zeros([tN,1])\n SMQ = np.zeros([tN,1])\n MQ = np.zeros([tN,1])\n RQ = np.zeros([tN,1])\n t = np.linspace(0,simtime,tN)\n dcf = dcfvect\n dcr = 
dcrvect\n dhospital = dhospitalvect\n sm = smvect\n m = (1-sm)*0.81 \n h = (1-sm)*0.14 \n c = (1-sm)*0.05\n sigma = sigmavect \n # perform simulation\n if modelType == 'deterministic':\n if method == 'findInfected' or method == 'findTime' or method == 'none':\n model = models.SEIRSAgeModel(initN=initN,beta=beta,sigma=sigma,Nc=Nc,zeta=zeta,sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,\n totalTests=totalTests,psi_FP=psi_FP,psi_PP=psi_PP,dq=dq,\n initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,initMQ=initMQ,\n initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n elif method == 'findGovernmentResponse':\n extraTime = stoArgs['extraTime']\n measureTime = stoArgs['measureTime']\n initE = 1\n Nc0 = 11.2\n checkpoints = {\n 't': [measureTime+extraTime],\n 'Nc': [Nc]\n }\n model = models.SEIRSAgeModel(initN=initN,beta=beta,sigma=sigma,Nc=Nc,zeta=zeta,sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,\n totalTests=totalTests,psi_FP=psi_FP,psi_PP=psi_PP,dq=dq,\n initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,initMQ=initMQ,\n initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n else:\n raise Exception('Suitable methods to run the model are: none, findTime, findInfected, findGovernmentResponse. The provided method was: {}'.format(method))\n \n elif modelType == 'stochastic':\n if method == 'findInfected' or method == 'findTime' or method == 'none':\n model = models.SEIRSNetworkModel(G=stoArgs['G'],beta=beta,sigma=sigma,zeta=zeta,p=stoArgs['p'],sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,theta_S=theta_S,\n theta_E=theta_E,theta_SM=theta_SM,theta_M=theta_M,theta_R=theta_R,phi_S=phi_S,phi_E=phi_E,phi_SM=phi_SM,phi_R=phi_R,psi_FP=psi_FP,psi_PP=psi_PP,\n dq=dq,initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,\n initMQ=initMQ,initRQ=initRQ)\n print(simtime)\n y = model.run(T=simtime,checkpoints=checkpoints)\n # output is not returned every single day, so the results must be interpolated\n x = y.tseries\n if x[-1] < simtime:\n x=np.append(x,simtime+1)\n y.numS=np.append(y.numS,y.numS[-1])\n y.numE=np.append(y.numE,y.numE[-1])\n y.numSM=np.append(y.numSM,y.numSM[-1])\n y.numM=np.append(y.numM,y.numM[-1])\n y.numH=np.append(y.numH,y.numH[-1])\n y.numC=np.append(y.numC,y.numC[-1])\n y.numHH=np.append(y.numHH,y.numHH[-1])\n y.numCH=np.append(y.numCH,y.numCH[-1])\n y.numR=np.append(y.numR,y.numR[-1])\n y.numF=np.append(y.numF,y.numF[-1])\n y.numSQ=np.append(y.numSQ,y.numSQ[-1])\n y.numEQ=np.append(y.numEQ,y.numEQ[-1])\n y.numSMQ=np.append(y.numSMQ,y.numSMQ[-1])\n y.numMQ=np.append(y.numMQ,y.numMQ[-1])\n y.numRQ=np.append(y.numRQ,y.numRQ[-1])\n\n # first variable\n inte = inter.interp1d(x,y.numS)\n y.numS = inte(t)\n\n inte = inter.interp1d(x,y.numE)\n y.numE = inte(t)\n\n inte = inter.interp1d(x,y.numSM)\n y.numSM = inte(t)\n\n inte = inter.interp1d(x,y.numM)\n y.numM = inte(t)\n\n inte = inter.interp1d(x,y.numH)\n y.numH = inte(t)\n\n inte = inter.interp1d(x,y.numC)\n y.numC = inte(t)\n\n inte = inter.interp1d(x,y.numHH)\n y.numHH = inte(t)\n\n inte = inter.interp1d(x,y.numCH)\n y.numCH = inte(t)\n\n inte = 
inter.interp1d(x,y.numR)\n y.numR = inte(t)\n\n inte = inter.interp1d(x,y.numF)\n y.numF = inte(t)\n\n inte = inter.interp1d(x,y.numSQ)\n y.numSQ = inte(t)\n\n inte = inter.interp1d(x,y.numEQ)\n y.numEQ = inte(t)\n\n inte = inter.interp1d(x,y.numSMQ)\n y.numSMQ = inte(t)\n\n inte = inter.interp1d(x,y.numMQ)\n y.numMQ = inte(t)\n\n inte = inter.interp1d(x,y.numRQ)\n y.numRQ = inte(t)\n\n elif method == 'findGovernmentResponse':\n extraTime = stoArgs['extraTime']\n measureTime = stoArgs['measureTime']\n initE = 1\n beta0 = 0.290\n checkpoints = {\n 't': [measureTime+extraTime],\n 'beta': [beta]\n }\n model = models.SEIRSNetworkModel(G=stoArgs['G'],beta=beta,sigma=sigma,zeta=zeta,p=stoArgs['p'],sm=sm,m=m,h=h,c=c,dsm=dsm,dm=dm,dhospital=dhospital,dh=dh,dcf=dcf,dcr=dcr,mc0=mc0,ICU=ICU,theta_S=theta_S,\n theta_E=theta_E,theta_SM=theta_SM,theta_M=theta_M,theta_R=theta_R,phi_S=phi_S,phi_E=phi_E,phi_SM=phi_SM,phi_R=phi_R,psi_FP=psi_FP,psi_PP=psi_PP,\n dq=dq,initE=initE,initSM=initSM,initM=initM,initH=initH,initC=initC,initHH=initHH,initCH=initCH,initR=initR,initF=initF,initSQ=initSQ,initEQ=initEQ,initSMQ=initSMQ,\n initMQ=initMQ,initRQ=initRQ)\n y = model.run(T=simtime,checkpoints=checkpoints)\n # output is not returned every single day, so the results must be interpolated\n x = y.tseries\n if x[-1] < simtime:\n x=np.append(x,simtime+1)\n y.numS=np.append(y.numS,y.numS[-1])\n y.numE=np.append(y.numE,y.numE[-1])\n y.numSM=np.append(y.numSM,y.numSM[-1])\n y.numM=np.append(y.numM,y.numM[-1])\n y.numH=np.append(y.numH,y.numH[-1])\n y.numC=np.append(y.numC,y.numC[-1])\n y.numHH=np.append(y.numHH,y.numHH[-1])\n y.numCH=np.append(y.numCH,y.numCH[-1])\n y.numR=np.append(y.numR,y.numR[-1])\n y.numF=np.append(y.numF,y.numF[-1])\n y.numSQ=np.append(y.numSQ,y.numSQ[-1])\n y.numEQ=np.append(y.numEQ,y.numEQ[-1])\n y.numSMQ=np.append(y.numSMQ,y.numSMQ[-1])\n y.numMQ=np.append(y.numMQ,y.numMQ[-1])\n y.numRQ=np.append(y.numRQ,y.numRQ[-1])\n\n # first variable\n inte = inter.interp1d(x,y.numS)\n y.numS = inte(t)\n\n inte = inter.interp1d(x,y.numE)\n y.numE = inte(t)\n\n inte = inter.interp1d(x,y.numSM)\n y.numSM = inte(t)\n\n inte = inter.interp1d(x,y.numM)\n y.numM = inte(t)\n\n inte = inter.interp1d(x,y.numH)\n y.numH = inte(t)\n\n inte = inter.interp1d(x,y.numC)\n y.numC = inte(t)\n\n inte = inter.interp1d(x,y.numHH)\n y.numHH = inte(t)\n\n inte = inter.interp1d(x,y.numCH)\n y.numCH = inte(t)\n\n inte = inter.interp1d(x,y.numR)\n y.numR = inte(t)\n\n inte = inter.interp1d(x,y.numF)\n y.numF = inte(t)\n\n inte = inter.interp1d(x,y.numSQ)\n y.numSQ = inte(t)\n\n inte = inter.interp1d(x,y.numEQ)\n y.numEQ = inte(t)\n\n inte = inter.interp1d(x,y.numSMQ)\n y.numSMQ = inte(t)\n\n inte = inter.interp1d(x,y.numMQ)\n y.numMQ = inte(t)\n\n inte = inter.interp1d(x,y.numRQ)\n y.numRQ = inte(t) \n else:\n raise Exception('Suitable methods to run model are: none, findTime, findInfected, findGovernmentResponse. The provided method was: {}'.format(method))\n \n else:\n raise Exception('Modeltype is either deterministic or stochastic. 
The provided modeltype was: {}'.format(modelType))\n \n # extract results\n if modelType == \"deterministic\":\n S[:,0] = y.sumS.reshape(y.sumS.size,1)[:,0] \n E[:,0] = y.sumE.reshape(y.sumE.size,1)[:,0] \n SM[:,0] = y.sumSM.reshape(y.sumSM.size,1)[:,0] \n M[:,0] = y.sumM.reshape(y.sumM.size,1)[:,0] \n H[:,0] = y.sumH.reshape(y.sumH.size,1)[:,0] \n C[:,0] = y.sumC.reshape(y.sumC.size,1)[:,0] \n HH[:,0] = y.sumHH.reshape(y.sumHH.size,1)[:,0] \n CH[:,0] = y.sumCH.reshape(y.sumCH.size,1)[:,0] \n R[:,0] = y.sumR.reshape(y.sumR.size,1)[:,0] \n F[:,0] = y.sumF.reshape(y.sumF.size,1)[:,0] \n SQ[:,0] = y.sumSQ.reshape(y.sumSQ.size,1)[:,0] \n EQ[:,0] = y.sumEQ.reshape(y.sumEQ.size,1)[:,0] \n SMQ[:,0] = y.sumSMQ.reshape(y.sumSMQ.size,1)[:,0] \n MQ[:,0] = y.sumMQ.reshape(y.sumMQ.size,1)[:,0] \n RQ[:,0] = y.sumRQ.reshape(y.sumRQ.size,1)[:,0] \n elif modelType == \"stochastic\":\n S[:,0] = y.numS.reshape(y.numS.size,1)[:,0] \n E[:,0] = y.numE.reshape(y.numE.size,1)[:,0] \n SM[:,0] = y.numSM.reshape(y.numSM.size,1)[:,0] \n M[:,0] = y.numM.reshape(y.numM.size,1)[:,0] \n H[:,0] = y.numH.reshape(y.numH.size,1)[:,0] \n C[:,0] = y.numC.reshape(y.numC.size,1)[:,0] \n HH[:,0] = y.numHH.reshape(y.numHH.size,1)[:,0] \n CH[:,0] = y.numCH.reshape(y.numCH.size,1)[:,0] \n R[:,0] = y.numR.reshape(y.numR.size,1)[:,0] \n F[:,0] = y.numF.reshape(y.numF.size,1)[:,0] \n SQ[:,0] = y.numSQ.reshape(y.numSQ.size,1)[:,0] \n EQ[:,0] = y.numEQ.reshape(y.numEQ.size,1)[:,0] \n SMQ[:,0] = y.numSMQ.reshape(y.numSMQ.size,1)[:,0] \n MQ[:,0] = y.numMQ.reshape(y.numMQ.size,1)[:,0] \n RQ[:,0] = y.numRQ.reshape(y.numRQ.size,1)[:,0] \n\n if modelType == 'deterministic':\n return(t,S,E,SM,M,H,C,HH,CH,R,F,SQ,EQ,SMQ,MQ,RQ)\n elif modelType == 'stochastic':\n return(t,S,E,SM,M,H,C,HH,CH,R,F,SQ,EQ,SMQ,MQ,RQ,y.numNodes)\n\n\ndef LSQ(thetas,data,fitTo,\n initN,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,monteCarlo,method,modelType,checkpoints,stoArgs):\n\n if method == 'findInfected':\n # check if number of provided bounds is two\n if len(thetas) != 2:\n raise Exception('Number of bounds for method findInfected is 2. The number of provided bounds was: {}'.format(len(thetas)))\n # define length of simulation from provided data\n simtime = data.size-1\n # assign estimates to correct varaiable\n beta = thetas[0]\n B0=thetas[1]\n # calculate initial condition\n if modelType == 'stochastic':\n raise Exception('A stochastic model should be calibrated using the method findTime. 
The provided calibration method was: {}'.format(method))\n initN = initN\n initE = np.ones(Nc.shape[0])*B0\n initSM = np.zeros(Nc.shape[0])\n initM = np.zeros(Nc.shape[0])\n initH = np.zeros(Nc.shape[0])\n initC = np.zeros(Nc.shape[0])\n initHH = np.zeros(Nc.shape[0])\n initCH = np.zeros(Nc.shape[0])\n initR = np.zeros(Nc.shape[0])\n initF = np.zeros(Nc.shape[0])\n initSQ = np.zeros(Nc.shape[0])\n initEQ = np.zeros(Nc.shape[0])\n initSMQ = np.zeros(Nc.shape[0])\n initMQ = np.zeros(Nc.shape[0])\n initRQ = np.zeros(Nc.shape[0]) \n # run simulation\n y = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1)\n # calculate quadratic error\n SSE = sum((ymodel-data)**2)\n\n elif method == 'findTime':\n # check if number of provided bounds is two or three for deterministic/stochastic respectively\n # assign the estimates to the correct variables\n if modelType == 'deterministic':\n if len(thetas) != 2:\n raise Exception('Number of bounds for deterministic model and method findTime is 2. The number of provided bounds was: {}'.format(len(thetas)))\n beta = thetas[0]\n extraTime = int(thetas[1])\n stoArgs.update({'extraTime': int(thetas[1])})\n elif modelType == 'stochastic':\n if len(thetas) != 3:\n raise Exception('Number of bounds for stochastic model and method findTime is 3. The number of provided bounds was: {}'.format(len(thetas)))\n beta = thetas[0]\n extraTime = int(thetas[1])\n stoArgs.update({'extraTime': int(thetas[1])})\n p = thetas[2]\n stoArgs.update({'p': thetas[2]})\n else:\n raise Exception('Invalid modelType. 
The provided modelType was: {}'.format(modelType))\n # define length of simulation from provided data\n simtime = data.size+extraTime-1\n # calculate initial condition\n initN = initN\n initE = np.ones(Nc.shape[0])\n initSM = np.zeros(Nc.shape[0])\n initM = np.zeros(Nc.shape[0])\n initH = np.zeros(Nc.shape[0])\n initC = np.zeros(Nc.shape[0])\n initHH = np.zeros(Nc.shape[0])\n initCH = np.zeros(Nc.shape[0])\n initR = np.zeros(Nc.shape[0])\n initF = np.zeros(Nc.shape[0])\n initSQ = np.zeros(Nc.shape[0])\n initEQ = np.zeros(Nc.shape[0])\n initSMQ = np.zeros(Nc.shape[0])\n initMQ = np.zeros(Nc.shape[0])\n initRQ = np.zeros(Nc.shape[0]) \n # run simulation\n y = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n if modelType == 'deterministic':\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + (np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1))\n ymodel = ymodel[extraTime-1:-1,0].reshape(ymodel[extraTime-1:-1,0].size,1)\n # calculate quadratic error\n SSE = sum((ymodel-data)**2) \n elif modelType == 'stochastic':\n r = initN/y[-1] # ratio between stochastic population and total population\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + (np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1))*r # extrapolate to whole population\n ymodel = ymodel[extraTime-1:-1,0].reshape(ymodel[extraTime-1:-1,0].size,1)\n # calculate quadratic error\n SSE = sum((ymodel-data)**2)\n \n elif method == 'findGovernmentResponse':\n # check if number of provided bounds is three\n if len(thetas) != 3:\n raise Exception('Number of bounds for method findGovernmentResponse is 3. 
The number of provided bounds was: {}'.format(len(thetas)))\n # assign beta and normal Nc\n beta = 0.0314\n Nc = np.array([11.2])\n # assign estimates to correct variable\n Nc_star = np.array([thetas[0]]) \n extraTime = int(thetas[1])\n stoArgs.update({'extraTime': int(thetas[1])})\n measureTime = int(thetas[2])\n stoArgs.update({'measureTime': int(thetas[2])})\n checkpoints={\n 't': [extraTime+measureTime],\n 'Nc': [Nc_star]\n }\n # define length of simulation from provided data\n simtime = data.size+int(extraTime)-1\n # calculate initial condition\n initN = initN\n initE = np.ones(Nc.shape[0])\n initSM = np.zeros(Nc.shape[0])\n initM = np.zeros(Nc.shape[0])\n initH = np.zeros(Nc.shape[0])\n initC = np.zeros(Nc.shape[0])\n initHH = np.zeros(Nc.shape[0])\n initCH = np.zeros(Nc.shape[0])\n initR = np.zeros(Nc.shape[0])\n initF = np.zeros(Nc.shape[0])\n initSQ = np.zeros(Nc.shape[0])\n initEQ = np.zeros(Nc.shape[0])\n initSMQ = np.zeros(Nc.shape[0])\n initMQ = np.zeros(Nc.shape[0])\n initRQ = np.zeros(Nc.shape[0])\n method='none' \n # run simulation\n y = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1)\n ymodel = ymodel[extraTime-1:-1,0].reshape(ymodel[extraTime-1:-1,0].size,1)\n # calculate quadratic error\n SSE = sum(abs(ymodel-data)) \n elif method == 'socialInteraction':\n # source: https://github.com/kieshaprem/covid19-agestructureSEIR-wuhan-social-distancing/tree/master/data\n Nc_home = np.loadtxt(\"Belgium/BELhome.txt\", dtype='f', delimiter='\\t')\n Nc_work = np.loadtxt(\"Belgium/BELwork.txt\", dtype='f', delimiter='\\t')\n Nc_schools = np.loadtxt(\"Belgium/BELschools.txt\", dtype='f', delimiter='\\t')\n Nc_others = np.loadtxt(\"Belgium/BELothers.txt\", dtype='f', delimiter='\\t')\n Nc_all = np.loadtxt(\"Belgium/BELall.txt\", dtype='f', delimiter='\\t')\n Nc = Nc_all\n checkpoints={\n 't': [26,29,29+5,29+10,29+15],\n 'Nc': [Nc_all-Nc_schools,\n Nc_home + thetas[0]*(1-0.20)*Nc_work +thetas[0]*(1-0.70)*Nc_others,\n Nc_home + thetas[1]*(1-0.40)*Nc_work + thetas[1]*(1-0.70)*Nc_others,\n Nc_home + thetas[2]*(1-0.52)*Nc_work + thetas[2]*(1-0.70)*Nc_others,\n Nc_home + thetas[3]*(1-0.52)*Nc_work + thetas[3]*(1-0.70)*Nc_others]\n }\n # define length of simulation from provided data\n extraTime = 27\n simtime = data.size+27-1\n beta = 0.032155\n # calculate initial condition\n initN = initN\n initE = np.ones(Nc.shape[0])\n initSM = np.zeros(Nc.shape[0])\n initM = np.zeros(Nc.shape[0])\n initH = np.zeros(Nc.shape[0])\n initC = np.zeros(Nc.shape[0])\n initHH = np.zeros(Nc.shape[0])\n initCH = np.zeros(Nc.shape[0])\n initR = np.zeros(Nc.shape[0])\n initF = np.zeros(Nc.shape[0])\n initSQ = np.zeros(Nc.shape[0])\n initEQ = np.zeros(Nc.shape[0])\n initSMQ = np.zeros(Nc.shape[0])\n initMQ = np.zeros(Nc.shape[0])\n initRQ = np.zeros(Nc.shape[0])\n # run simulation\n method='findTime'\n y = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospital,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, 
initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints)\n if modelType == 'deterministic':\n # extract results\n ymodel=0\n for i in fitTo:\n ymodel = ymodel + (np.mean(y[i],axis=1).reshape(np.mean(y[i],axis=1).size,1))\n ymodel = ymodel[extraTime-1:-1,0].reshape(ymodel[extraTime-1:-1,0].size,1)\n # calculate quadratic error\n SSE = sum((ymodel-data)**2) \n else:\n raise Exception('Method not suited for least-squares fit: choose either findTime, findInfected or findGovernmentResponse. The provided method was: {}'.format(method))\n return(SSE)\n\ndef modelFit(bounds,data,fitTo,initN,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,monteCarlo,n_samples,method,modelType,checkpoints,\ndisp,polish,maxiter,popsize,**stoArgs):\n # Monte Carlo sampling of parameters gamma, dHD, dHI, sm, and call to genetic optimisation algorithm is performed here\n if monteCarlo == True:\n sigmavect = sampleFromDistribution('corona_incubatie.csv',n_samples)\n dcfvect = np.random.normal(18.5, 5.2, n_samples)\n dcrvect = np.random.normal(22.0, 5.2, n_samples)\n smvect = np.random.normal(0.86, 0.04/1.96, n_samples)\n mvect = (1-smvect)*0.81\n hvect = (1-smvect)*0.14\n cvect = (1-smvect)*0.05\n dhospitalvect = np.random.normal(9.10, 0.50/1.96, n_samples)\n thetas = scipy.optimize.differential_evolution(LSQ, bounds, args=(data,fitTo,initN,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospitalvect,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,monteCarlo,method,modelType,checkpoints,stoArgs),disp=disp,polish=polish,workers=5,maxiter=maxiter, popsize=popsize,tol=1e-18)\n else:\n sigma = 5.2\n dcf = 18.5 \n dcr = 22.0\n sm = 0.86\n m = (1-sm)*0.81 \n h = (1-sm)*0.14 \n c = (1-sm)*0.05\n dhospital = 9.1\n thetas = scipy.optimize.differential_evolution(LSQ, bounds, args=(data,fitTo,initN,sigma,Nc,zeta,sm,m,h,c,dsm,dm,dhospital,dh,dcf,dcr,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,monteCarlo,method,modelType,checkpoints,stoArgs),disp=disp,polish=polish,workers=5,maxiter=maxiter, popsize=popsize,tol=1e-18)\n fit = thetas.x\n return(fit)\n\ndef simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints,**stoArgs):\n # This function is a wrapper for 'runSimulation' to include monte carlo sampling and extract the results in a dictionary\n # Monte Carlo sampling of parameters gamma, dHD, dHI, sm, and call to genetic optimisation algorithm is performed here\n if monteCarlo == True:\n sigmavect = sampleFromDistribution('corona_incubatie.csv',n_samples)\n dcfvect = np.random.normal(18.5, 5.2, n_samples)\n dcrvect = np.random.normal(22.0, 5.2, n_samples)\n smvect = np.random.normal(0.86, 0.04/1.96, n_samples)\n mvect = (1-smvect)*0.81\n hvect = (1-smvect)*0.14\n cvect = (1-smvect)*0.05\n dhospitalvect = np.random.normal(9.10, 0.50/1.96, n_samples)\n simout = runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospitalvect,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,\n phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, 
initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n \n else:\n sigma = 5.2\n dcf = 18.5 \n dcr = 22.0\n sm = 0.86\n m = (1-sm)*0.81 \n h = (1-sm)*0.14 \n c = (1-sm)*0.05\n dhospital = 9.1 \n simout = runSimulation(initN,beta,sigma,Nc,zeta,sm,m,h,c,dsm,dm,dhospital,dh,dcf,dcr,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,\n initE, initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs)\n \n # -----------------------------------------------------------------------------\n # extract results, rescale to population size initN in case of stochastic model\n # -----------------------------------------------------------------------------\n if modelType == 'deterministic':\n simout = {\n 't': simout[0],\n 'S': simout[1],\n 'E': simout[2],\n 'SM': simout[3],\n 'M': simout[4],\n 'H': simout[5],\n 'C': simout[6],\n 'HH': simout[7],\n 'CH': simout[8],\n 'R': simout[9],\n 'F': simout[10],\n 'SQ': simout[11],\n 'EQ': simout[12],\n 'SMQ': simout[13],\n 'MQ': simout[14],\n 'RQ': simout[15],\n } \n elif modelType == 'stochastic':\n r = initN/simout[-1]\n simout = {\n 't': simout[0],\n 'S': simout[1]*r,\n 'E': simout[2]*r,\n 'SM': simout[3]*r,\n 'M': simout[4]*r,\n 'H': simout[5]*r,\n 'C': simout[6]*r,\n 'HH': simout[7]*r,\n 'CH': simout[8]*r,\n 'R': simout[9]*r,\n 'F': simout[10]*r,\n 'SQ': simout[11]*r,\n 'SMQ': simout[12]*r,\n 'MQ': simout[13]*r,\n 'RQ': simout[14]*r,\n } \n\n return(simout)\n\ndef constructHorizon(theta,period): \n n = len(theta)\n t = np.zeros([n-1])\n for i in range(n-1):\n t[i] = period*(i+1) \n checkpoints = {'t': t,\n 'Nc': theta[1:]}\n return(checkpoints)\n\ndef constructHorizonPlot(theta,period):\n if type(theta) is np.ndarray:\n n = theta.size\n Nc = np.ones([period*n+1])\n for i in range(n):\n Nc[period*i:(period*i+period)]=theta[i]\n elif type(theta) is float:\n n = 1\n Nc = np.ones([period*n])\n for i in range(n):\n Nc[period*i:(period*i+period)]=theta\n else:\n raise Exception('Theta must be a vector or float. The provided datatype was: {}'.format(type(theta)))\n return(Nc)\n \ndef constructHorizonTesting(theta1,theta2,period): \n n = len(theta1)\n t = np.zeros([n-1])\n for i in range(n-1):\n t[i] = period*(i+1) \n checkpoints = {'t': t,\n 'Nc': theta1[1:],\n 'totalTests': theta2[1:]}\n return(checkpoints)\n\ndef constructHorizonTestingPlot(theta1,theta2,period):\n if type(theta1) is np.ndarray:\n n = theta1.size\n Nc = np.ones([period*n+1])\n theta_M = np.ones([period*n+1])\n for i in range(n):\n if i == 0:\n Nc[period*i:(period*i+period)+1]=theta1[i]\n theta_M[period*i:(period*i+period)+1]=theta2[i]\n else:\n Nc[period*i:(period*i+period)]=theta1[i]\n theta_M[period*i:(period*i+period)]=theta2[i] \n elif type(theta1) is float:\n n = 1\n Nc = np.ones([period*n])\n theta_M = np.ones([period*n])\n for i in range(n):\n if i == 0:\n Nc[period*i:(period*i+period)+1]=theta1\n theta_M[period*i:(period*i+period)+1]=theta2\n else:\n Nc[period*i:(period*i+period)]=theta1[i]\n theta_M[period*i:(period*i+period)]=theta2[i] \n else:\n raise Exception('Theta must be a vector or float. 
The provided datatype was: {}'.format(type(theta1)))\n return(Nc,theta_M)\n\ndef MPCcalcWeights(thetas,initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,roundOff,period,P,stoArgs):\n controlDoF = 1\n if controlDoF == 1:\n thetas[thetas<5.6] = 1.8\n thetas[(thetas>=5.6)&(thetas<8)] = 6\n thetas[thetas>=8] = 11.2\n # Add thetas to a list\n Ncs=[]\n for i in range(thetas.size):\n Ncs.append(np.array([thetas[i]]))\n # Build prediction horizon\n for i in range(P-thetas.size):\n Ncs.append(Ncs[-1])\n checkpoints = constructHorizon(Ncs,period)\n # Set correct simtime\n simtime = checkpoints['t'].size*period\n # run simulation\n method == 'none' # nothing special \n Nc = np.array([thetas[0]]) # first checkpoint cannot be at time 0\n simout = simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints,**stoArgs)\n if monteCarlo == True:\n CH = np.mean(simout['CH'],axis=1)\n CH = np.mean(simout['CH'],axis=1).reshape(CH.size,1) \n else:\n CH = simout['CH']\n # regeling van de kritiek zieken\n y_sp = ICU # maximum aantal bedden op ICU\n ymodel = CH # voorspelde aantal kritiek zieken bij ingang beta\n error = y_sp - ymodel # vector met fouten in de tijd\n SSE = sum(error**2)\n\n elif controlDoF == 2:\n # Split list of thetas in half\n length = thetas.size\n middle_index = length//2\n thetas1 = thetas[:middle_index]\n # Discretise thetas1 (=Nc)\n thetas1[thetas1<5.6] = 1.8\n thetas1[(thetas1>=5.6)&(thetas1<8)] = 6\n thetas1[thetas1>=8] = 11.2\n thetas2 = thetas[middle_index:]\n # Add thetas to list\n Ncs1=[]\n for i in range(thetas1.size):\n Ncs1.append(np.array([thetas1[i]]))\n Ncs2=[]\n for i in range(thetas2.size):\n Ncs2.append(np.array([thetas2[i]]))\n # Build prediction horizons\n for i in range(P-thetas.size//2):\n Ncs1.append(Ncs1[-1])\n Ncs2.append(Ncs2[-1])\n # Construct checkpoints dictionary\n checkpoints = constructHorizonTesting(Ncs1,Ncs2,period)\n #print(checkpoints)\n # Define simtime\n simtime = checkpoints['t'].size*period \n # run simulation\n method == 'none' # nothing special \n Nc = np.array([thetas[0]]) # first checkpoint cannot be at time 0\n totalTests = np.array([thetas[middle_index]])\n simout = simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints,**stoArgs)\n if monteCarlo == True:\n CH = np.mean(simout['CH'],axis=1)\n CH = np.mean(simout['CH'],axis=1).reshape(CH.size,1) \n else:\n CH = simout['CH']\n # regeling van de kritiek zieken\n y_sp = ICU # maximum aantal bedden op ICU\n ymodel = CH # voorspelde aantal kritiek zieken bij ingang beta\n error = y_sp - ymodel # vector met fouten in de tijd\n SSE = sum(error**2)\n return(SSE)\n\ndef MPCcalcWeightsAge(thetas,initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n 
initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,period,P):\n\n # source: https://github.com/kieshaprem/covid19-agestructureSEIR-wuhan-social-distancing/tree/master/data\n Nc_home = np.loadtxt(\"Belgium/BELhome.txt\", dtype='f', delimiter='\\t')\n Nc_work = np.loadtxt(\"Belgium/BELwork.txt\", dtype='f', delimiter='\\t')\n Nc_schools = np.loadtxt(\"Belgium/BELschools.txt\", dtype='f', delimiter='\\t')\n Nc_others = np.loadtxt(\"Belgium/BELothers.txt\", dtype='f', delimiter='\\t')\n Nc_all = np.loadtxt(\"Belgium/BELall.txt\", dtype='f', delimiter='\\t')\n # Use values of thetas to build a list object Ncs containing discrete scenarios\n Ncs=[]\n for i in range(thetas.size):\n if thetas[i]<=1 and thetas[i]>=0:\n Ncs.append(Nc_all)\n elif thetas[i]<=2 and thetas[i]> 1:\n Ncs.append(Nc_home + Nc_schools + 0.01*(1-0.52)*Nc_work + 0.01*(1-0.70)*Nc_others)\n elif thetas[i]<=3 and thetas[i]> 2:\n Ncs.append(Nc_home + 0.01*(1-0.52)*Nc_work + 0.01*(1-0.70)*Nc_others)\n\n # build prediction horizon\n for i in range(P-thetas.size):\n Ncs.append(Ncs[-1])\n checkpoints = constructHorizon(Ncs,period)\n simtime = checkpoints['t'].size*period\n # run simulation\n method == 'none' # nothing special\n Nc = Ncs[0] # first checkpoint cannot be at time 0\n simout = simModel(initN,beta,Nc,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,checkpoints)\n if monteCarlo == True:\n CH = np.mean(simout['CH'],axis=1)\n CH = np.mean(simout['CH'],axis=1).reshape(CH.size,1) \n else:\n CH = simout['CH']\n # regeling van de kritiek zieken\n y_sp = ICU # maximum aantal bedden op ICU\n ymodel = CH # voorspelde aantal kritiek zieken bij ingang beta\n error = y_sp - ymodel # vector met fouten in de tijd\n SSE = sum(error**2)\n return(SSE)\n\ndef MPCoptimize(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,roundOff,period,P,N,\n disp,polish,maxiter,popsize,**stoArgs):\n controlDoF = 1\n if controlDoF == 1:\n # Geef bounds op\n bounds=[]\n for i in range(N):\n bounds.append((0,11.2))\n # Perform optimisation \n fit = scipy.optimize.differential_evolution(MPCcalcWeights, bounds, args=(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,roundOff,period,P,stoArgs),disp=disp,polish=polish,workers=-1,maxiter=maxiter, popsize=popsize,tol=1e-30)\n thetas=fit.x\n\n elif controlDoF == 2:\n # Geef bounds op\n bounds=[]\n # First variable is Nc\n for i in range(N):\n bounds.append((0,11.2))\n # Second variable is theta_M\n for i in range(N):\n bounds.append((0,1e6)) \n # Perform optimisation \n fit = scipy.optimize.differential_evolution(MPCcalcWeights, bounds, args=(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n 
initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,roundOff,period,P,stoArgs),disp=disp,polish=polish,workers=-1,maxiter=maxiter, popsize=popsize,tol=1e-30)\n thetas=fit.x \n print(thetas)\n return(thetas)\n \ndef MPCoptimizeAge(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,period,P,N,\n disp,polish,maxiter,popsize):\n # source: https://github.com/kieshaprem/covid19-agestructureSEIR-wuhan-social-distancing/tree/master/data\n Nc_home = np.loadtxt(\"Belgium/BELhome.txt\", dtype='f', delimiter='\\t')\n Nc_work = np.loadtxt(\"Belgium/BELwork.txt\", dtype='f', delimiter='\\t')\n Nc_schools = np.loadtxt(\"Belgium/BELschools.txt\", dtype='f', delimiter='\\t')\n Nc_others = np.loadtxt(\"Belgium/BELothers.txt\", dtype='f', delimiter='\\t')\n Nc_all = np.loadtxt(\"Belgium/BELall.txt\", dtype='f', delimiter='\\t')\n \n # Geef bounds op\n bounds=[]\n for i in range(N):\n bounds.append((0,3))\n # Prepare solver\n # Perform optimisation (CONTINUOUS) \n fit = scipy.optimize.differential_evolution(MPCcalcWeightsAge, bounds, args=(initN,beta,zeta,dsm,dm,dhospital,dh,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE, initSM, initM, initH, initC, \n initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,n_samples,method,modelType,discrete,period,P),disp=disp,polish=polish,workers=-1,maxiter=maxiter, popsize=popsize,tol=1e-18,mutation=(1.9, 1.99), recombination=1)\n thetas = fit.x\n\n # discretise thetas if needed\n thetas=fit.x\n Ncs=[]\n for i in range(thetas.size):\n if thetas[i]<=1 and thetas[i]>=0:\n Ncs.append(Nc_all)\n elif thetas[i]<=2 and thetas[i]> 1:\n Ncs.append(Nc_home + Nc_schools + 0.01*(1-0.52)*Nc_work + 0.01*(1-0.70)*Nc_others)\n elif thetas[i]<=3 and thetas[i]> 2:\n Ncs.append(Nc_home + 0.01*(1-0.52)*Nc_work + 0.01*(1-0.70)*Nc_others)\n return(Ncs,thetas)\n\n# You cannot keep extending the control horizon because the number of parameters will get so big\n# that optimisation becomes a problem. 
To simulate the full course of the outbreak, it is better\n# to optimise one policy interval, advance the simulation to the next policy interval and repeat\ndef MPClongTerm(y0,nat,mort,dSM,dM,dZ,m,z,h,mh,ICU,monteCarlo,n_samples,period,maxiter,popsize,polish,disp,P,N,discrete,roundOff,Kh,Kd,Ki,nPeriods):\n betaVect=[]\n for i in range(nPeriods):\n # optimise control horizon over prediction horizon\n beta = MPCoptimize(y0,nat,mort,dSM,dM,dZ,m,z,h,mh,ICU,monteCarlo,n_samples,period,maxiter,popsize,polish,disp,P,N,discrete,roundOff,Kh,Kd,Ki)\n betaVect.append(beta[0])\n # advance the simulation one policy interval\n simtime = period # - 2\n tN = simtime + 1 \n t = np.linspace(0,simtime,tN)\n u = np.ones([tN])\n u = u*beta[0]\n simout = simModel(y0,nat,mort,u,dSM,dM,dZ,m,z,h,mh,ICU,tN,simtime,monteCarlo,n_samples,'variableBeta')\n O = simout[1]\n B = simout[2]\n SM = simout[3]\n M = simout[4]\n Z = simout[5]\n H = simout[6]\n I = simout[7]\n D = simout[8]\n T = simout[9]\n O = np.mean(O,axis=1)\n B = np.mean(B,axis=1)\n SM = np.mean(SM,axis=1)\n M = np.mean(M,axis=1)\n Z = np.mean(Z,axis=1)\n H = np.mean(H,axis=1)\n I = np.mean(I,axis=1)\n D = np.mean(D,axis=1)\n T = np.mean(T,axis=1)\n y0 = np.array([O[-1],B[-1],SM[-1],M[-1],Z[-1],H[-1],I[-1],D[-1],T[-1]])\n return(betaVect) \n \n"
] | [
[
"scipy.optimize.differential_evolution",
"pandas.read_csv",
"numpy.linspace",
"numpy.ones",
"numpy.random.normal",
"scipy.interpolate.interp1d",
"numpy.mean",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
manuelprogramming/OSA | [
"3a57ea944eef3e8680055a35e8cebd36b93dac51"
] | [
"handlers/plotting.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef format_plot(func):\n def func_wrapper(*args):\n func(*args)\n plt.ylabel(\"Intensity [dBm]\")\n plt.xlabel(\"Wavelength [nm]\")\n plt.tight_layout()\n plt.show()\n return func\n\n return func_wrapper\n\n\ndef format_ani_plot(func):\n def func_wrapper(*args):\n func(*args)\n plt.ylabel(\"Intensity [dBm]\")\n plt.xlabel(\"Wavelength [nm]\")\n plt.tight_layout()\n return func\n return func_wrapper\n\n\ndef interactive_off_on(func):\n def func_wrapper(*args):\n plt.ioff()\n func(*args)\n plt.ion()\n return func\n return func_wrapper\n\n\ndef config_matplotlib(debug_mode: bool) -> None:\n plt.style.use(\"seaborn-whitegrid\")\n if not debug_mode:\n plt.ion()\n\n\nif __name__ == '__main__':\n x = np.random.random(15)\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"numpy.random.random",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BesterRanX/BesterTF | [
"2e7e6938f74d027ebf9aee9b8af432a3e7b54519"
] | [
"BesterTF/Layers.py"
] | [
"import tensorflow as tf\n\n\nclass Layer():\n def __init__(self, output_dim, input_dim=0, activation=None):\n # cache parameters\n self.activation = activation\n self.input_dim = input_dim\n self.output_dim = output_dim\n\n\n\nclass Dense(Layer):\n def __init__(self, output_dim, input_dim=0, activation=None):\n # super class init\n Layer.__init__(output_dim, input_dim, activation)\n\n def compile(self):\n # initialise weights\n self.Weights = tf.Variable(tf.random_uniform([self.input_dim, self.output_dim], -1, 1))\n # initialise biases\n self.biases = tf.Variable(tf.zeros([1, self.output_dim]) + 0.1)\n\n # activation\n def act(self, inputs=None):\n Wx_plus_b = tf.matmul(inputs, self.Weights, name='Wx_plus_b') + self.biases\n return self.activation(Wx_plus_b)\n"
] | [
[
"tensorflow.matmul",
"tensorflow.random_uniform",
"tensorflow.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
strawsyz/straw | [
"db313c78c2e3c0355cd10c70ac25a15bb5632d41",
"db313c78c2e3c0355cd10c70ac25a15bb5632d41",
"db313c78c2e3c0355cd10c70ac25a15bb5632d41",
"db313c78c2e3c0355cd10c70ac25a15bb5632d41",
"db313c78c2e3c0355cd10c70ac25a15bb5632d41",
"db313c78c2e3c0355cd10c70ac25a15bb5632d41"
] | [
"study/dgl_study/02.py",
"my_cv/utils/cv2_util.py",
"my_cv/utils/numpy_util.py",
"my_cv/09/09_02_animals_ann.py",
"study/pytorch_study/00_create_net.py",
"EasyDeep/datasets/image_dataset.py"
] | [
"import dgl\nimport networkx as nx\n\n# create a graph\ng_nx = nx.petersen_graph()\ng_dgl = dgl.DGLGraph(g_nx)\n\nimport matplotlib.pyplot as plt\n\nplt.subplot(121)\nnx.draw(g_nx, with_labels=True)\nplt.subplot(122)\nnx.draw(g_dgl.to_networkx(), with_labels=True)\n\nplt.show()\n\n# add edges and nodes into graph\nimport dgl\nimport torch as th\n\ng = dgl.DGLGraph()\ng.add_nodes(10)\n# A couple edges one-by-one\nfor i in range(1, 5):\n g.add_edge(i, 0)\n# A few more with a paired list\nsrc = list(range(5, 8));\ndst = [0] * 3\ng.add_edges(src, dst)\n# finish with a pair of tensors\nsrc = th.tensor([8, 9]);\ndst = th.tensor([0, 0])\ng.add_edges(src, dst)\ng.add_edges([2], [8])\nnx.draw(g.to_networkx(), with_labels=True)\nplt.show()\n# Edge broadcasting will do star graph in one go!\ng.clear();\ng.add_nodes(10)\nsrc = th.tensor(list(range(1, 10)));\ng.add_edges(src, 0)\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nnx.draw(g.to_networkx(), with_labels=True)\nplt.show()\n\n# assigin a feature\nimport dgl\nimport torch\n\n# assign node features\nx = torch.randn(10, 3)\n# g.clear()\ng.ndata['x'] = x\n\n# print(g.ndata['x'] == g.nodes[:].data['x'])\nprint(g.ndata['x'])\nprint('x value of first node in graph : {}'.format(g.nodes[0].data['x']))\n# Access node set with integer, list, or integer tensor\ng.nodes[0].data['x'] = torch.zeros(1, 3)\ng.nodes[[0, 1, 2]].data['x'] = torch.zeros(3, 3)\ng.nodes[torch.tensor([0, 1, 2])].data['x'] = torch.zeros(3, 3)\n\n# Assign edge features\ng.edata['w'] = th.randn(9, 2)\nprint(g.edata['w'])\nprint('w value of first edge in graph : {}'.format(g.edges[0].data['w']))\n# Access edge set with IDs in integer, list, or integer tensor\ng.edges[1].data['w'] = th.randn(1, 2)\ng.edges[[0, 1, 2]].data['w'] = th.zeros(3, 2)\nprint(\"g.edges[[0, 1, 2]].data['w'] : \\n{}\".format(g.edges[[0, 1, 2]].data['w']))\ng.edges[th.tensor([0, 1, 2])].data['w'] = th.zeros(3, 2)\n\n# You can also access the edges by giving endpoints\ng.edges[1, 0].data['w'] = th.ones(1, 2) # edge 1 -> 0\ng.edges[[1, 2, 3], [0, 0, 0]].data['w'] = th.ones(3, 2) # edges [1, 2, 3] -> 0\n\nprint(g.node_attr_schemes())\ng.ndata['x'] = th.zeros((10, 4))\nprint(g.node_attr_schemes())\n\n# remove node or edge states\ng.ndata.pop('x')\ng.edata.pop('w')\nprint(g.node_attr_schemes())\n\n# create multigraphs\ng_multi = dgl.DGLGraph(multigraph=True)\ng_multi.add_nodes(10)\ng_multi.ndata['x'] = torch.randn(10, 2)\ng_multi.add_edges(list(range(1, 10)), 0)\ng_multi.add_edge(1, 0) # two edges on 1->0\n\ng_multi.edata['w'] = th.randn(10, 2)\ng_multi.edges[1].data['w'] = th.zeros(1, 2)\nprint(g_multi.edges())\nplt.figure()\nnx.draw(g_dgl.to_networkx(), with_labels=True)\n\nplt.show()\n\n# in multigraphs, use edge's id to query edge\neid_10 = g_multi.edge_id(1, 0)\ng_multi.edges[eid_10].data['w'] = th.ones(len(eid_10), 2)\nprint(g_multi.edata['w'])\n\n# !!!!nodes and edges can be added but not remove\n",
"import cv2\nimport numpy as np\nfrom PIL import Image\n\n\ndef draw_approx_polyDP(cnt, epsilon=0.01, closed=True):\n \"\"\"用多边形来近似的表示曲线\"\"\"\n epsilon = epsilon * cv2.arcLength(cnt, closed) # 得到轮廓的周长信息作为参考值\n return cv2.approxPolyDP(cnt, epsilon, closed) # 得到近似多边形框\n\n\ndef draw_convex_hull(cnt):\n \"\"\"画凸包,传入的是一些点\"\"\"\n return cv2.convexHull(cnt) # 获取处理过的轮廓信息\n\n\ndef show_img(file_name, window_name='win'):\n img = cv2.imread(file_name)\n cv2.imshow(window_name, img)\n # 按任意键,图片消失\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\ndef camera_show(window_name='camera'):\n \"\"\"最好在改进一下关闭窗口部分的功能\n 建立一个窗口捕捉摄像头显示的内容\n 当左键点击过窗口,且按过任意键盘键,才会退出窗口\"\"\"\n clicked = False\n camera_capture = cv2.VideoCapture(0)\n\n def on_mouse(event, x, y, flags, param):\n global clicked\n if event == cv2.EVENT_LBUTTONUP:\n clicked = True\n\n cv2.namedWindow(window_name)\n cv2.setMouseCallback(window_name, on_mouse)\n\n success, frame = camera_capture.read()\n # cv2.waitKey(1) 参数表示等待键盘触发的时间,返回值为-1表示没有见按下\n while success and cv2.waitKey(1) == -1 and not clicked:\n cv2.imshow(window_name, frame)\n success, frame = camera_capture.read()\n cv2.destroyAllWindows()\n camera_capture.release()\n\n\ndef camera_save(file_name, seconds=3, fps=60):\n # 获得设备\n camera_capture = cv2.VideoCapture(0)\n size = (int(camera_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(camera_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n video_writer = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size)\n\n success, frame = camera_capture.read()\n num_frames_remaining = seconds * fps - 1\n while success and num_frames_remaining > 0:\n video_writer.write(frame)\n success, frame = camera_capture.read()\n num_frames_remaining -= 1\n camera_capture.release()\n\n\ndef copy(orig_img, start_height, start_width, part):\n height, width = part.shape\n orig_img[start_height: start_height + height, start_width: start_width + width] = part\n return orig_img\n\n\ndef draw_gray_random(height, width):\n flat_numpy_array = np.random.randint(0, 256, height * width)\n gray_image = flat_numpy_array.reshape(height, width)\n return gray_image\n\n\ndef draw_random(height, width, channel=3):\n flat_numpy_array = np.random.randint(0, 256, height * width * channel)\n bgr_image = flat_numpy_array.reshape((height, width, channel))\n return bgr_image\n\n\ndef draw_gray_black(height, width):\n img = np.zeros((height, width), dtype=np.uint8)\n return img\n\n\ndef draw_line(img, x1, y1, x2, y2, color=(0, 255, 0), thickness=2):\n return cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\n\ndef draw_rectangle(img, box, contour_idx=0, color=(0, 0, 255), thickness=3):\n return cv2.drawContours(img, box, contour_idx, color, thickness)\n\n\ndef draw_cicile(img, center, radius, color=(0, 255, 0), thickness=2):\n return cv2.circle(img, center, radius, color, thickness)\n\n\ndef draw_black(height, width):\n img = draw_black(height, width)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n return img\n\n\ndef img2array(img):\n return bytearray(img)\n\n\ndef array_img(arr, height, width, channel=3):\n return np.array(arr).reshape(height, width, channel)\n\n\ndef array2img_gray(arr, height, width):\n return np.array(arr).reshape(height, width)\n\n\nif __name__ == '__main__':\n\n img = cv2.imread('sphere.png')\n cv2.imshow('win', img)\n # empire = Image.open('sphere.png')\n # cv2.waitKey()\n # cv2.destroyAllWindows()\n # print(empire.shape())\n # empire.convert('RGB')\n # print(empire.mode)\n # print(empire.shape())\n\n img = Image.open('sphere.png')\n img = 
img.resize((137, 137))\n # 将黑色的部分变为透明\n print(img.info)\n print(img.mode)\n img = img.convert(\"RGBA\")\n print(img.mode)\n width = img.size[0]\n height = img.size[1]\n for x in range(width):\n for y in range(height):\n r, g, b, a = img.getpixel((x, y))\n rgba = (r, g, b, a)\n if (r == g == b == 0):\n img.putpixel((x, y), (0, 0, 0, 0))\n img.save('sphere_2.png')\n img.show()\n",
"import numpy as np\n\n\ndef create_dark(size=(1080, 1920, 3)):\n return np.zeros(size)\n\n\ndef reverse_color(img):\n return 255 - img\n\n\ndef standard(img, min, max):\n \"\"\"标准化\"\"\"\n return (max - min) * img / 255 + min\n\n\n# 直方图均衡化是指将一幅图像的灰度直方图变平,\n# 使变换后的图像中每个灰度值的分布概率都相同。\ndef histeq(im, nbr_bins=256):\n \"\"\"\n 对灰度图像进行直方图均衡化\n :param im:灰度图像数组\n :param nbr_bins:直方图中使用小区间的数目\n :return:直方图均衡化后的图像,用来做像素值映射的累积分布函数\n \"\"\"\n\n from pylab import *\n imhist, bins = histogram(im.flatten(), nbr_bins, normed=True)\n # cumulative distribution function\n cdf = imhist.cumsum()\n # 归一化,使范围在0~255\n cdf = 255 * cdf / cdf[-1]\n # 使用累积分布函数的线性插值,计算新的像素值\n im2 = interp(im.flatten(), bins[:-1], cdf)\n return im2.reshape(im.shape), cdf\n\n\ndef pca(x):\n \"\"\"\n 主成分分析\n :param x:矩阵X ,其中该矩阵中存储训练数据,每一行为一张图片的所有像素\n :return:投影矩阵(按照维度的重要性排序)、方差和均值\n \"\"\"\n # 获取维数\n num_data, dim = x.shape\n\n # 数据中心化\n mean_x = x.mean(axis=0)\n x = x - mean_x\n\n if dim > num_data:\n # PCA- 使用紧致技巧\n M = np.dot(x, x.T) # 协方差矩阵\n e, EV = np.linalg.eigh(M) # 特征值和特征向量\n tmp = np.dot(x.T, EV).T # 这就是紧致技巧\n V = tmp[::-1] # 由于最后的特征向量是我们所需要的,所以需要将其逆转\n S = np.sqrt(e)[::-1] # 由于特征值是按照递增顺序排列的,所以需要将其逆转\n for i in range(V.shape[1]):\n V[:, i] /= S\n else:\n # PCA- 使用SVD 方法\n U, S, V = np.linalg.svd(x)\n V = V[:num_data] # 仅仅返回前nun_data 维的数据才合理\n\n # 返回投影矩阵、方差和均值\n return V, S, mean_x\n\n\ndef save(path, data, dtype='%i'):\n np.savetxt(path, data, dtype)\n\n\ndef load(path):\n return np.loadtxt(path)\n\n\n# 图像平均操作是减少图像噪声的一种简单方式,\n# 通常用于艺术特效。\ndef compute_average(im_list):\n from PIL import Image\n from pylab import *\n \"\"\"\n 计算图像列表的平均像素\n 不使用mean()函数减少内存占用\n 需要所有图像的大小相同\n :param im_list: 图像路径列表\n :return: 平均后的图像\n \"\"\"\n # 打开第一幅图像,将其存储在浮点型数组中\n averageim = array(Image.open(im_list[0]), 'f')\n for im_name in im_list[1:]:\n try:\n averageim += array(Image.open(im_name))\n except:\n print(im_name + '...skipped')\n averageim /= len(im_list)\n\n # 返回uint8 类型的平均图像\n return array(averageim, 'uint8')\n",
"import cv2\nimport numpy as np\nfrom random import randint\n\nanimals_net = cv2.ml.ANN_MLP_create()\n# 设定train函数为弹性(resilient)反向传播\nanimals_net.setTrainMethod(cv2.ml.ANN_MLP_RPROP | cv2.ml.ANN_MLP_UPDATE_WEIGHTS)\n# 设置sigmoid作为激活函数\nanimals_net.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM)\n# 按照书里的把隐藏层节点数设为8,效果非常差,所以改成3\nanimals_net.setLayerSizes(np.array([3, 3, 4]))\n# 设置终止条件\nanimals_net.setTermCriteria((cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1))\n\n\"\"\"Input arrays\nweight, length, teeth\n\"\"\"\n\n\"\"\"Output arrays\ndog, eagle, dolphin and dragon\n\"\"\"\n\n\ndef dog_sample():\n return [randint(10, 20), 1, randint(38, 42)]\n\n\ndef dog_class():\n return [1, 0, 0, 0]\n\n\ndef condor_sample():\n return [randint(3, 10), randint(3, 5), 0]\n\n\ndef condor_class():\n return [0, 1, 0, 0]\n\n\ndef dolphin_sample():\n return [randint(30, 190), randint(5, 15), randint(80, 100)]\n\n\ndef dolphin_class():\n return [0, 0, 1, 0]\n\n\ndef dragon_sample():\n return [randint(1200, 1800), randint(30, 40), randint(160, 180)]\n\n\ndef dragon_class():\n return [0, 0, 0, 1]\n\n\nSAMPLES = 5000\n# 每种动物添加5000个样本\nfor x in range(1, SAMPLES + 1):\n if x % 100 == 0:\n print(\"Samples %d/%d\" % (x, SAMPLES))\n animals_net.train(np.array([dog_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE,\n np.array([dog_class()], dtype=np.float32))\n animals_net.train(np.array([condor_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE,\n np.array([condor_class()], dtype=np.float32))\n animals_net.train(np.array([dolphin_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE,\n np.array([dolphin_class()], dtype=np.float32))\n animals_net.train(np.array([dragon_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE,\n np.array([dragon_class()], dtype=np.float32))\n\nprint(animals_net.predict(np.array([dog_sample()], dtype=np.float32)))\nprint(animals_net.predict(np.array([condor_sample()], dtype=np.float32)))\nprint(animals_net.predict(np.array([dolphin_sample()], dtype=np.float32)))\nprint(animals_net.predict(np.array([dragon_sample()], dtype=np.float32)))\n",
"from collections import OrderedDict\n\nimport torch\nimport torch.nn.functional as F\n\n\n# four ways to create net\n\nclass Net_1(torch.nn.Module):\n def __init__(self):\n super(Net_1, self).__init__()\n self.conv1 = torch.nn.Conv2d(3, 32, 3, 1, 1)\n self.dense1 = torch.nn.Linear(32 * 3 * 3, 128)\n self.dense2 = torch.nn.Linear(128, 10)\n\n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), 2)\n x = x.view(x.size(0), -1)\n x = F.relu(self.dense1(x))\n x = self.dense2(x)\n return x\n\n\nprint('Method 1:')\nmodel1 = Net_1()\nprint(model1)\n\n\n# 方法二,利用torch.nn.Sequential()容器进行快速搭建\n\nclass Net_2(torch.nn.Module):\n def __init__(self):\n super(Net_2, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(3, 32, 3, 1, 1),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(2)\n )\n self.dense = torch.nn.Sequential(\n torch.nn.Linear(32 * 3 * 3, 128),\n torch.nn.ReLU(),\n torch.nn.Linear(128, 10)\n )\n\n def forward(self, x):\n conv_out = self.conv1(x)\n res = conv_out.view(conv_out.size(0), -1)\n out = self.dense(res)\n return out\n\n\nprint('Method 2')\nmodel2 = Net_2()\nprint(model2)\n\n\n# 对第二种方法进行改进,每一层增加了一个单独的名字\n\nclass Net_3(torch.nn.Module):\n def __init__(self):\n super(Net_3, self).__init__()\n self.conv = torch.nn.Sequential()\n self.conv.add_module('conv1', torch.nn.Conv2d(3, 32, 3, 1, 1))\n self.conv.add_module('relu1', torch.nn.ReLU())\n self.conv.add_module('pool1', torch.nn.MaxPool2d(2))\n self.dense = torch.nn.Sequential()\n self.dense.add_module('dense1', torch.nn.Linear(32 * 3 * 3, 128))\n self.dense.add_module('relu2', torch.nn.ReLU())\n self.dense.add_module('dense2', torch.nn.Linear(128, 10))\n\n def forward(self, x):\n conv_out = self.conv1(x)\n res = conv_out.view(conv_out.size(0), -1)\n out = self.dense(res)\n return out\n\n\nprint('Method 3:')\nmodel3 = Net_3()\nprint(model3)\n\n\nclass Net_4(torch.nn.Module):\n def __init__(self):\n super(Net_4, self).__init__()\n self.conv = torch.nn.Sequential(\n OrderedDict([\n ('conv1', torch.nn.Conv2d(3, 32, 3, 1, 1)),\n ('relu1', torch.nn.ReLU()),\n ('pool1', torch.nn.MaxPool2d(2))\n ])\n )\n\n self.dense = torch.nn.Sequential(\n OrderedDict([\n ('dense1', torch.nn.Linear(32 * 3 * 3, 128)),\n ('relu2', torch.nn.ReLU()),\n ('dense2', torch.nn.Linear(128, 10))\n ])\n )\n\n def forward(self, x):\n conv_out = self.conv1(x)\n res = conv_out.view(conv_out.size(0), -1)\n out = self.dense(res)\n return out\n\n\nprint('Method 4:')\nmodel4 = Net_4()\nprint(model4)\n",
"import os\nimport random\n\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\n\nfrom base.base_dataset import BaseDataSet\nfrom configs.dataset_config import ImageDataSetConfig\nfrom utils.common_utils import copy_attr\nfrom utils.common_utils import copy_need_attr\n\n\"\"\"\nmask图像和原图像两个文件夹\n根据文件名判断是否是同一个文件夹\n\"\"\"\n\n\nclass MyImageDataSet(ImageDataSetConfig):\n def __init__(self):\n super(MyImageDataSet, self).__init__()\n self.train_path = r\"/home/shi/Downloads/dataset/polyp/TMP/09/train/\"\n self.test_path = r\"/home/shi/Downloads/dataset/polyp/TMP/09/test/\"\n self.train_dataset = ImageDataSet(self.train_path, self.image_transforms, self.mask_transforms,\n random_state=self.random_state)\n self.num_train = len(self.train_dataset)\n self.test_dataset = ImageDataSet(self.test_path, self.image_transforms, self.mask_transforms,\n random_state=self.random_state)\n self.num_valid = self.num_test = len(self.test_dataset)\n print(\"num train : {}, num_test : {}, num valid : {}\".format(self.num_train, self.num_test, self.num_valid))\n\n def get_dataloader(self, target):\n self.train_loader = DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=self.shuffle)\n self.valid_loader = DataLoader(self.test_dataset, batch_size=self.batch_size4test, shuffle=self.shuffle)\n self.test_loader = DataLoader(self.test_dataset, batch_size=self.batch_size4test, shuffle=self.shuffle)\n copy_need_attr(self, target, [\"valid_loader\", \"train_loader\", \"test_loader\"])\n\n def train(self):\n self.train_dataset.train()\n self.test_dataset.train()\n\n\nclass MyImageEdgeDataSet(ImageDataSetConfig):\n def __init__(self):\n super(MyImageEdgeDataSet, self).__init__()\n self.train_path = r\"/home/shi/Downloads/dataset/polyp/TMP/09/train/\"\n self.test_path = r\"/home/shi/Downloads/dataset/polyp/TMP/09/test/\"\n self.train_dataset = ImageEdgeDataset(self.train_path, self.image_transforms, self.mask_transforms,\n edge_transforms=self.edge_transforms,\n random_state=self.random_state)\n self.num_train = len(self.train_dataset)\n self.test_dataset = ImageEdgeDataset(self.test_path, self.image_transforms, self.mask_transforms,\n edge_transforms=self.edge_transforms,\n random_state=self.random_state)\n self.num_valid = self.num_test = len(self.test_dataset)\n\n def get_dataloader(self, target):\n self.train_loader = DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=self.shuffle)\n self.valid_loader = DataLoader(self.test_dataset, batch_size=self.batch_size4test, shuffle=self.shuffle)\n self.test_loader = DataLoader(self.test_dataset, batch_size=self.batch_size4test, shuffle=self.shuffle)\n copy_need_attr(self, target, [\"valid_loader\", \"train_loader\", \"test_loader\"])\n\n def train(self):\n self.train_dataset.train()\n self.test_dataset.train()\n\n\nclass ImageEdgeDataset(BaseDataSet):\n def __init__(self, root_path=None, image_transforms=None, mask_transforms=None, edge_transforms=None,\n random_state=None):\n super(ImageEdgeDataset, self).__init__()\n self.image_paths = []\n self.mask_paths = []\n self.edge_paths = []\n if random_state is not None:\n self.random_state = random_state\n # set seed for random\n self.set_seed()\n if root_path is not None:\n self.root_path = root_path\n\n self.image_root_path = os.path.join(self.root_path, \"data\")\n self.mask_root_path = os.path.join(self.root_path, \"mask\")\n self.edge_root_path = os.path.join(self.root_path, \"edge\")\n if image_transforms is not None:\n self.image_transforms = image_transforms\n if 
mask_transforms is not None:\n self.mask_transforms = mask_transforms\n if edge_transforms is not None:\n self.edge_transforms = edge_transforms\n\n filenames = sorted(os.listdir(self.image_root_path))\n\n random.shuffle(filenames)\n self.image_paths = [os.path.join(self.image_root_path, filename) for filename in filenames]\n self.mask_paths = [os.path.join(self.mask_root_path, filename) for filename in filenames]\n self.edge_paths = [os.path.join(self.edge_root_path, filename) for filename in filenames]\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, index):\n image = Image.open(self.image_paths[index])\n mask = Image.open(self.mask_paths[index])\n edge = Image.open(self.edge_paths[index])\n if self.image_transforms is not None:\n image = self.image_transforms(image)\n if self.mask_transforms is not None:\n mask = self.mask_transforms(mask)\n if self.edge_transforms is not None:\n edge = self.edge_transforms(edge)\n image = torch.cat([image, edge])\n # if use few sample for test ,will not have test_model\n if getattr(self, \"test_model\", False):\n return image, mask, os.path.basename(self.image_paths[index])\n else:\n return image, mask\n\n def set_data_num(self, num):\n self.image_paths = self.image_paths[:num]\n\n\nclass ImageDataSet(BaseDataSet):\n def __init__(self, root_path=None, image_transforms=None, mask_transforms=None, random_state=None):\n super(ImageDataSet, self).__init__()\n self.image_paths = []\n self.mask_paths = []\n if random_state is not None:\n self.random_state = random_state\n # set seed for random\n self.set_seed()\n if root_path is not None:\n self.root_path = root_path\n self.image_root_path = os.path.join(self.root_path, \"data\")\n self.mask_root_path = os.path.join(self.root_path, \"mask\")\n if image_transforms is not None:\n self.image_transforms = image_transforms\n if mask_transforms is not None:\n self.mask_transforms = mask_transforms\n\n filenames = sorted(os.listdir(self.image_root_path))\n\n random.shuffle(filenames)\n self.image_paths = [os.path.join(self.image_root_path, filename) for filename in filenames]\n self.mask_paths = [os.path.join(self.mask_root_path, filename) for filename in filenames]\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, index):\n image = Image.open(self.image_paths[index])\n mask = Image.open(self.mask_paths[index])\n if self.image_transforms is not None:\n image = self.image_transforms(image)\n if self.mask_transforms is not None:\n mask = self.mask_transforms(mask)\n # if use few sample for test ,will not have test_model\n if getattr(self, \"test_model\", False):\n return image, mask, os.path.basename(self.image_paths[index])\n else:\n return image, mask\n\n def set_data_num(self, num):\n self.image_paths = self.image_paths[:num]\n\n\nclass ImageDataSet0302(BaseDataSet, ImageDataSetConfig):\n\n def __init__(self):\n super(ImageDataSet0302, self).__init__()\n self.image_paths = []\n self.mask_paths = []\n self.IMAGE_PATHS = []\n self.MASK_PATHS = []\n self.set_seed()\n file_names = sorted(os.listdir(self.image_path))\n if self.shuffle:\n random.shuffle(file_names)\n for file_name in file_names:\n self.IMAGE_PATHS.append(os.path.join(self.image_path, file_name))\n self.MASK_PATHS.append(os.path.join(self.mask_path, file_name))\n self.image_paths.append(os.path.join(self.image_path, file_name))\n self.mask_paths.append(os.path.join(self.mask_path, file_name))\n if self.num_train is None:\n self.num_train = len(self.mask_paths)\n if self.num_test is None:\n 
self.num_test = len(self.mask_paths)\n\n def get_sample_dataloader(self, num_samples, target):\n \"\"\"get sample dataloader to test\"\"\"\n self.image_paths, self.mask_paths = self.IMAGE_PATHS[:num_samples * 3], self.MASK_PATHS[:num_samples * 3]\n self.train_data, self.valid_data, self.test_data = torch.utils.data.random_split(self,\n [num_samples,\n num_samples,\n num_samples])\n self.train_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True)\n self.valid_loader = DataLoader(self.valid_data, batch_size=self.batch_size, shuffle=True)\n self.test_loader = DataLoader(self.test_data, batch_size=self.batch_size4test, shuffle=True)\n copy_need_attr(target, [\"valid_loader\", \"train_loader\", \"test_loader\"])\n\n def get_dataloader(self, target):\n if not self.test_model:\n if self.num_train is not None:\n if len(self) > self.num_train:\n self.set_data_num(self.num_train)\n else:\n self.num_train = len(self)\n else:\n self.num_train = len(self.train_data)\n if self.valid_rate is None:\n self.train_loader = DataLoader(self, batch_size=self.batch_size, shuffle=True)\n return self.train_loader\n else:\n num_valid_data = int(self.num_train * self.valid_rate)\n if num_valid_data == 0 or num_valid_data == self.num_train:\n self.logger.error(\"valid datateset is None or train dataset is None\")\n self.train_data, self.val_data = torch.utils.data.random_split(self,\n [self.num_train - num_valid_data,\n num_valid_data])\n self.train_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True)\n self.valid_loader = DataLoader(self.val_data, batch_size=self.batch_size, shuffle=True)\n else:\n if self.num_test is not None:\n self.set_data_num(self.num_test)\n else:\n self.num_test = len(self)\n self.test_loader = DataLoader(self, batch_size=self.batch_size4test, shuffle=True)\n\n copy_attr(self, target)\n\n def sort_dataset(self):\n \"\"\"calculate number of train_dataset, valid_dataset and test_dataset\"\"\"\n # calculate the number of samples to train\n if not self.test_model:\n if self.num_train is not None:\n if len(self) > self.num_train:\n self.set_data_num(self.num_train)\n else:\n self.num_train = len(self)\n else:\n self.num_train = len(self.train_data)\n if self.valid_rate is None:\n self.num_valid = 0\n else:\n num_valid_data = int(self.num_train * self.valid_rate)\n if num_valid_data == 0 or num_valid_data == self.num_train:\n self.logger.error(\"valid datateset is None or train dataset is None\")\n self.train_data, self.val_data = torch.utils.data.random_split(self,\n [self.num_train - num_valid_data,\n num_valid_data])\n self.train_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True)\n self.valid_loader = DataLoader(self.val_data, batch_size=self.batch_size, shuffle=True)\n else:\n if self.num_test is not None:\n self.set_data_num(self.num_test)\n else:\n self.num_test = len(self)\n self.test_loader = DataLoader(self, batch_size=self.batch_size4test, shuffle=True)\n\n if self.test_model:\n if self.num_test is not None:\n self.set_data_num(self.num_test)\n else:\n self.num_test = len(self)\n else:\n if self.num_train is None:\n self.num_train = len(self.train_data)\n else:\n if len(self) > self.num_train:\n self.set_data_num(self.num_train)\n else:\n self.num_train = len(self)\n if self.num_train is not None:\n # if number of train which been set is bigger than amount of train_dataset\n if len(self.Y) > self.num_train:\n self.set_data_num(self.num_train)\n else:\n self.num_train = len(self)\n else:\n self.num_train = 
len(self.Y)\n\n # calculate the number of samples to valid\n if self.valid_rate is None:\n self.num_valid = 0\n else:\n self.num_valid = int(self.num_train * self.valid_rate)\n self.num_train = self.num_train - self.num_valid\n if self.num_valid == 0 or self.num_valid == self.num_train:\n self.logger.error(\"valid dataset is None or train dataset is None\")\n\n self.logger.info(\n \"num_train:{} \\t num_valid:{} \\t num_test:{} \".format(self.num_train, self.num_valid, self.num_test))\n return self.num_train, self.num_test, self.num_valid\n\n def create_dataset(self):\n pass\n\n def create_dataloader(self):\n pass\n\n def load_config(self):\n copy_attr(ImageDataSetConfig(), self)\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, index):\n image = Image.open(self.image_paths[index])\n mask = Image.open(self.mask_paths[index])\n if self.image_transforms is not None:\n image = self.image_transforms(image)\n if self.mask_transforms is not None:\n mask = self.mask_transforms(mask)\n # if use few sample for test ,will not have test_model\n if getattr(self, \"test_model\", False):\n return image, mask, os.path.basename(self.image_paths[index])\n else:\n return image, mask\n\n def set_data_num(self, num):\n if self.test_model:\n self.image_paths, self.mask_paths = self.IMAGE_PATHS[-num:], self.MASK_PATHS[-num:]\n else:\n self.image_paths, self.mask_paths = self.IMAGE_PATHS[:num], self.MASK_PATHS[:num]\n\n\nfrom configs.dataset_config import ImageDataSet4EdgeConfig\n\n\nclass ImageDataSet4Edge(BaseDataSet, ImageDataSet4EdgeConfig):\n\n def __init__(self, ):\n super(ImageDataSet4Edge, self).__init__()\n self.image_paths = []\n self.mask_paths = []\n self.IMAGE_PATHS = []\n self.MASK_PATHS = []\n self.PREDICT_PATHS = []\n self.EDGE_PATHS = []\n self.edge_paths = []\n self.predict_paths = []\n self.set_seed()\n file_names = sorted(os.listdir(self.image_path))\n if self.shuffle:\n random.shuffle(file_names)\n for file_name in file_names:\n self.IMAGE_PATHS.append(os.path.join(self.image_path, file_name))\n self.MASK_PATHS.append(os.path.join(self.mask_path, file_name))\n self.EDGE_PATHS.append(os.path.join(self.edge_path, file_name))\n self.PREDICT_PATHS.append(os.path.join(self.predict_path, file_name))\n self.image_paths.append(os.path.join(self.image_path, file_name))\n self.mask_paths.append(os.path.join(self.mask_path, file_name))\n self.edge_paths.append(os.path.join(self.edge_path, file_name))\n self.predict_paths.append(os.path.join(self.predict_path, file_name))\n\n def copy_attr(self, target, attr_names):\n for attr_name in attr_names:\n setattr(target, attr_name, getattr(self, attr_name))\n\n def get_dataloader(self, target):\n if not self.test_model:\n if self.num_train is not None:\n if len(self) > self.num_train:\n self.set_data_num(self.num_train)\n else:\n self.num_train = len(self)\n else:\n self.num_train = len(self.train_data)\n if self.valid_rate is None:\n self.train_loader = DataLoader(self, batch_size=self.batch_size, shuffle=True)\n return self.train_loader\n else:\n num_valid_data = int(self.num_train * self.valid_rate)\n if num_valid_data == 0 or num_valid_data == self.num_train:\n self.logger.error(\"valid datateset is None or train dataset is None\")\n self.train_data, self.val_data = torch.utils.data.random_split(self,\n [self.num_train - num_valid_data,\n num_valid_data])\n self.train_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True)\n self.valid_loader = DataLoader(self.val_data, batch_size=self.batch_size, 
shuffle=True)\n else:\n if self.num_test is not None:\n self.set_data_num(self.num_test)\n else:\n self.num_test = len(self)\n self.test_loader = DataLoader(self, batch_size=self.batch_size4test, shuffle=True)\n\n from utils.common_utils import copy_attr\n copy_attr(self, target)\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, index):\n image_path = self.image_paths[index]\n edge_path = self.edge_paths[index]\n mask_path = self.mask_paths[index]\n predict_path = self.predict_paths[index]\n image = Image.open(image_path)\n edge = Image.open(edge_path)\n mask = Image.open(mask_path)\n predict = Image.open(predict_path)\n if self.image_transforms is not None:\n image = self.image_transforms(image)\n if self.mask_transforms is not None:\n mask = self.mask_transforms(mask)\n if self.edge_transforms is not None:\n edge = self.edge_transforms(edge)\n if self.predict_transforms is not None:\n predict = self.predict_transforms(predict)\n\n # concate source image and edge image to create X data\n image = torch.cat([image, edge, predict], dim=0)\n # image = torch.cat([image, predict], dim=0)\n # if use few sample for test ,will not have test_model\n if getattr(self, \"test_model\", False):\n return image, mask, os.path.basename(self.image_paths[index])\n else:\n return image, mask\n\n def set_data_num(self, num):\n if self.test_model:\n self.image_paths, self.mask_paths = self.IMAGE_PATHS[-num:], self.MASK_PATHS[-num:]\n else:\n self.image_paths, self.mask_paths = self.IMAGE_PATHS[:num], self.MASK_PATHS[:num]\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.randn",
"torch.tensor",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.dot",
"numpy.linalg.svd",
"numpy.sqrt",
"numpy.linalg.eigh",
"numpy.savetxt",
"numpy.zeros",
"numpy.loadtxt"
],
[
"numpy.array"
],
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.ReLU"
],
[
"torch.utils.data.random_split",
"torch.utils.data.DataLoader",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FaustinCarter/lmfit-py | [
"7fbb75b2fd3f383e78692fd85c9a646793d4b071",
"7fbb75b2fd3f383e78692fd85c9a646793d4b071",
"7fbb75b2fd3f383e78692fd85c9a646793d4b071",
"7fbb75b2fd3f383e78692fd85c9a646793d4b071"
] | [
"tests/test_itercb.py",
"tests/test_basicfit.py",
"examples/doc_parameters_basic.py",
"examples/doc_builtinmodels_peakmodels.py"
] | [
"import numpy as np\nfrom lmfit import Parameters, minimize, report_fit\nfrom lmfit.models import LinearModel, GaussianModel\nfrom lmfit.lineshapes import gaussian\n\ndef per_iteration(pars, iter, resid, *args, **kws):\n \"\"\"iteration callback, will abort at iteration 23\n \"\"\"\n # print( iter, ', '.join([\"%s=%.4f\" % (p.name, p.value) for p in pars.values()]))\n return iter == 23\n\ndef test_itercb():\n x = np.linspace(0, 20, 401)\n y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)\n y = y - .20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))\n mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')\n\n pars = mod.make_params(peak_amplitude=21.0,\n peak_center=7.0,\n peak_sigma=2.0,\n bkg_intercept=2,\n bkg_slope=0.0)\n\n out = mod.fit(y, pars, x=x, iter_cb=per_iteration)\n\n assert(out.nfev == 23)\n assert(out.aborted)\n assert(not out.errorbars)\n assert(not out.success)\n",
"import numpy as np\nfrom lmfit import minimize, Parameters, Parameter, report_fit\nfrom lmfit_testutils import assert_paramval, assert_paramattr\n\n\ndef test_basic():\n # create data to be fitted\n x = np.linspace(0, 15, 301)\n data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +\n np.random.normal(size=len(x), scale=0.2) )\n\n # define objective function: returns the array to be minimized\n def fcn2min(params, x, data):\n \"\"\" model decaying sine wave, subtract data\"\"\"\n amp = params['amp']\n shift = params['shift']\n omega = params['omega']\n decay = params['decay']\n\n model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)\n return model - data\n\n # create a set of Parameters\n params = Parameters()\n params.add('amp', value= 10, min=0)\n params.add('decay', value= 0.1)\n params.add('shift', value= 0.0, min=-np.pi/2., max=np.pi/2)\n params.add('omega', value= 3.0)\n\n # do fit, here with leastsq model\n result = minimize(fcn2min, params, args=(x, data))\n\n # calculate final result\n final = data + result.residual\n\n # report_fit(result)\n\n assert(result.nfev > 5)\n assert(result.nfev < 500)\n assert(result.chisqr > 1)\n assert(result.nvarys == 4)\n assert_paramval(result.params['amp'], 5.03, tol=0.05)\n assert_paramval(result.params['omega'], 2.0, tol=0.05)\n\n\nif __name__ == '__main__':\n test_basic()\n",
"#!/usr/bin/env python\n\n# <examples/doc_parameters_basic.py>\nimport numpy as np\n\nfrom lmfit import Minimizer, Parameters, report_fit\n\n# create data to be fitted\nx = np.linspace(0, 15, 301)\ndata = (5. * np.sin(2*x - 0.1) * np.exp(-x*x*0.025) +\n np.random.normal(size=len(x), scale=0.2))\n\n\n# define objective function: returns the array to be minimized\ndef fcn2min(params, x, data):\n \"\"\"Model a decaying sine wave and subtract data.\"\"\"\n amp = params['amp']\n shift = params['shift']\n omega = params['omega']\n decay = params['decay']\n model = amp * np.sin(x*omega + shift) * np.exp(-x*x*decay)\n return model - data\n\n\n# create a set of Parameters\nparams = Parameters()\nparams.add('amp', value=10, min=0)\nparams.add('decay', value=0.1)\nparams.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)\nparams.add('omega', value=3.0)\n\n# do fit, here with leastsq model\nminner = Minimizer(fcn2min, params, fcn_args=(x, data))\nresult = minner.minimize()\n\n# calculate final result\nfinal = data + result.residual\n\n# write error report\nreport_fit(result)\n\n# try to plot results\ntry:\n import matplotlib.pyplot as plt\n plt.plot(x, data, 'k+')\n plt.plot(x, final, 'r')\n plt.show()\nexcept ImportError:\n pass\n# <end of examples/doc_parameters_basic.py>\n",
"#!/usr/bin/env python\n\n# <examples/doc_builtinmodels_peakmodels.py>\nimport matplotlib.pyplot as plt\nfrom numpy import loadtxt\n\nfrom lmfit.models import GaussianModel, LorentzianModel, VoigtModel\n\ndata = loadtxt('test_peak.dat')\nx = data[:, 0]\ny = data[:, 1]\n\ngamma_free = False\n\nMODEL = 'gauss'\n# MODEL = 'loren'\n# MODEL = 'voigt'\n# gamma_free = True\n\nif MODEL.lower().startswith('g'):\n mod = GaussianModel()\n gamma_free = False\n figname = '../doc/_images/models_peak1.png'\nelif MODEL.lower().startswith('l'):\n mod = LorentzianModel()\n gamma_free = False\n figname = '../doc/_images/models_peak2.png'\nelif MODEL.lower().startswith('v'):\n mod = VoigtModel()\n figname = '../doc/_images/models_peak3.png'\n\npars = mod.guess(y, x=x)\n\nif gamma_free:\n pars['gamma'].set(value=0.7, vary=True, expr='')\n figname = '../doc/_images/models_peak4.png'\n\nout = mod.fit(y, pars, x=x)\nprint(out.fit_report(min_correl=0.25))\n\nplt.plot(x, y, 'b-')\nplt.plot(x, out.best_fit, 'r-')\n# plt.savefig(figname)\nplt.show()\n# <end examples/doc_builtinmodels_peakmodels.py>\n"
] | [
[
"numpy.linspace"
],
[
"numpy.exp",
"numpy.linspace",
"numpy.sin"
],
[
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JamesJeffryes/kb_phylogenomics | [
"133b7b7c4179b5fb1b51bade70069a545bca91fc"
] | [
"lib/kb_phylogenomics/kb_phylogenomicsImpl.py"
] | [
"# -*- coding: utf-8 -*-\n#BEGIN_HEADER\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport shutil\nimport hashlib\nimport subprocess\nimport requests\nrequests.packages.urllib3.disable_warnings()\nimport re\nimport traceback\nimport uuid\nfrom datetime import datetime\nfrom pprint import pprint, pformat\n\nimport numpy as np\nimport math\nfrom Bio import SeqIO\n\nfrom biokbase.workspace.client import Workspace as workspaceService\n#from Workspace.WorkspaceClient import Workspace as workspaceService\nfrom DataFileUtil.DataFileUtilClient import DataFileUtil as DFUClient\nfrom KBaseReport.KBaseReportClient import KBaseReport\n\nfrom DomainAnnotation.DomainAnnotationClient import DomainAnnotation\n\nimport ete3\nimport matplotlib.pyplot as pyplot # use this instead\nfrom matplotlib.patches import Arc\nfrom matplotlib.patches import Rectangle\n\n#END_HEADER\n\n\nclass kb_phylogenomics:\n '''\n Module Name:\n kb_phylogenomics\n\n Module Description:\n A KBase module: kb_phylogenomics\n\nThis module contains methods for running and visualizing results of phylogenomics and comparative genomics analyses\n '''\n\n ######## WARNING FOR GEVENT USERS ####### noqa\n # Since asynchronous IO can lead to methods - even the same method -\n # interrupting each other, you must be *very* careful when using global\n # state. A method could easily clobber the state set by another while\n # the latter method is running.\n ######################################### noqa\n VERSION = \"1.2.0\"\n GIT_URL = \"https://github.com/kbaseapps/kb_phylogenomics\"\n GIT_COMMIT_HASH = \"43733230d3f70a2eccc123b3867e99775b0d9f4c\"\n\n #BEGIN_CLASS_HEADER\n\n def log(self, target, message):\n if target is not None:\n target.append(message)\n print(message)\n sys.stdout.flush()\n\n #END_CLASS_HEADER\n\n # config contains contents of config file in a hash or None if it couldn't\n # be found\n def __init__(self, config):\n #BEGIN_CONSTRUCTOR\n self.workspaceURL = config['workspace-url']\n self.shockURL = config['shock-url']\n #self.handleURL = config['handle-service-url']\n self.serviceWizardURL = config['service-wizard-url']\n self.callbackURL = os.environ['SDK_CALLBACK_URL']\n self.scratch = os.path.abspath(config['scratch'])\n\n #pprint(config)\n\n if not os.path.exists(self.scratch):\n os.makedirs(self.scratch)\n\n #self.genome_feature_id_delim = '.f:'\n\n #END_CONSTRUCTOR\n pass\n\n\n def view_tree(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_tree_Input\" (view_tree() ** **\n show a KBase Tree and make newick and images downloadable) ->\n structure: parameter \"workspace_name\" of type \"workspace_name\" (**\n Common types), parameter \"input_tree_ref\" of type \"data_obj_ref\",\n parameter \"desc\" of String\n :returns: instance of type \"view_tree_Output\" -> structure: parameter\n \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_tree\n\n #### STEP 0: init\n ##\n dfu = DFUClient(self.callbackURL)\n console = []\n invalid_msgs = []\n self.log(console,'Running view_tree() with params=')\n self.log(console, \"\\n\"+pformat(params))\n report = ''\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join (self.scratch, 'output_'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\n #### STEP 1: do some basic checks\n ##\n if 'workspace_name' not in params:\n raise 
ValueError('workspace_name parameter is required')\n if 'input_tree_ref' not in params:\n raise ValueError('input_tree_ref parameter is required')\n #if 'output_name' not in params:\n # raise ValueError('output_name parameter is required')\n\n\n #### STEP 2: load the method provenance from the context object\n ##\n self.log(console,\"SETTING PROVENANCE\") # DEBUG\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n # add additional info to provenance here, in this case the input data object reference\n provenance[0]['input_ws_objects'] = []\n provenance[0]['input_ws_objects'].append(params['input_tree_ref'])\n provenance[0]['service'] = 'kb_phylogenomics'\n provenance[0]['method'] = 'view_tree'\n\n\n #### STEP 3: Get tree and save as newick file\n ##\n try:\n ws = workspaceService(self.workspaceURL, token=ctx['token'])\n objects = ws.get_objects([{'ref': params['input_tree_ref']}])\n data = objects[0]['data']\n info = objects[0]['info']\n intree_name = info[1]\n intree_type_name = info[2].split('.')[1].split('-')[0]\n\n except Exception as e:\n raise ValueError('Unable to fetch input_tree_ref object from workspace: ' + str(e))\n #to get the full stack trace: traceback.format_exc()\n \n if intree_type_name == 'Tree':\n tree_in = data\n else:\n raise ValueError('Cannot yet handle input_tree type of: '+type_name)\n\n intree_newick_file_path = os.path.join(output_dir, intree_name+\".newick\")\n self.log(console, 'writing intree file: '+intree_newick_file_path)\n with open(intree_newick_file_path, 'w', 0) as intree_newick_file_handle:\n intree_newick_file_handle.write(tree_in['tree'])\n\n # upload\n try:\n newick_upload_ret = dfu.file_to_shock({'file_path': intree_newick_file_path,\n #'pack': 'zip'})\n 'make_handle': 0})\n except:\n raise ValueError ('error uploading newick file to shock')\n\n\n #### STEP 4: if labels defined, make separate newick-labels file\n ## (NOTE: adjust IDs so ETE3 parse doesn't choke on conflicting chars)\n ##\n if 'default_node_labels' in tree_in:\n newick_labels_file = intree_name+'-labels.newick'\n output_newick_labels_file_path = os.path.join(output_dir, newick_labels_file);\n #default_row_ids = tree_in['default_row_labels']\n #new_ids = dict()\n #for row_id in default_row_ids.keys():\n # new_ids[row_id] = default_row_ids[row_id]\n\n mod_newick_buf = tree_in['tree']\n mod_newick_buf = re.sub('\\|','%'+'|'.encode(\"hex\"), mod_newick_buf)\n #for row_id in new_ids.keys():\n for node_id in tree_in['default_node_labels'].keys():\n label = tree_in['default_node_labels'][node_id]\n #self.log (console, \"node \"+node_id+\" label B4: '\"+label+\"'\") # DEBUG\n label = re.sub(' \\(kb[^\\)]*\\)', '', label) # just get rid of problematic (kb|g.1234)\n label = re.sub('\\s','_',label)\n #label = re.sub('\\/','%'+'/'.encode(\"hex\"), label)\n #label = re.sub(r'\\\\','%'+'\\\\'.encode(\"hex\"), label)\n #label = re.sub('\\[','%'+'['.encode(\"hex\"), label)\n #label = re.sub('\\]','%'+']'.encode(\"hex\"), label)\n label = re.sub('\\(','[', label)\n label = re.sub('\\)',']', label)\n label = re.sub('\\:','%'+':'.encode(\"hex\"), label)\n label = re.sub('\\;','%'+';'.encode(\"hex\"), label)\n label = re.sub('\\|','%'+'|'.encode(\"hex\"), label)\n #self.log (console, \"node \"+node_id+\" label AF: '\"+label+\"'\") # DEBUG\n #self.log (console, \"NEWICK B4: '\"+mod_newick_buf+\"'\") # DEBUG\n mod_node_id = re.sub('\\|','%'+'|'.encode(\"hex\"), node_id)\n mod_newick_buf = re.sub ('\\('+mod_node_id+'\\:', '('+label+':', mod_newick_buf)\n mod_newick_buf = 
re.sub ('\\,'+mod_node_id+'\\:', ','+label+':', mod_newick_buf)\n #self.log (console, \"NEWICK AF: '\"+mod_newick_buf+\"'\") # DEBUG\n\n #self.log(console, \"new_id: '\"+new_id+\"' label: '\"+label+\"'\") # DEBUG\n \n mod_newick_buf = re.sub ('_', ' ', mod_newick_buf)\n with open (output_newick_labels_file_path, 'w', 0) as output_newick_labels_file_handle:\n output_newick_labels_file_handle.write(mod_newick_buf)\n\n # upload\n try:\n newick_labels_upload_ret = dfu.file_to_shock({'file_path': output_newick_labels_file_path,\n #'pack': 'zip'})\n 'make_handle': 0})\n except:\n raise ValueError ('error uploading newick labels file to shock')\n\n\n #### STEP 5: Create html with tree image\n ##\n html_output_dir = os.path.join(output_dir, 'output_html.'+str(timestamp))\n if not os.path.exists(html_output_dir):\n os.makedirs(html_output_dir)\n html_file = intree_name+'.html'\n png_file = intree_name+'.png'\n pdf_file = intree_name+'.pdf'\n output_html_file_path = os.path.join(html_output_dir, html_file);\n output_png_file_path = os.path.join(html_output_dir, png_file);\n output_pdf_file_path = os.path.join(output_dir, pdf_file);\n newick_buf = tree_in['tree']\n if 'default_node_labels' in tree_in:\n newick_buf = mod_newick_buf\n self.log(console, \"NEWICK_BUF: '\"+newick_buf+\"'\")\n\n # init ETE3 objects\n t = ete3.Tree(newick_buf)\n ts = ete3.TreeStyle()\n\n # customize\n ts.show_leaf_name = True\n ts.show_branch_length = False\n ts.show_branch_support = True\n #ts.scale = 50 # 50 pixels per branch length unit\n ts.branch_vertical_margin = 5 # pixels between adjacent branches\n title_disp = intree_name\n if 'desc' in params and params['desc'] != None and params['desc'] != '':\n title_disp += ': '+params['desc']\n ts.title.add_face(ete3.TextFace(title_disp, fsize=10), column=0)\n\n node_style = ete3.NodeStyle()\n node_style[\"fgcolor\"] = \"#606060\" # for node balls\n node_style[\"size\"] = 10 # for node balls (gets reset based on support)\n node_style[\"vt_line_color\"] = \"#606060\"\n node_style[\"hz_line_color\"] = \"#606060\"\n node_style[\"vt_line_width\"] = 2\n node_style[\"hz_line_width\"] = 2\n node_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n node_style[\"hz_line_type\"] = 0\n\n leaf_style = ete3.NodeStyle()\n leaf_style[\"fgcolor\"] = \"#ffffff\" # for node balls\n leaf_style[\"size\"] = 2 # for node balls (we're using it to add space)\n leaf_style[\"vt_line_color\"] = \"#606060\" # unecessary\n leaf_style[\"hz_line_color\"] = \"#606060\"\n leaf_style[\"vt_line_width\"] = 2\n leaf_style[\"hz_line_width\"] = 2\n leaf_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n leaf_style[\"hz_line_type\"] = 0\n\n for n in t.traverse():\n if n.is_leaf():\n style = leaf_style\n else:\n style = ete3.NodeStyle()\n for k in node_style.keys():\n style[k] = node_style[k]\n\n if n.support > 0.95:\n style[\"size\"] = 6\n elif n.support > 0.90:\n style[\"size\"] = 5\n elif n.support > 0.80:\n style[\"size\"] = 4\n else:\n style[\"size\"] = 2\n\n n.set_style(style)\n\n # save images\n dpi = 300\n img_units = \"in\"\n img_pix_width = 1200\n img_in_width = round(float(img_pix_width)/float(dpi), 1)\n img_html_width = img_pix_width // 2\n t.render(output_png_file_path, w=img_in_width, units=img_units, dpi=dpi, tree_style=ts)\n t.render(output_pdf_file_path, w=img_in_width, units=img_units, tree_style=ts) # dpi irrelevant\n\n # make html\n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head><title>KBase Tree: '+intree_name+'</title></head>']\n 
html_report_lines += ['<body bgcolor=\"white\">']\n html_report_lines += ['<img width='+str(img_html_width)+' src=\"'+png_file+'\">']\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n\n html_report_str = \"\\n\".join(html_report_lines)\n with open (output_html_file_path, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n\n\n # upload images and html\n try:\n png_upload_ret = dfu.file_to_shock({'file_path': output_png_file_path,\n #'pack': 'zip'})\n 'make_handle': 0})\n except:\n raise ValueError ('error uploading png file to shock')\n try:\n pdf_upload_ret = dfu.file_to_shock({'file_path': output_pdf_file_path,\n #'pack': 'zip'})\n 'make_handle': 0})\n except:\n raise ValueError ('error uploading pdf file to shock')\n try:\n html_upload_ret = dfu.file_to_shock({'file_path': html_output_dir,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('error uploading png file to shock')\n\n\n # Create report obj\n #\n reportName = 'blast_report_'+str(uuid.uuid4())\n #report += output_newick_buf+\"\\n\"\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n #reportObj['objects_created'].append({'ref': str(params['workspace_name'])+'/'+str(params['output_name']),'description': params['output_name']+' Tree'})\n reportObj['html_links'] = [{'shock_id': html_upload_ret['shock_id'],\n 'name': html_file,\n 'label': intree_name+' HTML'\n }\n ]\n reportObj['file_links'] = [{'shock_id': newick_upload_ret['shock_id'],\n 'name': intree_name+'.newick',\n 'label': intree_name+' NEWICK'\n }\n ]\n if 'default_node_labels' in tree_in:\n reportObj['file_links'].append({'shock_id': newick_labels_upload_ret['shock_id'],\n 'name': intree_name+'-labels.newick',\n 'label': intree_name+' NEWICK (with labels)'\n })\n\n reportObj['file_links'].extend([{'shock_id': png_upload_ret['shock_id'],\n 'name': intree_name+'.png',\n 'label': intree_name+' PNG'\n },\n {'shock_id': pdf_upload_ret['shock_id'],\n 'name': intree_name+'.pdf',\n 'label': intree_name+' PDF'\n }])\n\n SERVICE_VER = 'release'\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n report_info = reportClient.create_extended_report(reportObj)\n\n\n # Done\n #\n self.log(console,\"BUILDING RETURN OBJECT\")\n output = { 'report_name': report_info['name'],\n 'report_ref': report_info['ref']\n }\n\n self.log(console,\"view_tree() DONE\")\n #END view_tree\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_tree return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def trim_tree_to_genomeSet(self, ctx, params):\n \"\"\"\n :param params: instance of type \"trim_tree_to_genomeSet_Input\"\n (trim_tree_to_genomeSet() ** ** trim a KBase Tree to match\n genomeset, and make newick and images downloadable) -> structure:\n parameter \"workspace_name\" of type \"workspace_name\" (** Common\n types), parameter \"input_genomeSet_ref\" of type \"data_obj_ref\",\n parameter \"input_tree_ref\" of type \"data_obj_ref\", parameter\n \"desc\" of String\n :returns: instance of type \"trim_tree_to_genomeSet_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context 
object\n # return variables are: output\n #BEGIN trim_tree_to_genomeSet\n #END trim_tree_to_genomeSet\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method trim_tree_to_genomeSet return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def run_DomainAnnotation_Sets(self, ctx, params):\n \"\"\"\n :param params: instance of type \"run_DomainAnnotation_Sets_Input\"\n (run_DomainAnnotation_Sets() ** ** run the DomainAnnotation App\n against a GenomeSet) -> structure: parameter \"workspace_name\" of\n type \"workspace_name\" (** Common types), parameter\n \"input_genomeSet_ref\" of type \"data_obj_ref\", parameter\n \"override_annot\" of type \"bool\"\n :returns: instance of type \"run_DomainAnnotation_Sets_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN run_DomainAnnotation_Sets\n console = []\n self.log(console, 'Running run_DomainAnnotation_Sets() with params=')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n\n ### STEP 1: basic parameter checks + parsing\n required_params = ['workspace_name',\n 'input_genomeSet_ref'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+required_param+\"'\")\n\n\n ### STEP 2: build a list of genomes to iterate through\n\n # get genome set\n input_ref = params['input_genomeSet_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseSearch.GenomeSet\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n genomeSet_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genomeSet: \"+input_ref)\n\n # get genome refs and object names\n genome_ids = genomeSet_obj['elements'].keys() # note: genome_id may be meaningless\n genome_refs = []\n for genome_id in genome_ids:\n genome_refs.append (genomeSet_obj['elements'][genome_id]['ref'])\n\n genome_obj_name_by_ref = dict()\n uniq_genome_ws_ids = dict()\n ws_name_by_genome_ref = dict()\n\n for genome_ref in genome_refs:\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n ws_name_by_genome_ref[input_ref] = input_obj_info[WORKSPACE_I]\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[input_ref] = input_name\n\n\n ### STEP 3: Determine which genomes have already got domain annotations\n domain_annot_done = dict()\n for ws_id in uniq_genome_ws_ids.keys():\n try:\n dom_annot_obj_info_list = wsClient.list_objects({'ids':[ws_id],'type':\"KBaseGeneFamilies.DomainAnnotation\"})\n except Exception as e:\n raise ValueError (\"Unable to list DomainAnnotation objects from workspace: \"+str(ws_id)+\" \"+str(e))\n\n for info in dom_annot_obj_info_list:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n \n dom_annot_ref = str(info[WSID_I])+'/'+str(info[OBJID_I])+'/'+str(info[VERSION_I])\n try:\n domain_data = wsClient.get_objects([{'ref':dom_annot_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch domain annotation: \"+dom_annot_ref)\n\n # read domain data object\n genome_ref = domain_data['genome_ref']\n if genome_ref not in genome_refs:\n continue\n domain_annot_done[genome_ref] = True\n\n\n ### STEP 4: run DomainAnnotation on each genome in set\n try:\n SERVICE_VER = 'dev' # DEBUG\n daClient = DomainAnnotation (url=self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER) # SDK Local\n #daClient = DomainAnnotation (url=self.serviceWizardURL, token=ctx['token'], service_ver=SERVICE_VER) # Dynamic service\n except:\n raise ValueError (\"unable to instantiate DomainAnnotationClient\")\n\n # RUN DomainAnnotations\n report_text = ''\n for genome_i,genome_ref in enumerate(genome_refs):\n\n if 'override_annot' not in params or params['override_annot'] != '1':\n if genome_ref in domain_annot_done:\n self.log (console, \"SKIPPING repeat domain annotation for genome: \"+genome_obj_name_by_ref[genome_ref])\n\n continue\n\n genome_obj_name = genome_obj_name_by_ref[genome_ref]\n domains_obj_name = re.sub ('[\\.\\-\\_\\:]GenomeAnnotation$', '', genome_obj_name)\n domains_obj_name = re.sub ('[\\.\\-\\_\\:]Genome$', '', domains_obj_name)\n domains_obj_name += '.DomainAnnotation'\n 
domains_obj_name = 'domains_'+domains_obj_name # DEBUG\n DomainAnnotation_Params = { 'genome_ref': genome_ref,\n 'dms_ref': 'KBasePublicGeneDomains/All',\n 'ws': params['workspace_name'],\n #'ws': ws_name_by_genome_ref[genome_ref],\n 'output_result_id': domains_obj_name\n }\n self.log (console, \"RUNNING domain annotation for genome: \"+genome_obj_name_by_ref[genome_ref])\n self.log(console, \"\\n\"+pformat(DomainAnnotation_Params))\n self.log(console, str(datetime.now()))\n\n #da_retVal = daClient.search_domains (DomainAnnotation_Params)[0]\n da_retVal = daClient.search_domains (DomainAnnotation_Params)\n this_output_ref = da_retVal['output_result_id']\n this_report_name = da_retVal['report_name']\n this_report_ref = da_retVal['report_ref']\n\n try:\n this_report_obj = wsClient.get_objects([{'ref':this_report_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch report: \"+this_report_ref)\n report_text += this_report_obj['text_message']\n report_text += \"\\n\\n\"\n\n\n ### STEP 5: build and save the report\n reportObj = {\n 'objects_created': [],\n 'text_message': report_text\n }\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n report_info = reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n\n\n ### STEP 6: construct the output to send back\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END run_DomainAnnotation_Sets\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method run_DomainAnnotation_Sets return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_fxn_profile(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_fxn_profile_Input\"\n (view_fxn_profile() ** ** show a table/heatmap of general\n categories or custom gene families for a set of Genomes) ->\n structure: parameter \"workspace_name\" of type \"workspace_name\" (**\n Common types), parameter \"input_genomeSet_ref\" of type\n \"data_obj_ref\", parameter \"namespace\" of String, parameter\n \"custom_target_fams\" of type \"CustomTargetFams\" (parameter groups)\n -> structure: parameter \"target_fams\" of list of String, parameter\n \"extra_target_fam_groups_COG\" of list of String, parameter\n \"extra_target_fam_groups_PFAM\" of list of String, parameter\n \"extra_target_fam_groups_TIGR\" of list of String, parameter\n \"extra_target_fam_groups_SEED\" of list of String, parameter\n \"count_category\" of String, parameter \"heatmap\" of type \"bool\",\n parameter \"vertical\" of type \"bool\", parameter \"top_hit\" of type\n \"bool\", parameter \"e_value\" of Double, parameter \"log_base\" of\n Double, parameter \"show_blanks\" of type \"bool\"\n :returns: instance of type \"view_fxn_profile_Output\" -> structure:\n parameter \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_fxn_profile\n\n ### STEP 0: basic init\n console = []\n self.log(console, 'Running view_fxn_profile(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_genomeSet_ref',\n 'namespace'\n ]\n for arg in 
required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n if params['namespace'] == 'custom':\n if ('custom_target_fams' not in params or not params['custom_target_fams']) \\\n or ( \\\n ('target_fams' not in params['custom_target_fams'] or not params['custom_target_fams']['target_fams']) \\\n and ('extra_target_fam_groups_COG' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_COG']) \\\n and ('extra_target_fam_groups_PFAM' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_PFAM']) \\\n and ('extra_target_fam_groups_TIGR' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_TIGR']) \\\n and ('extra_target_fam_groups_SEED' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_SEED'])\n ):\n \n raise ValueError (\"Must define either param: 'target_fams' or 'extra_target_fam_groups' if using CUSTOM targets\")\n\n # base config\n namespace_classes = ['COG', 'PF', 'TIGR', 'SEED']\n show_blanks = False\n if 'show_blanks' in params and params['show_blanks'] == '1':\n show_blanks = True\n e_value_thresh = None\n if 'e_value' in params and params['e_value'] != None and params['e_value'] != '':\n e_value_thresh = float (params['e_value'])\n top_hit_flag = False\n if 'top_hit' in params and params['top_hit'] != None and params['top_hit'] != '' and params['top_hit'] != 0:\n top_hit_flag = True\n\n domain_desc_basepath = os.path.abspath('/kb/module/data/domain_desc')\n domain_to_cat_map_path = dict()\n domain_cat_names_path = dict()\n domain_fam_names_path = dict()\n domain_to_cat_map_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_cat_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014_funcat.tsv')\n domain_fam_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_to_cat_map_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_cat_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans_names.tsv')\n domain_fam_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_to_cat_map_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_cat_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrrole2go.txt')\n #domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrfams2go.txt')\n domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_to_cat_map_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_funcat.txt')\n #domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_fam_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n provenance[0]['input_ws_objects']=[str(params['input_genomeSet_ref'])]\n\n\n # set the output path\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\n # configure categories\n #\n cats = []\n cat2name = dict()\n cat2group = dict()\n domfam2cat = dict()\n cat2domfams = dict()\n 
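These lookup tables (together with namespaces_reading, initialized on the next line) are filled further below from the tab-separated mapping files configured above (COG_2014.tsv, Pfam-A.clans.tsv, TIGRInfo.tsv, SEED_subsys.txt). As a rough sketch of the COG case only, assuming rows of the form domfam<TAB>category-letters<TAB>name and an illustrative local path:

domfam2cat = dict()
cat2domfams = dict()
with open('COG_2014.tsv') as dom2cat_handle:  # illustrative path
    for line in dom2cat_handle:
        domfam, cat_str, cat_name = line.rstrip('\n').split('\t')[0:3]
        cat = cat_str[0]  # keep only the first category letter, as the COG branch below does
        domfam2cat[domfam] = cat
        cat2domfams.setdefault(cat, []).append(domfam)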
namespaces_reading = dict()\n\n # categories are high-level summations\n if params['namespace'] != 'custom':\n for namespace in ['COG','PF','TIGR','SEED']:\n if params['namespace'] == namespace:\n namespaces_reading[namespace] = True\n\n # read all mappings between groups and domfams\n for namespace in ['COG','PF','TIGR','SEED']:\n\n cat2name[namespace] = dict()\n cat2group[namespace] = dict()\n domfam2cat[namespace] = dict()\n cat2domfams[namespace] = dict()\n\n # get high-level cats\n tigrrole_id2cat = dict()\n with open (domain_cat_names_path[namespace], 'r', 0) as dom_cat_handle:\n for line in dom_cat_handle.readlines():\n line = line.strip()\n \n if namespace == 'COG':\n [cat, cat_group, cat_name] = line.split(\"\\t\")[0:3]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'PF':\n [cat, cat_name] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = None\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [cat, cat_id, cat_group, cat_name_plus_go_terms] = line.split(\"\\t\")[0:4]\n tigrrole_id2cat[cat_id] = cat\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_name = re.sub (' *\\> GO:.*$', '', cat_name_plus_go_terms)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'SEED':\n #[cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n [cat_group, cat] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_disp = re.sub ('_', ' ', cat)\n cat2name[namespace][cat] = cat_disp\n cat2group[namespace][cat] = cat_group\n\n # get domfam to cat map, and vice versa\n with open (domain_to_cat_map_path[namespace], 'r', 0) as dom2cat_map_handle:\n for line in dom2cat_map_handle.readlines():\n line = line.strip()\n\n if namespace == 'COG':\n [domfam, cat_str, cat_name] = line.split(\"\\t\")[0:3]\n cat = cat_str[0] # only use first cat\n\n elif namespace == 'PF':\n [domfam, cat, cat_name, dom_id, dom_name] = line.split(\"\\t\")[0:5]\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if cat_id != '' and int(cat_id) != 0 and cat_id in tigrrole_id2cat:\n cat = tigrrole_id2cat[cat_id]\n else:\n continue\n\n elif namespace == 'SEED':\n [cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n\n domfam2cat[namespace][domfam] = cat\n if cat not in cat2domfams[namespace]:\n cat2domfams[namespace][cat] = []\n cat2domfams[namespace][cat].append(domfam)\n\n # custom domains\n if params['namespace'] == 'custom':\n\n # add target fams\n target_fams = []\n if 'target_fams' in params['custom_target_fams'] and params['custom_target_fams']['target_fams']:\n for target_fam in params['custom_target_fams']['target_fams']:\n target_fam = target_fam.strip()\n if target_fam == '':\n continue\n\n target_fam = re.sub (\"^cog\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^pf\", \"PF\", target_fam, 
flags=re.IGNORECASE)\n target_fam = re.sub (\"^tigr\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^seed\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PFAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P-FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P_FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGRFAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n\n target_fam = re.sub (\"^COG:\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG-\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG_\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG *\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF:\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF-\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF_\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF *\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR:\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR *\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED:\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED-\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED_\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED *\", \"SEED\", target_fam, flags=re.IGNORECASE)\n\n num_id_len = dict()\n num_id_len['COG'] = 4\n num_id_len['PF'] = 5\n num_id_len['TIGR'] = 5\n\n #self.log (console, \"TARGET_FAM A: '\"+target_fam+\"'\") # DEBUG\n \n if target_fam.startswith('SEED'):\n namespaces_reading['SEED'] = True\n target_fam = target_fam.strip()\n target_fam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' ', '_', target_fam)\n else:\n namespace_found = False\n for namespace_iter in ['COG','PF','TIGR']:\n if target_fam.startswith(namespace_iter):\n this_namespace = namespace_iter\n namespaces_reading[this_namespace] = True\n target_fam = re.sub(this_namespace, \"\", target_fam)\n namespace_found = True\n break\n if not namespace_found:\n raise ValueError (\"unrecognized custom domain family: '\"+str(target_fam)+\"'\")\n leading_zeros = ''\n for c_i in range(num_id_len[this_namespace] - len(str(target_fam))):\n leading_zeros += '0'\n target_fam = this_namespace + leading_zeros + target_fam\n\n #self.log (console, \"TARGET_FAM B: '\"+target_fam+\"'\") # DEBUG\n\n target_fams.append(target_fam)\n\n # add extra target fams\n extra_target_fams = []\n extra_target_fam_groups = []\n domfam2group = dict()\n for target_set in ['extra_target_fam_groups_COG', 'extra_target_fam_groups_PFAM', 'extra_target_fam_groups_TIGR', 'extra_target_fam_groups_SEED']:\n if target_set in params['custom_target_fams'] and params['custom_target_fams'][target_set]:\n extra_target_fam_groups.extend (params['custom_target_fams'][target_set])\n\n if extra_target_fam_groups:\n for target_group in extra_target_fam_groups:\n target_group = 
target_group.strip()\n if target_group == '':\n continue\n\n namespace = re.sub (\":.*$\", \"\", target_group)\n namespaces_reading[namespace] = True\n\n if namespace == 'COG':\n this_group = re.sub (\"COG: \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'PF':\n this_group = re.sub (\"PF: Clan \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'TIGR':\n this_group = re.sub (\"TIGR: role:\", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n this_group = 'TIGR_role:'+this_group\n elif namespace == 'SEED':\n this_group = re.sub (\"SEED: \", \"\", target_group)\n\n for domfam in cat2domfams[namespace][this_group]:\n extra_target_fams.append(domfam)\n domfam2group[domfam] = target_group\n\n # we have our targets\n cats = target_fams + extra_target_fams\n\n # store names of targets\n domfam2name = dict()\n for namespace in namespaces_reading.keys():\n domfam2name[namespace] = dict()\n\n if namespace == 'COG':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, cat_class, domfam_name] = line.split(\"\\t\")[0:3]\n domfam2name[namespace][domfam] = domfam_name\n\n elif namespace == 'PF':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, class_id, class_name, domfam_id, domfam_name] = line.split(\"\\t\")[0:5]\n if domfam_name.startswith(domfam_id):\n combo_name = domfam_name\n else:\n combo_name = domfam_id+': '+domfam_name\n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'TIGR':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if domfam_name != '':\n if domfam_desc.startswith(domfam_name):\n combo_name = domfam_desc\n else:\n combo_name = domfam_name+': '+domfam_desc\n else:\n if domfam_desc.startswith(domfam_id):\n combo_name = domfam_desc\n else:\n combo_name = domfam_id+': '+domfam_desc\n if ec_id != '':\n combo_name += ' (EC '+ec_id+')'\n \n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'SEED':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [level1, level2, level3, domfam] = line.split(\"\\t\")[0:4]\n\n domfam_desc = domfam\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n if domfam in domfam2name[namespace]:\n if len(domfam_desc) > len(domfam2name[namespace][domfam]):\n domfam2name[namespace][domfam] = domfam_desc\n else:\n domfam2name[namespace][domfam] = domfam_desc\n\n # just in case\n elif params['namespace'] != 'COG' \\\n and params['namespace'] != 'PF' \\\n and params['namespace'] != 'TIGR' \\\n and params['namespace'] != 'SEED':\n raise ValueError (\"Unknown namespace: '\"+str(params['namespace'])+\"'\")\n\n\n # get genome set\n #\n input_ref = params['input_genomeSet_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n 
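The try block this sits in (continued on the next line) fetches the object_info tuple for the GenomeSet from the Workspace and trims the trailing '-major.minor' version from the type string before checking it against the accepted types. The version-trimming step on its own, with made-up type strings:

import re

def base_type(ws_type):
    # strip the trailing "-<major>.<minor>" version from a workspace type string
    return re.sub(r'-[0-9]+\.[0-9]+$', '', ws_type)

print(base_type('KBaseSearch.GenomeSet-2.1'))  # KBaseSearch.GenomeSet
print(base_type('KBaseGenomes.Genome-17.0'))   # KBaseGenomes.Genome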
input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseSearch.GenomeSet\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n genomeSet_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genomeSet: \"+input_ref)\n\n\n # get genome refs, object names, sci names, protein-coding gene counts, and SEED annot\n #\n genome_ids = genomeSet_obj['elements'].keys() # note: genome_id may be meaningless\n genome_refs = []\n for genome_id in genome_ids:\n genome_refs.append (genomeSet_obj['elements'][genome_id]['ref'])\n\n genome_obj_name_by_ref = dict()\n genome_sci_name_by_ref = dict()\n genome_CDS_count_by_ref = dict()\n uniq_genome_ws_ids = dict()\n\n dom_hits = dict() # initialize dom_hits here because reading SEED within genome\n genes_with_hits_cnt = dict()\n\n for genome_ref in genome_refs:\n\n dom_hits[genome_ref] = dict()\n genes_with_hits_cnt[genome_ref] = dict()\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[genome_ref] = input_name\n\n try:\n genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n # sci name\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n \n # CDS cnt\n cds_cnt = 0\n for feature in genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n cds_cnt += 1\n genome_CDS_count_by_ref[genome_ref] = cds_cnt\n\n # SEED annotations\n #\n #f_cnt = 0 # DEBUG\n if 'SEED' in namespaces_reading:\n for feature in genome_obj['features']:\n #if f_cnt % 100 == 0:\n # self.log (console, \"iterating features: \"+str(f_cnt)) # DEBUG\n\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n #if f_cnt % 100 == 0:\n # self.log (console, \"prot: \"+str(feature['protein_translation'])) # DEBUG\n\n if 'function' in feature and feature['function'] != None and feature['function'] != '':\n gene_name = feature['id']\n \n #if f_cnt % 100 == 0:\n # self.log (console, \"fxn: '\"+str(feature['function'])+\"'\") # DEBUG\n\n # store assignments for gene\n for namespace in ['SEED']:\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n dom_hits[genome_ref][gene_name][namespace] = dict()\n\n domfam_list = []\n annot_set = feature['function'].strip().split(';')\n for annot in annot_set:\n annot_set_2 = annot.strip().split('@')\n for annot2 in annot_set_2:\n domfam = annot2.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n domfam_list.append(domfam)\n #if f_cnt % 100 == 0:\n # self.log (console, \"domfam: '\"+str(domfam)+\"'\") # DEBUG\n\n if top_hit_flag: # does SEED give more than one function?\n dom_hits[genome_ref][gene_name][namespace][domfam_list[0]] = True\n else:\n for domfam in domfam_list:\n dom_hits[genome_ref][gene_name][namespace][domfam] = True\n\n #f_cnt += 1 # DEBUG\n\n\n # capture domain hits to genes within each namespace\n #\n if params['namespace'] != 'SEED':\n dom_annot_found = dict()\n\n KBASE_DOMAINHIT_GENE_ID_I = 0\n KBASE_DOMAINHIT_GENE_BEG_I = 1 # not used\n KBASE_DOMAINHIT_GENE_END_I = 2 # not used\n KBASE_DOMAINHIT_GENE_STRAND_I = 3 # not used\n KBASE_DOMAINHIT_GENE_HITS_DICT_I = 4\n KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J = 0\n KBASE_DOMAINHIT_GENE_HITS_DICT_END_J = 1\n KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J = 2\n KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J = 3\n KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J = 4\n\n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT A: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n for ws_id in uniq_genome_ws_ids.keys():\n try:\n dom_annot_obj_info_list = wsClient.list_objects({'ids':[ws_id],'type':\"KBaseGeneFamilies.DomainAnnotation\"})\n except Exception as e:\n raise ValueError (\"Unable to list DomainAnnotation objects from workspace: \"+str(ws_id)+\" \"+str(e))\n\n for info in dom_annot_obj_info_list:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, 
SIZE_I, META_I] = range(11) # object_info tuple\n \n dom_annot_ref = str(info[WSID_I])+'/'+str(info[OBJID_I])+'/'+str(info[VERSION_I])\n try:\n domain_data = wsClient.get_objects([{'ref':dom_annot_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch domain annotation: \"+dom_annot_ref)\n\n # read domain data object\n genome_ref = domain_data['genome_ref']\n if genome_ref not in genome_refs:\n continue\n dom_annot_found[genome_ref] = True\n\n if genome_ref not in dom_hits:\n dom_hits[genome_ref] = dict()\n\n if genome_ref not in genes_with_hits_cnt:\n genes_with_hits_cnt[genome_ref] = dict()\n\n for scaffold_id_iter in domain_data['data'].keys():\n for CDS_domain_list in domain_data['data'][scaffold_id_iter]:\n gene_ID = CDS_domain_list[KBASE_DOMAINHIT_GENE_ID_I]\n #gene_name = re.sub ('^'+genome_object_name+'.', '', gene_ID) \n gene_name = gene_ID\n #(contig_name, gene_name) = (gene_ID[0:gene_ID.index(\".\")], gene_ID[gene_ID.index(\".\")+1:])\n #print (\"DOMAIN_HIT: \"+contig_name+\" \"+gene_name) # DEBUG\n #print (\"DOMAIN_HIT for gene: \"+gene_name) # DEBUG\n #gene_beg = CDS_domain_list[KBASE_DOMAINHIT_GENE_BEG_I]\n #gene_end = CDS_domain_list[KBASE_DOMAINHIT_GENE_END_I]\n #gene_strand = CDS_domain_list[KBASE_DOMAINHIT_GENE_STRAND_I]\n gene_hits_dict = CDS_domain_list[KBASE_DOMAINHIT_GENE_HITS_DICT_I]\n\n dom_hits_by_namespace = dict()\n top_hit_evalue_by_namespace = dict()\n top_hit_dom_by_namespace = dict()\n\n for namespace in namespace_classes:\n dom_hits_by_namespace[namespace] = dict()\n top_hit_evalue_by_namespace[namespace] = 100\n top_hit_dom_by_namespace[namespace] = None\n\n for domfam in gene_hits_dict.keys():\n if domfam.startswith('PF'):\n domfam_clean = re.sub('\\.[^\\.]*$','',domfam)\n else:\n domfam_clean = domfam\n known_namespace = False\n for this_namespace in namespace_classes:\n if domfam.startswith(this_namespace):\n namespace = this_namespace\n known_namespace = True\n if not known_namespace:\n continue\n\n for hit in gene_hits_dict[domfam]:\n beg = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J])\n end = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_END_J])\n e_value = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J])\n bit_score = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J])\n aln_perc = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J])\n\n if e_value_thresh != None and e_value > e_value_thresh:\n continue\n if top_hit_flag:\n if top_hit_dom_by_namespace[namespace] == None \\\n or top_hit_evalue_by_namespace[namespace] > e_value:\n top_hit_dom_by_namespace[namespace] = domfam_clean\n top_hit_evalue_by_namespace[namespace] = e_value\n \n dom_hits_by_namespace[namespace][domfam_clean] = True\n\n # store assignments for gene\n for namespace in namespace_classes:\n if namespace == 'SEED':\n continue\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n if dom_hits_by_namespace[namespace]:\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n \n if top_hit_flag:\n dom_hits[genome_ref][gene_name][namespace] = { top_hit_dom_by_namespace[namespace]: True }\n else:\n dom_hits[genome_ref][gene_name][namespace] = dom_hits_by_namespace[namespace]\n\n # make sure we have domain annotations for all genomes\n missing_annot = []\n for genome_ref in genome_refs:\n if genome_ref not in dom_annot_found:\n missing_annot.append(\"\\t\"+'MISSING DOMAIN ANNOTATION FOR: '+genome_ref)\n if missing_annot:\n error_msg = \"ABORT: You 
must run the DomainAnnotation App first\\n\"\n error_msg += \"\\n\".join(missing_annot)\n raise ValueError (error_msg)\n \n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT B: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n \n # calculate table\n #\n table_data = dict()\n INSANE_VALUE = 10000000000000000\n overall_low_val = INSANE_VALUE\n overall_high_val = -INSANE_VALUE\n\n # count raw\n for genome_ref in genome_refs:\n if genome_ref not in table_data:\n table_data[genome_ref] = dict()\n for cat in cats:\n table_data[genome_ref][cat] = 0\n\n # custom\n if params['namespace'] == 'custom':\n for cat in cats:\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n if cat in dom_hits[genome_ref][gene_name][namespace]:\n table_data[genome_ref][cat] += 1\n\n # high level summation\n else:\n namespace = params['namespace']\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n for domfam in dom_hits[genome_ref][gene_name][namespace].keys():\n #self.log(console, \"DOMFAM: '\"+str(domfam)+\"'\") # DEBUG\n\n if domfam in domfam2cat[namespace]:\n cat = domfam2cat[namespace][domfam]\n #self.log(console, \"CAT: '\"+str(cat)+\"'\") # DEBUG\n if cat in cats:\n #self.log(console, \"CAT_FOUND: '\"+str(cat)+\"'\") # DEBUG\n table_data[genome_ref][cat] += 1\n \n # adjust to percs\n if params['count_category'].startswith('perc'):\n for genome_ref in genome_refs:\n for cat in cats:\n if params['count_category'] == 'perc_annot':\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n else:\n namespace = params['namespace']\n total_genes = genes_with_hits_cnt[genome_ref][namespace]\n else:\n total_genes = genome_CDS_count_by_ref[genome_ref]\n\n table_data[genome_ref][cat] /= float(total_genes)\n table_data[genome_ref][cat] *= 100.0\n\n # determine high and low val\n for genome_ref in genome_refs:\n for cat in cats:\n val = table_data[genome_ref][cat]\n if val == 0: continue\n #self.log (console, \"HIGH VAL SCAN CAT: '\"+cat+\"' VAL: '\"+str(val)+\"'\") # DEBUG\n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n if val > overall_high_val:\n overall_high_val = val\n if val < overall_low_val:\n overall_low_val = val\n if overall_high_val == -INSANE_VALUE:\n raise ValueError (\"unable to find any counts\")\n\n\n # determine cats with a value and build group\n #\n cat_seen = dict()\n group_size = dict()\n group_size_with_blanks = dict()\n group_order = []\n group_order_with_blanks = []\n for cat in cats:\n cat_seen[cat] = False\n if params['namespace'] == 'custom':\n # get cats seen and group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = None\n if extra_target_fam_groups:\n if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = None\n if extra_target_fam_groups:\n 
if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n else:\n namespace = params['namespace']\n # get group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != None and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n\n\n # build report\n #\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n\n\n # build html report\n sp = ' '\n text_color = \"#606060\"\n text_color_2 = \"#606060\"\n head_color_1 = \"#eeeeee\"\n head_color_2 = \"#eeeeee\"\n border_color = \"#cccccc\"\n border_cat_color = \"#ffccff\"\n #graph_color = \"lightblue\"\n #graph_width = 100\n #graph_char = \".\"\n graph_char = sp\n color_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e']\n max_color = len(color_list)-1\n cat_disp_trunc_len = 40\n cell_width = '10px'\n if len(genome_refs) > 20:\n graph_gen_fontsize = \"1\"\n elif len(genome_refs) > 10:\n graph_gen_fontsize = \"2\"\n else:\n graph_gen_fontsize = \"3\"\n if len(cats) > 20:\n graph_cat_fontsize = \"1\"\n elif len(cats) > 5:\n graph_cat_fontsize = \"2\"\n else:\n graph_cat_fontsize = \"3\"\n if int(graph_cat_fontsize) < int(graph_gen_fontsize):\n cell_fontsize = graph_gen_fontsize = graph_cat_fontsize\n else:\n cell_fontsize = graph_cat_fontsize = graph_gen_fontsize\n graph_padding = \"5\"\n graph_spacing = \"3\"\n #border = \"1\"\n border = \"0\"\n #row_spacing = \"-2\"\n num_rows = len(genome_refs)\n show_groups = False\n if len(group_order) > 0: show_groups = True\n\n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Functional Domain Profile</title>']\n html_report_lines += ['<style>']\n html_report_lines += [\".vertical-text {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 0.65em;\\n}\\n.vertical-text__inner {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.1;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += [\".vertical-text_title {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 1.0em;\\n}\\n.vertical-text__inner_title {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.0;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner_title:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += 
['</style>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n\n # genomes as rows\n if 'vertical' in params and params['vertical'] == \"1\":\n # table header\n html_report_lines += ['<table cellpadding='+graph_padding+' cellspacing='+graph_spacing+' border='+border+'>']\n corner_rowspan = \"1\"\n if show_groups: corner_rowspan = \"2\"\n label = ''\n if params['namespace'] != 'custom':\n label = params['namespace']\n if label == 'PF':\n label = 'PFAM'\n elif label == 'TIGR':\n label = 'TIGRFAM'\n html_report_lines += ['<tr><td valign=bottom align=right rowspan='+corner_rowspan+'><div class=\"vertical-text_title\"><div class=\"vertical-text__inner_title\"><font color=\"'+text_color+'\">'+label+'</font></div></div></td>']\n \n # group headers\n if show_groups:\n for cat_group in group_order:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n max_group_width = 3*group_size[cat_group]\n if len(cat_group) > max_group_width:\n new_cat_group_words = []\n sentence_len = 0\n for w_i,word in enumerate(cat_group_words):\n new_cat_group_words.append(word)\n sentence_len += len(word)\n if w_i < len(cat_group_words)-1:\n if sentence_len + 1 + len(cat_group_words[w_i+1]) > max_group_width:\n new_cat_group_words[w_i] += '<br>'\n sentence_len = 0\n cat_group_words = new_cat_group_words\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" \".join(cat_group_words)\n\n # DEBUG\n #if cat_group not in group_size:\n # self.log(console, \"CAT_GROUP: '\"+str(cat_group)+\"'\") # DEBUG\n # self.log(console, \"CAT_GROUP_DISP: '\"+str(cat_group_disp)+\"'\") # DEBUG\n # for cg in group_size:\n # self.log(console, \"CG: '\"+str(cg)+\"'\") # DEBUG\n\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white colspan='+str(group_size[cat_group])+'></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_1+'\"valign=middle align=center colspan='+str(group_size[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n html_report_lines += ['</tr><tr>']\n\n # column headers\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub (\"\\d*$\", \"\", cat)\n cell_title = domfam2name[namespace][cat].strip()\n cat_disp = cat\n cat_disp = re.sub ('^SEED', 'SEED:', cat_disp)\n else:\n cell_title = cat2name[params['namespace']][cat].strip()\n cat_disp = cat\n cat_disp = re.sub (\"TIGR_\", \"\", cat_disp)\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_2+'\"title=\"'+cell_title+'\" valign=bottom align=center>']\n if params['namespace'] != 'COG':\n html_report_lines += ['<div class=\"vertical-text\"><div class=\"vertical-text__inner\">']\n html_report_lines += ['<font color=\"'+text_color_2+'\" size='+graph_cat_fontsize+'><b>']\n #for c_i,c in enumerate(cat_disp):\n # if c_i < len(cat_disp)-1:\n # html_report_lines += [c+'<br>']\n # else:\n # html_report_lines += [c]\n html_report_lines += [cat_disp]\n html_report_lines += ['</b></font>']\n if 
params['namespace'] != 'COG':\n html_report_lines += ['</div></div>']\n html_report_lines += ['</td>']\n html_report_lines += ['</tr>']\n \n # rest of rows\n for genome_ref in genome_refs:\n genome_sci_name = genome_sci_name_by_ref[genome_ref]\n html_report_lines += ['<tr>']\n html_report_lines += ['<td align=right><font color=\"'+text_color+'\" size='+graph_gen_fontsize+'><b><nobr>'+genome_sci_name+'</nobr></b></font></td>']\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n val = table_data[genome_ref][cat]\n if val == 0:\n cell_color = 'white'\n else: \n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n cell_color_i = max_color - int(round(max_color * (val-overall_low_val) / float(overall_high_val-overall_low_val)))\n c = color_list[cell_color_i]\n cell_color = '#'+c+c+c+c+'FF'\n\n if params['count_category'].startswith('perc'):\n cell_val = str(\"%.3f\"%table_data[genome_ref][cat])\n cell_val += '%'\n else:\n cell_val = str(table_data[genome_ref][cat])\n\n if 'heatmap' in params and params['heatmap'] == '1':\n if table_data[genome_ref][cat] == 0:\n this_text_color = text_color\n #this_graph_char = \"0\"\n this_graph_char = sp\n else:\n this_text_color = cell_color\n this_graph_char = graph_char\n html_report_lines += ['<td align=center valign=middle title=\"'+cell_val+'\" style=\"width:'+cell_width+'\" bgcolor=\"'+cell_color+'\"><font color=\"'+this_text_color+'\" size='+cell_fontsize+'>'+this_graph_char+'</font></td>']\n else:\n html_report_lines += ['<td align=center valign=middle style=\"'+cell_width+'; border-right:solid 2px '+border_color+'; border-bottom:solid 2px '+border_color+'\"><font color=\"'+text_color+'\" size='+cell_fontsize+'>'+cell_val+'</font></td>']\n\n html_report_lines += ['</tr>']\n html_report_lines += ['</table>']\n\n # genomes as columns\n else:\n raise ValueError (\"Do not yet support Genomes as columns\")\n\n\n # key table\n html_report_lines += ['<p>']\n html_report_lines += ['<table cellpadding=3 cellspacing=2 border='+border+'>']\n html_report_lines += ['<tr><td valign=middle align=left colspan=3 style=\"border-bottom:solid 4px '+border_color+'\"><font color=\"'+text_color+'\"><b>KEY</b></font></td></tr>']\n\n if show_groups:\n group_cat_i = 0\n for cat_group in group_order_with_blanks:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" <br>\".join(cat_group_words)\n cat_group_disp += sp\n\n html_report_lines += ['<tr>']\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white rowspan='+str(group_size_with_blanks[cat_group])+' style=\"border-right:solid 4px '+border_color+'\"></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 4px '+border_color+'\" valign=top align=right rowspan='+str(group_size_with_blanks[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n # add first cat for group\n first_cat = cats[group_cat_i]\n cell_color = 'white'\n #if not cat_seen[first_cat] and not show_blanks:\n if not cat_seen[first_cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = first_cat\n if first_cat.startswith('SEED'):\n namespace = 'SEED'\n 
else:\n namespace = re.sub ('\\d*$', '', first_cat)\n cat_disp = re.sub ('^SEED', 'SEED:', first_cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = first_cat\n desc = cat2name[namespace][first_cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n # add rest of cats in group\n for c_i in range(group_cat_i, group_cat_i+group_size_with_blanks[cat_group]-1):\n cat = cats[c_i]\n cell_color = 'white'\n #if not cat_seen[cat] and not show_blanks:\n if not cat_seen[cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n \n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n else:\n for cat in cats:\n cell_color = 'white'\n if not cat_seen[cat] and not show_blanks:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', domfam)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n\n html_report_lines += ['</table>']\n\n # close\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n html_file = os.path.join (output_dir, 'domain_profile_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n upload_ret = dfu.file_to_shock({'file_path': html_file,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging 
exception loading html_report to shock')\n\n reportObj['html_links'] = [{'shock_id': upload_ret['shock_id'],\n 'name': 'domain_profile_report.html',\n 'label': 'Functional Domain Profile report'}\n ]\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_fxn_profile\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_fxn_profile return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_fxn_profile_featureSet(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_fxn_profile_featureSet_Input\"\n (view_fxn_profile_featureSet() ** ** show a table/heatmap of\n general categories or custom gene families for a set of Genomes)\n -> structure: parameter \"workspace_name\" of type \"workspace_name\"\n (** Common types), parameter \"input_featureSet_ref\" of type\n \"data_obj_ref\", parameter \"namespace\" of String, parameter\n \"custom_target_fams\" of type \"CustomTargetFams\" (parameter groups)\n -> structure: parameter \"target_fams\" of list of String, parameter\n \"extra_target_fam_groups_COG\" of list of String, parameter\n \"extra_target_fam_groups_PFAM\" of list of String, parameter\n \"extra_target_fam_groups_TIGR\" of list of String, parameter\n \"extra_target_fam_groups_SEED\" of list of String, parameter\n \"count_category\" of String, parameter \"heatmap\" of type \"bool\",\n parameter \"vertical\" of type \"bool\", parameter \"top_hit\" of type\n \"bool\", parameter \"e_value\" of Double, parameter \"log_base\" of\n Double, parameter \"show_blanks\" of type \"bool\"\n :returns: instance of type \"view_fxn_profile_featureSet_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_fxn_profile_featureSet\n\n ### STEP 0: basic init\n console = []\n self.log(console, 'Running view_fxn_profile_featureSet(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_featureSet_ref',\n 'namespace'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n if params['namespace'] == 'custom':\n if ('custom_target_fams' not in params or not params['custom_target_fams']) \\\n or ( \\\n ('target_fams' not in params['custom_target_fams'] or not params['custom_target_fams']['target_fams']) \\\n and ('extra_target_fam_groups_COG' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_COG']) \\\n and ('extra_target_fam_groups_PFAM' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_PFAM']) \\\n and ('extra_target_fam_groups_TIGR' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_TIGR']) \\\n and 
('extra_target_fam_groups_SEED' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_SEED'])\n ):\n \n raise ValueError (\"Must define either param: 'target_fams' or 'extra_target_fam_groups' if using CUSTOM targets\")\n\n # base config\n namespace_classes = ['COG', 'PF', 'TIGR', 'SEED']\n show_blanks = False\n if 'show_blanks' in params and params['show_blanks'] == '1':\n show_blanks = True\n e_value_thresh = None\n if 'e_value' in params and params['e_value'] != None and params['e_value'] != '':\n e_value_thresh = float (params['e_value'])\n top_hit_flag = False\n if 'top_hit' in params and params['top_hit'] != None and params['top_hit'] != '' and params['top_hit'] != 0:\n top_hit_flag = True\n\n domain_desc_basepath = os.path.abspath('/kb/module/data/domain_desc')\n domain_to_cat_map_path = dict()\n domain_cat_names_path = dict()\n domain_fam_names_path = dict()\n domain_to_cat_map_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_cat_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014_funcat.tsv')\n domain_fam_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_to_cat_map_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_cat_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans_names.tsv')\n domain_fam_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_to_cat_map_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_cat_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrrole2go.txt')\n #domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrfams2go.txt')\n domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_to_cat_map_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_funcat.txt')\n #domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_fam_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n provenance[0]['input_ws_objects']=[str(params['input_featureSet_ref'])]\n\n\n # set the output path\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\n # configure categories\n #\n cats = []\n cat2name = dict()\n cat2group = dict()\n domfam2cat = dict()\n cat2domfams = dict()\n namespaces_reading = dict()\n\n # categories are high-level summations\n if params['namespace'] != 'custom':\n for namespace in ['COG','PF','TIGR','SEED']:\n if params['namespace'] == namespace:\n namespaces_reading[namespace] = True\n\n # read all mappings between groups and domfams\n for namespace in ['COG','PF','TIGR','SEED']:\n\n cat2name[namespace] = dict()\n cat2group[namespace] = dict()\n domfam2cat[namespace] = dict()\n cat2domfams[namespace] = dict()\n\n # get high-level cats\n tigrrole_id2cat = dict()\n with open (domain_cat_names_path[namespace], 'r', 0) as dom_cat_handle:\n for line in dom_cat_handle.readlines():\n line = line.strip()\n \n if namespace == 'COG':\n [cat, cat_group, cat_name] = line.split(\"\\t\")[0:3]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] 
= cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'PF':\n [cat, cat_name] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = None\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [cat, cat_id, cat_group, cat_name_plus_go_terms] = line.split(\"\\t\")[0:4]\n tigrrole_id2cat[cat_id] = cat\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_name = re.sub (' *\\> GO:.*$', '', cat_name_plus_go_terms)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'SEED':\n #[cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n [cat_group, cat] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_disp = re.sub ('_', ' ', cat)\n cat2name[namespace][cat] = cat_disp\n cat2group[namespace][cat] = cat_group\n\n # get domfam to cat map, and vice versa\n with open (domain_to_cat_map_path[namespace], 'r', 0) as dom2cat_map_handle:\n for line in dom2cat_map_handle.readlines():\n line = line.strip()\n\n if namespace == 'COG':\n [domfam, cat_str, cat_name] = line.split(\"\\t\")[0:3]\n cat = cat_str[0] # only use first cat\n\n elif namespace == 'PF':\n [domfam, cat, cat_name, dom_id, dom_name] = line.split(\"\\t\")[0:5]\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if cat_id != '' and int(cat_id) != 0 and cat_id in tigrrole_id2cat:\n cat = tigrrole_id2cat[cat_id]\n else:\n continue\n\n elif namespace == 'SEED':\n [cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n\n domfam2cat[namespace][domfam] = cat\n if cat not in cat2domfams[namespace]:\n cat2domfams[namespace][cat] = []\n cat2domfams[namespace][cat].append(domfam)\n\n # custom domains\n if params['namespace'] == 'custom':\n\n # add target fams\n target_fams = []\n if 'target_fams' in params['custom_target_fams'] and params['custom_target_fams']['target_fams']:\n for target_fam in params['custom_target_fams']['target_fams']:\n target_fam = target_fam.strip()\n if target_fam == '':\n continue\n\n target_fam = re.sub (\"^cog\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^pf\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^tigr\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^seed\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PFAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P-FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P_FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGRFAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n\n target_fam = re.sub (\"^COG:\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG-\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG_\", \"COG\", target_fam, 
flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG *\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF:\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF-\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF_\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF *\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR:\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR *\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED:\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED-\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED_\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED *\", \"SEED\", target_fam, flags=re.IGNORECASE)\n\n num_id_len = dict()\n num_id_len['COG'] = 4\n num_id_len['PF'] = 5\n num_id_len['TIGR'] = 5\n\n #self.log (console, \"TARGET_FAM A: '\"+target_fam+\"'\") # DEBUG\n \n if target_fam.startswith('SEED'):\n namespaces_reading['SEED'] = True\n target_fam = target_fam.strip()\n target_fam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' ', '_', target_fam)\n else:\n namespace_found = False\n for namespace_iter in ['COG','PF','TIGR']:\n if target_fam.startswith(namespace_iter):\n this_namespace = namespace_iter\n namespaces_reading[this_namespace] = True\n target_fam = re.sub(this_namespace, \"\", target_fam)\n namespace_found = True\n break\n if not namespace_found:\n raise ValueError (\"unrecognized custom domain family: '\"+str(target_fam)+\"'\")\n leading_zeros = ''\n for c_i in range(num_id_len[this_namespace] - len(str(target_fam))):\n leading_zeros += '0'\n target_fam = this_namespace + leading_zeros + target_fam\n\n #self.log (console, \"TARGET_FAM B: '\"+target_fam+\"'\") # DEBUG\n\n target_fams.append(target_fam)\n\n # add extra target fams\n extra_target_fams = []\n extra_target_fam_groups = []\n domfam2group = dict()\n for target_set in ['extra_target_fam_groups_COG', 'extra_target_fam_groups_PFAM', 'extra_target_fam_groups_TIGR', 'extra_target_fam_groups_SEED']:\n if target_set in params['custom_target_fams'] and params['custom_target_fams'][target_set]:\n extra_target_fam_groups.extend (params['custom_target_fams'][target_set])\n\n if extra_target_fam_groups:\n for target_group in extra_target_fam_groups:\n target_group = target_group.strip()\n if target_group == '':\n continue\n\n namespace = re.sub (\":.*$\", \"\", target_group)\n namespaces_reading[namespace] = True\n\n if namespace == 'COG':\n this_group = re.sub (\"COG: \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'PF':\n this_group = re.sub (\"PF: Clan \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'TIGR':\n this_group = re.sub (\"TIGR: role:\", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n this_group = 'TIGR_role:'+this_group\n elif namespace == 'SEED':\n this_group = re.sub (\"SEED: \", \"\", target_group)\n\n for domfam in cat2domfams[namespace][this_group]:\n extra_target_fams.append(domfam)\n domfam2group[domfam] = target_group\n\n # we have our targets\n cats = target_fams + 
extra_target_fams\n\n # store names of targets\n domfam2name = dict()\n for namespace in namespaces_reading.keys():\n domfam2name[namespace] = dict()\n\n if namespace == 'COG':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, cat_class, domfam_name] = line.split(\"\\t\")[0:3]\n domfam2name[namespace][domfam] = domfam_name\n\n elif namespace == 'PF':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, class_id, class_name, domfam_id, domfam_name] = line.split(\"\\t\")[0:5]\n if domfam_name.startswith(domfam_id):\n combo_name = domfam_name\n else:\n combo_name = domfam_id+': '+domfam_name\n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'TIGR':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if domfam_name != '':\n if domfam_desc.startswith(domfam_name):\n combo_name = domfam_desc\n else:\n combo_name = domfam_name+': '+domfam_desc\n else:\n if domfam_desc.startswith(domfam_id):\n combo_name = domfam_desc\n else:\n combo_name = domfam_id+': '+domfam_desc\n if ec_id != '':\n combo_name += ' (EC '+ec_id+')'\n \n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'SEED':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [level1, level2, level3, domfam] = line.split(\"\\t\")[0:4]\n\n domfam_desc = domfam\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n if domfam in domfam2name[namespace]:\n if len(domfam_desc) > len(domfam2name[namespace][domfam]):\n domfam2name[namespace][domfam] = domfam_desc\n else:\n domfam2name[namespace][domfam] = domfam_desc\n\n # just in case\n elif params['namespace'] != 'COG' \\\n and params['namespace'] != 'PF' \\\n and params['namespace'] != 'TIGR' \\\n and params['namespace'] != 'SEED':\n raise ValueError (\"Unknown namespace: '\"+str(params['namespace'])+\"'\")\n\n\n # get genome set from featureSet\n #\n input_ref = params['input_featureSet_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseCollections.FeatureSet\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n featureSet_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch featureSet: \"+input_ref)\n\n\n # get genome refs, object names, sci names, protein-coding gene counts, and SEED annot\n #\n #genome_ids = genomeSet_obj['elements'].keys() # note: genome_id may be meaningless\n genome_refs = []\n genome_ref_seen = dict()\n #for genome_id in genome_ids:\n # genome_refs.append (genomeSet_obj['elements'][genome_id]['ref'])\n for element_id in featureSet_obj['elements'].keys():\n genome_ref = featureSet_obj['elements'][element_id][0]\n if genome_ref not in genome_ref_seen:\n genome_ref_seen[genome_ref] = True\n genome_refs.append(genome_ref)\n\n genome_obj_name_by_ref = dict()\n genome_sci_name_by_ref = dict()\n genome_CDS_count_by_ref = dict()\n uniq_genome_ws_ids = dict()\n\n dom_hits = dict() # initialize dom_hits here because reading SEED within genome\n genes_with_hits_cnt = dict()\n\n for genome_ref in genome_refs:\n\n dom_hits[genome_ref] = dict()\n genes_with_hits_cnt[genome_ref] = dict()\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[genome_ref] = input_name\n\n try:\n genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n # sci name\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n \n # CDS cnt\n cds_cnt = 0\n for feature in genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n cds_cnt += 1\n genome_CDS_count_by_ref[genome_ref] = cds_cnt\n\n # SEED annotations\n #\n #f_cnt = 0 # DEBUG\n if 'SEED' in namespaces_reading:\n for feature in genome_obj['features']:\n\n # filter out genes that aren't in featureSet\n target_feature = False\n #featureSet_element_id = genome_ref+self.genome_feature_id_delim+feature['id']\n featureSet_element_id = feature['id']\n if featureSet_element_id in featureSet_obj['elements']:\n target_feature = True\n\n #if f_cnt % 100 == 0:\n # self.log (console, \"iterating features: \"+str(f_cnt)) # DEBUG\n\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n #if f_cnt % 100 == 0:\n # self.log (console, \"prot: \"+str(feature['protein_translation'])) # DEBUG\n\n if 'function' in feature and feature['function'] != None and feature['function'] != '':\n gene_name = feature['id']\n \n #if f_cnt % 100 == 0:\n # self.log (console, \"fxn: '\"+str(feature['function'])+\"'\") # DEBUG\n\n # store assignments for gene\n for namespace in ['SEED']:\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if not target_feature:\n continue\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n dom_hits[genome_ref][gene_name][namespace] = dict()\n\n domfam_list = []\n annot_set = feature['function'].strip().split(';')\n for annot in annot_set:\n annot_set_2 = annot.strip().split('@')\n for annot2 in annot_set_2:\n domfam = annot2.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n domfam_list.append(domfam)\n #if f_cnt % 100 == 0:\n # self.log (console, \"domfam: '\"+str(domfam)+\"'\") # DEBUG\n\n if top_hit_flag: # does SEED give more than one function?\n dom_hits[genome_ref][gene_name][namespace][domfam_list[0]] = True\n else:\n for domfam in domfam_list:\n dom_hits[genome_ref][gene_name][namespace][domfam] = True\n\n #f_cnt += 1 # DEBUG\n\n\n # capture domain hits to genes within each namespace\n #\n if params['namespace'] != 'SEED':\n dom_annot_found = dict()\n\n KBASE_DOMAINHIT_GENE_ID_I = 0\n KBASE_DOMAINHIT_GENE_BEG_I = 1 # not used\n KBASE_DOMAINHIT_GENE_END_I = 2 # not used\n KBASE_DOMAINHIT_GENE_STRAND_I = 3 # not used\n KBASE_DOMAINHIT_GENE_HITS_DICT_I = 4\n KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J = 0\n KBASE_DOMAINHIT_GENE_HITS_DICT_END_J = 1\n KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J = 2\n KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J = 3\n KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J = 4\n\n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT A: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n for ws_id in uniq_genome_ws_ids.keys():\n try:\n dom_annot_obj_info_list = 
wsClient.list_objects({'ids':[ws_id],'type':\"KBaseGeneFamilies.DomainAnnotation\"})\n except Exception as e:\n raise ValueError (\"Unable to list DomainAnnotation objects from workspace: \"+str(ws_id)+\" \"+str(e))\n\n for info in dom_annot_obj_info_list:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n \n dom_annot_ref = str(info[WSID_I])+'/'+str(info[OBJID_I])+'/'+str(info[VERSION_I])\n try:\n domain_data = wsClient.get_objects([{'ref':dom_annot_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch domain annotation: \"+dom_annot_ref)\n\n # read domain data object\n genome_ref = domain_data['genome_ref']\n if genome_ref not in genome_refs:\n continue\n dom_annot_found[genome_ref] = True\n\n if genome_ref not in dom_hits:\n dom_hits[genome_ref] = dict()\n\n if genome_ref not in genes_with_hits_cnt:\n genes_with_hits_cnt[genome_ref] = dict()\n\n for scaffold_id_iter in domain_data['data'].keys():\n for CDS_domain_list in domain_data['data'][scaffold_id_iter]:\n gene_ID = CDS_domain_list[KBASE_DOMAINHIT_GENE_ID_I]\n #gene_name = re.sub ('^'+genome_object_name+'.', '', gene_ID) \n gene_name = gene_ID\n #(contig_name, gene_name) = (gene_ID[0:gene_ID.index(\".\")], gene_ID[gene_ID.index(\".\")+1:])\n #print (\"DOMAIN_HIT: \"+contig_name+\" \"+gene_name) # DEBUG\n #print (\"DOMAIN_HIT for gene: \"+gene_name) # DEBUG\n #gene_beg = CDS_domain_list[KBASE_DOMAINHIT_GENE_BEG_I]\n #gene_end = CDS_domain_list[KBASE_DOMAINHIT_GENE_END_I]\n #gene_strand = CDS_domain_list[KBASE_DOMAINHIT_GENE_STRAND_I]\n gene_hits_dict = CDS_domain_list[KBASE_DOMAINHIT_GENE_HITS_DICT_I]\n\n dom_hits_by_namespace = dict()\n top_hit_evalue_by_namespace = dict()\n top_hit_dom_by_namespace = dict()\n\n for namespace in namespace_classes:\n dom_hits_by_namespace[namespace] = dict()\n top_hit_evalue_by_namespace[namespace] = 100\n top_hit_dom_by_namespace[namespace] = None\n\n for domfam in gene_hits_dict.keys():\n if domfam.startswith('PF'):\n domfam_clean = re.sub('\\.[^\\.]*$','',domfam)\n else:\n domfam_clean = domfam\n known_namespace = False\n for this_namespace in namespace_classes:\n if domfam.startswith(this_namespace):\n namespace = this_namespace\n known_namespace = True\n if not known_namespace:\n continue\n\n for hit in gene_hits_dict[domfam]:\n beg = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J])\n end = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_END_J])\n e_value = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J])\n bit_score = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J])\n aln_perc = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J])\n\n if e_value_thresh != None and e_value > e_value_thresh:\n continue\n if top_hit_flag:\n if top_hit_dom_by_namespace[namespace] == None \\\n or top_hit_evalue_by_namespace[namespace] > e_value:\n top_hit_dom_by_namespace[namespace] = domfam_clean\n top_hit_evalue_by_namespace[namespace] = e_value\n \n dom_hits_by_namespace[namespace][domfam_clean] = True\n\n # store assignments for gene\n for namespace in namespace_classes:\n if namespace == 'SEED':\n continue\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n if dom_hits_by_namespace[namespace]:\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n # filter out genes that aren't in featureSet\n #featureSet_element_id = genome_ref+self.genome_feature_id_delim+gene_ID\n featureSet_element_id = gene_ID\n if featureSet_element_id not in 
featureSet_obj['elements']:\n continue\n \n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n \n if top_hit_flag:\n dom_hits[genome_ref][gene_name][namespace] = { top_hit_dom_by_namespace[namespace]: True }\n else:\n dom_hits[genome_ref][gene_name][namespace] = dom_hits_by_namespace[namespace]\n\n # make sure we have domain annotations for all genomes\n missing_annot = []\n for genome_ref in genome_refs:\n if genome_ref not in dom_annot_found:\n missing_annot.append(\"\\t\"+'MISSING DOMAIN ANNOTATION FOR: '+genome_ref)\n if missing_annot:\n error_msg = \"ABORT: You must run the DomainAnnotation App first\\n\"\n error_msg += \"\\n\".join(missing_annot)\n raise ValueError (error_msg)\n \n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT B: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n \n # calculate table\n #\n table_data = dict()\n INSANE_VALUE = 10000000000000000\n overall_low_val = INSANE_VALUE\n overall_high_val = -INSANE_VALUE\n\n # count raw\n for genome_ref in genome_refs:\n if genome_ref not in table_data:\n table_data[genome_ref] = dict()\n for cat in cats:\n table_data[genome_ref][cat] = 0\n\n # custom\n if params['namespace'] == 'custom':\n for cat in cats:\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n if cat in dom_hits[genome_ref][gene_name][namespace]:\n table_data[genome_ref][cat] += 1\n\n # high level summation\n else:\n namespace = params['namespace']\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n for domfam in dom_hits[genome_ref][gene_name][namespace].keys():\n #self.log(console, \"DOMFAM: '\"+str(domfam)+\"'\") # DEBUG\n\n if domfam in domfam2cat[namespace]:\n cat = domfam2cat[namespace][domfam]\n #self.log(console, \"CAT: '\"+str(cat)+\"'\") # DEBUG\n if cat in cats:\n #self.log(console, \"CAT_FOUND: '\"+str(cat)+\"'\") # DEBUG\n table_data[genome_ref][cat] += 1\n \n # adjust to percs\n if params['count_category'].startswith('perc'):\n for genome_ref in genome_refs:\n for cat in cats:\n if params['count_category'] == 'perc_annot':\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n else:\n namespace = params['namespace']\n total_genes = genes_with_hits_cnt[genome_ref][namespace]\n else:\n total_genes = genome_CDS_count_by_ref[genome_ref]\n\n table_data[genome_ref][cat] /= float(total_genes)\n table_data[genome_ref][cat] *= 100.0\n\n # determine high and low val\n for genome_ref in genome_refs:\n for cat in cats:\n val = table_data[genome_ref][cat]\n if val == 0: continue\n #self.log (console, \"HIGH VAL SCAN CAT: '\"+cat+\"' VAL: '\"+str(val)+\"'\") # DEBUG\n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n if val > overall_high_val:\n overall_high_val = val\n if val < overall_low_val:\n overall_low_val = val\n if overall_high_val == -INSANE_VALUE:\n raise ValueError (\"unable to find any counts\")\n\n\n # determine cats with a value and build group\n #\n cat_seen = dict()\n group_size = dict()\n group_size_with_blanks = dict()\n group_order = []\n group_order_with_blanks = []\n for cat in cats:\n cat_seen[cat] = False\n if 
params['namespace'] == 'custom':\n # get cats seen and group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = None\n if extra_target_fam_groups:\n if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = None\n if extra_target_fam_groups:\n if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n else:\n namespace = params['namespace']\n # get group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != None and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n\n\n # build report\n #\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n\n\n # build html report\n sp = ' '\n text_color = \"#606060\"\n text_color_2 = \"#606060\"\n head_color_1 = \"#eeeeee\"\n head_color_2 = \"#eeeeee\"\n border_color = \"#cccccc\"\n border_cat_color = \"#ffccff\"\n #graph_color = \"lightblue\"\n #graph_width = 100\n #graph_char = \".\"\n graph_char = sp\n color_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e']\n max_color = len(color_list)-1\n cat_disp_trunc_len = 40\n cell_width = '10px'\n if len(genome_refs) > 20:\n graph_gen_fontsize = \"1\"\n elif len(genome_refs) > 10:\n graph_gen_fontsize = \"2\"\n else:\n graph_gen_fontsize = \"3\"\n if len(cats) > 20:\n graph_cat_fontsize = \"1\"\n elif len(cats) > 5:\n graph_cat_fontsize = \"2\"\n else:\n graph_cat_fontsize = \"3\"\n if int(graph_cat_fontsize) < int(graph_gen_fontsize):\n cell_fontsize = graph_gen_fontsize = graph_cat_fontsize\n else:\n cell_fontsize = graph_cat_fontsize = graph_gen_fontsize\n graph_padding = \"5\"\n graph_spacing = \"3\"\n #border = \"1\"\n border = \"0\"\n #row_spacing = \"-2\"\n num_rows = len(genome_refs)\n show_groups = False\n if len(group_order) > 0: show_groups = True\n\n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Functional Domain Profile</title>']\n html_report_lines += ['<style>']\n html_report_lines += [\".vertical-text {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 0.65em;\\n}\\n.vertical-text__inner {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 
1.1;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += [\".vertical-text_title {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 1.0em;\\n}\\n.vertical-text__inner_title {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.0;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner_title:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += ['</style>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n\n # genomes as rows\n if 'vertical' in params and params['vertical'] == \"1\":\n # table header\n html_report_lines += ['<table cellpadding='+graph_padding+' cellspacing='+graph_spacing+' border='+border+'>']\n corner_rowspan = \"1\"\n if show_groups: corner_rowspan = \"2\"\n label = ''\n if params['namespace'] != 'custom':\n label = params['namespace']\n if label == 'PF':\n label = 'PFAM'\n elif label == 'TIGR':\n label = 'TIGRFAM'\n html_report_lines += ['<tr><td valign=bottom align=right rowspan='+corner_rowspan+'><div class=\"vertical-text_title\"><div class=\"vertical-text__inner_title\"><font color=\"'+text_color+'\">'+label+'</font></div></div></td>']\n \n # group headers\n if show_groups:\n for cat_group in group_order:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n max_group_width = 3*group_size[cat_group]\n if len(cat_group) > max_group_width:\n new_cat_group_words = []\n sentence_len = 0\n for w_i,word in enumerate(cat_group_words):\n new_cat_group_words.append(word)\n sentence_len += len(word)\n if w_i < len(cat_group_words)-1:\n if sentence_len + 1 + len(cat_group_words[w_i+1]) > max_group_width:\n new_cat_group_words[w_i] += '<br>'\n sentence_len = 0\n cat_group_words = new_cat_group_words\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" \".join(cat_group_words)\n\n # DEBUG\n #if cat_group not in group_size:\n # self.log(console, \"CAT_GROUP: '\"+str(cat_group)+\"'\") # DEBUG\n # self.log(console, \"CAT_GROUP_DISP: '\"+str(cat_group_disp)+\"'\") # DEBUG\n # for cg in group_size:\n # self.log(console, \"CG: '\"+str(cg)+\"'\") # DEBUG\n\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white colspan='+str(group_size[cat_group])+'></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_1+'\"valign=middle align=center colspan='+str(group_size[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n html_report_lines += ['</tr><tr>']\n\n # column headers\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub (\"\\d*$\", \"\", cat)\n cell_title = domfam2name[namespace][cat].strip()\n cat_disp = cat\n cat_disp = re.sub ('^SEED', 'SEED:', cat_disp)\n else:\n cell_title = cat2name[params['namespace']][cat].strip()\n cat_disp = cat\n cat_disp = re.sub (\"TIGR_\", \"\", cat_disp)\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<td style=\"border-right:solid 
2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_2+'\"title=\"'+cell_title+'\" valign=bottom align=center>']\n if params['namespace'] != 'COG':\n html_report_lines += ['<div class=\"vertical-text\"><div class=\"vertical-text__inner\">']\n html_report_lines += ['<font color=\"'+text_color_2+'\" size='+graph_cat_fontsize+'><b>']\n #for c_i,c in enumerate(cat_disp):\n # if c_i < len(cat_disp)-1:\n # html_report_lines += [c+'<br>']\n # else:\n # html_report_lines += [c]\n html_report_lines += [cat_disp]\n html_report_lines += ['</b></font>']\n if params['namespace'] != 'COG':\n html_report_lines += ['</div></div>']\n html_report_lines += ['</td>']\n html_report_lines += ['</tr>']\n \n # rest of rows\n for genome_ref in genome_refs:\n genome_sci_name = genome_sci_name_by_ref[genome_ref]\n html_report_lines += ['<tr>']\n html_report_lines += ['<td align=right><font color=\"'+text_color+'\" size='+graph_gen_fontsize+'><b><nobr>'+genome_sci_name+'</nobr></b></font></td>']\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n val = table_data[genome_ref][cat]\n if val == 0:\n cell_color = 'white'\n else: \n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n cell_color_i = max_color - int(round(max_color * (val-overall_low_val) / float(overall_high_val-overall_low_val)))\n c = color_list[cell_color_i]\n cell_color = '#'+c+c+c+c+'FF'\n\n if params['count_category'].startswith('perc'):\n cell_val = str(\"%.3f\"%table_data[genome_ref][cat])\n cell_val += '%'\n else:\n cell_val = str(table_data[genome_ref][cat])\n\n if 'heatmap' in params and params['heatmap'] == '1':\n if table_data[genome_ref][cat] == 0:\n this_text_color = text_color\n #this_graph_char = \"0\"\n this_graph_char = sp\n else:\n this_text_color = cell_color\n this_graph_char = graph_char\n html_report_lines += ['<td align=center valign=middle title=\"'+cell_val+'\" style=\"width:'+cell_width+'\" bgcolor=\"'+cell_color+'\"><font color=\"'+this_text_color+'\" size='+cell_fontsize+'>'+this_graph_char+'</font></td>']\n else:\n html_report_lines += ['<td align=center valign=middle style=\"'+cell_width+'; border-right:solid 2px '+border_color+'; border-bottom:solid 2px '+border_color+'\"><font color=\"'+text_color+'\" size='+cell_fontsize+'>'+cell_val+'</font></td>']\n\n html_report_lines += ['</tr>']\n html_report_lines += ['</table>']\n\n # genomes as columns\n else:\n raise ValueError (\"Do not yet support Genomes as columns\")\n\n\n # key table\n html_report_lines += ['<p>']\n html_report_lines += ['<table cellpadding=3 cellspacing=2 border='+border+'>']\n html_report_lines += ['<tr><td valign=middle align=left colspan=3 style=\"border-bottom:solid 4px '+border_color+'\"><font color=\"'+text_color+'\"><b>KEY</b></font></td></tr>']\n\n if show_groups:\n group_cat_i = 0\n for cat_group in group_order_with_blanks:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" <br>\".join(cat_group_words)\n cat_group_disp += sp\n\n html_report_lines += ['<tr>']\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white rowspan='+str(group_size_with_blanks[cat_group])+' style=\"border-right:solid 4px 
'+border_color+'\"></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 4px '+border_color+'\" valign=top align=right rowspan='+str(group_size_with_blanks[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n # add first cat for group\n first_cat = cats[group_cat_i]\n cell_color = 'white'\n #if not cat_seen[first_cat] and not show_blanks:\n if not cat_seen[first_cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = first_cat\n if first_cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', first_cat)\n cat_disp = re.sub ('^SEED', 'SEED:', first_cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = first_cat\n desc = cat2name[namespace][first_cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n # add rest of cats in group\n for c_i in range(group_cat_i, group_cat_i+group_size_with_blanks[cat_group]-1):\n cat = cats[c_i]\n cell_color = 'white'\n #if not cat_seen[cat] and not show_blanks:\n if not cat_seen[cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n \n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n else:\n for cat in cats:\n cell_color = 'white'\n if not cat_seen[cat] and not show_blanks:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', domfam)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+desc+'</font></td>']\n html_report_lines += 
['</tr>']\n\n\n html_report_lines += ['</table>']\n\n # close\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n html_file = os.path.join (output_dir, 'domain_profile_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n upload_ret = dfu.file_to_shock({'file_path': html_file,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading html_report to shock')\n\n reportObj['html_links'] = [{'shock_id': upload_ret['shock_id'],\n 'name': 'domain_profile_report.html',\n 'label': 'Functional Domain Profile report'}\n ]\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_fxn_profile_featureSet\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_fxn_profile_featureSet return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_fxn_profile_phylo(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_fxn_profile_phylo_Input\"\n (view_fxn_profile_phylo() ** ** show a table/heatmap of general\n categories or custom gene families for a set of Genomes using the\n species tree) -> structure: parameter \"workspace_name\" of type\n \"workspace_name\" (** Common types), parameter\n \"input_speciesTree_ref\" of type \"data_obj_ref\", parameter\n \"namespace\" of String, parameter \"custom_target_fams\" of type\n \"CustomTargetFams\" (parameter groups) -> structure: parameter\n \"target_fams\" of list of String, parameter\n \"extra_target_fam_groups_COG\" of list of String, parameter\n \"extra_target_fam_groups_PFAM\" of list of String, parameter\n \"extra_target_fam_groups_TIGR\" of list of String, parameter\n \"extra_target_fam_groups_SEED\" of list of String, parameter\n \"count_category\" of String, parameter \"heatmap\" of type \"bool\",\n parameter \"vertical\" of type \"bool\", parameter \"top_hit\" of type\n \"bool\", parameter \"e_value\" of Double, parameter \"log_base\" of\n Double, parameter \"show_blanks\" of type \"bool\"\n :returns: instance of type \"view_fxn_profile_phylo_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_fxn_profile_phylo\n\n ### STEP 0: basic init\n console = []\n self.log(console, 'Running view_fxn_profile_phylo(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_speciesTree_ref',\n 'namespace'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n if params['namespace'] == 'custom':\n if 
('custom_target_fams' not in params or not params['custom_target_fams']) \\\n or ( \\\n ('target_fams' not in params['custom_target_fams'] or not params['custom_target_fams']['target_fams']) \\\n and ('extra_target_fam_groups_COG' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_COG']) \\\n and ('extra_target_fam_groups_PFAM' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_PFAM']) \\\n and ('extra_target_fam_groups_TIGR' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_TIGR']) \\\n and ('extra_target_fam_groups_SEED' not in params['custom_target_fams'] or not params['custom_target_fams']['extra_target_fam_groups_SEED'])\n ):\n \n raise ValueError (\"Must define either param: 'target_fams' or 'extra_target_fam_groups' if using CUSTOM targets\")\n\n # base config\n namespace_classes = ['COG', 'PF', 'TIGR', 'SEED']\n show_blanks = False\n if 'show_blanks' in params and params['show_blanks'] == '1':\n show_blanks = True\n e_value_thresh = None\n if 'e_value' in params and params['e_value'] != None and params['e_value'] != '':\n e_value_thresh = float (params['e_value'])\n top_hit_flag = False\n if 'top_hit' in params and params['top_hit'] != None and params['top_hit'] != '' and params['top_hit'] != 0:\n top_hit_flag = True\n\n domain_desc_basepath = os.path.abspath('/kb/module/data/domain_desc')\n domain_to_cat_map_path = dict()\n domain_cat_names_path = dict()\n domain_fam_names_path = dict()\n domain_to_cat_map_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_cat_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014_funcat.tsv')\n domain_fam_names_path['COG'] = os.path.join(domain_desc_basepath, 'COG_2014.tsv')\n domain_to_cat_map_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_cat_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans_names.tsv')\n domain_fam_names_path['PF'] = os.path.join(domain_desc_basepath, 'Pfam-A.clans.tsv')\n domain_to_cat_map_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_cat_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrrole2go.txt')\n #domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'tigrfams2go.txt')\n domain_fam_names_path['TIGR'] = os.path.join(domain_desc_basepath, 'TIGRInfo.tsv')\n domain_to_cat_map_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_funcat.txt')\n #domain_cat_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n domain_fam_names_path['SEED'] = os.path.join(domain_desc_basepath, 'SEED_subsys.txt')\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n provenance[0]['input_ws_objects']=[str(params['input_speciesTree_ref'])]\n\n\n # set the output paths\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n html_output_dir = os.path.join(output_dir,'html_output')\n if not os.path.exists(html_output_dir):\n os.makedirs(html_output_dir)\n\n\n # configure categories\n #\n cats = []\n cat2name = dict()\n cat2group = dict()\n domfam2cat = dict()\n cat2domfams = dict()\n namespaces_reading = dict()\n\n # categories are high-level summations\n 
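# Two modes: for a standard namespace (COG / PF / TIGR / SEED) the profile rows are the\n        # high-level functional categories read from the category-name files parsed below, while\n        # namespace == 'custom' profiles the individual domain families selected in\n        # params['custom_target_fams'] (assembled further down in this method).\n        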
if params['namespace'] != 'custom':\n for namespace in ['COG','PF','TIGR','SEED']:\n if params['namespace'] == namespace:\n namespaces_reading[namespace] = True\n\n # read all mappings between groups and domfams\n for namespace in ['COG','PF','TIGR','SEED']:\n\n cat2name[namespace] = dict()\n cat2group[namespace] = dict()\n domfam2cat[namespace] = dict()\n cat2domfams[namespace] = dict()\n\n # get high-level cats\n tigrrole_id2cat = dict()\n with open (domain_cat_names_path[namespace], 'r', 0) as dom_cat_handle:\n for line in dom_cat_handle.readlines():\n line = line.strip()\n \n if namespace == 'COG':\n [cat, cat_group, cat_name] = line.split(\"\\t\")[0:3]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n elif namespace == 'PF':\n [cat, cat_name] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = None\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [cat, cat_id, cat_group, cat_name_plus_go_terms] = line.split(\"\\t\")[0:4]\n tigrrole_id2cat[cat_id] = cat\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_name = re.sub (' *\\> GO:.*$', '', cat_name_plus_go_terms)\n cat2name[namespace][cat] = cat_name\n cat2group[namespace][cat] = cat_group\n\n # DEBUG\n #self.log(console, \"CAT: '\"+str(cat)+\"' NAME: '\"+str(cat_name)+\"' GROUP: '\"+str(cat_group)+\"'\")\n\n elif namespace == 'SEED':\n #[cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n [cat_group, cat] = line.split(\"\\t\")[0:2]\n if namespace == params['namespace'] and cat not in cats:\n cats.append(cat)\n cat_disp = re.sub ('_', ' ', cat)\n cat2name[namespace][cat] = cat_disp\n cat2group[namespace][cat] = cat_group\n\n # get domfam to cat map, and vice versa\n with open (domain_to_cat_map_path[namespace], 'r', 0) as dom2cat_map_handle:\n for line in dom2cat_map_handle.readlines():\n line = line.strip()\n\n if namespace == 'COG':\n [domfam, cat_str, cat_name] = line.split(\"\\t\")[0:3]\n cat = cat_str[0] # only use first cat\n\n elif namespace == 'PF':\n [domfam, cat, cat_name, dom_id, dom_name] = line.split(\"\\t\")[0:5]\n\n elif namespace == 'TIGR':\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if cat_id != '' and int(cat_id) != 0 and cat_id in tigrrole_id2cat:\n cat = tigrrole_id2cat[cat_id]\n else:\n continue\n\n elif namespace == 'SEED':\n [cat_group, cat_subgroup, cat, domfam] = line.split(\"\\t\")[0:4]\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n\n domfam2cat[namespace][domfam] = cat\n if cat not in cat2domfams[namespace]:\n cat2domfams[namespace][cat] = []\n cat2domfams[namespace][cat].append(domfam)\n\n # custom domains\n if params['namespace'] == 'custom':\n\n # add target fams\n target_fams = []\n if 'target_fams' in params['custom_target_fams'] and params['custom_target_fams']['target_fams']:\n for target_fam in params['custom_target_fams']['target_fams']:\n target_fam = target_fam.strip()\n if target_fam == '':\n continue\n\n target_fam = re.sub (\"^cog\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub 
(\"^pf\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^tigr\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^seed\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PFAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P-FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^P_FAM\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGRFAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_FAM\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n\n target_fam = re.sub (\"^COG:\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG-\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG_\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^COG *\", \"COG\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF:\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF-\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF_\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^PF *\", \"PF\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR:\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR-\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR_\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^TIGR *\", \"TIGR\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED:\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED-\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED_\", \"SEED\", target_fam, flags=re.IGNORECASE)\n target_fam = re.sub (\"^SEED *\", \"SEED\", target_fam, flags=re.IGNORECASE)\n\n num_id_len = dict()\n num_id_len['COG'] = 4\n num_id_len['PF'] = 5\n num_id_len['TIGR'] = 5\n\n #self.log (console, \"TARGET_FAM A: '\"+target_fam+\"'\") # DEBUG\n \n if target_fam.startswith('SEED'):\n namespaces_reading['SEED'] = True\n target_fam = target_fam.strip()\n target_fam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', target_fam)\n target_fam = re.sub (' ', '_', target_fam)\n else:\n namespace_found = False\n for namespace_iter in ['COG','PF','TIGR']:\n if target_fam.startswith(namespace_iter):\n this_namespace = namespace_iter\n namespaces_reading[this_namespace] = True\n target_fam = re.sub(this_namespace, \"\", target_fam)\n namespace_found = True\n break\n if not namespace_found:\n raise ValueError (\"unrecognized custom domain family: '\"+str(target_fam)+\"'\")\n leading_zeros = ''\n for c_i in range(num_id_len[this_namespace] - len(str(target_fam))):\n leading_zeros += '0'\n target_fam = this_namespace + leading_zeros + target_fam\n\n #self.log (console, \"TARGET_FAM B: '\"+target_fam+\"'\") # DEBUG\n\n target_fams.append(target_fam)\n\n # add extra target fams\n extra_target_fams = []\n extra_target_fam_groups = []\n domfam2group = dict()\n for target_set in ['extra_target_fam_groups_COG', 'extra_target_fam_groups_PFAM', 'extra_target_fam_groups_TIGR', 'extra_target_fam_groups_SEED']:\n if target_set in params['custom_target_fams'] and params['custom_target_fams'][target_set]:\n extra_target_fam_groups.extend (params['custom_target_fams'][target_set])\n\n if extra_target_fam_groups:\n for target_group in extra_target_fam_groups:\n 
target_group = target_group.strip()\n if target_group == '':\n continue\n\n namespace = re.sub (\":.*$\", \"\", target_group)\n namespaces_reading[namespace] = True\n\n if namespace == 'COG':\n this_group = re.sub (\"COG: \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'PF':\n this_group = re.sub (\"PF: Clan \", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n elif namespace == 'TIGR':\n this_group = re.sub (\"TIGR: role:\", \"\", target_group)\n this_group = re.sub (\":.*$\", \"\", this_group)\n this_group = 'TIGR_role:'+this_group\n elif namespace == 'SEED':\n this_group = re.sub (\"SEED: \", \"\", target_group)\n\n for domfam in cat2domfams[namespace][this_group]:\n extra_target_fams.append(domfam)\n domfam2group[domfam] = target_group\n\n # we have our targets\n cats = target_fams + extra_target_fams\n\n # store names of targets\n domfam2name = dict()\n for namespace in namespaces_reading.keys():\n domfam2name[namespace] = dict()\n\n if namespace == 'COG':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, cat_class, domfam_name] = line.split(\"\\t\")[0:3]\n domfam2name[namespace][domfam] = domfam_name\n\n elif namespace == 'PF':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [domfam, class_id, class_name, domfam_id, domfam_name] = line.split(\"\\t\")[0:5]\n if domfam_name.startswith(domfam_id):\n combo_name = domfam_name\n else:\n combo_name = domfam_id+': '+domfam_name\n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'TIGR':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n if line.startswith('!'):\n continue\n [domfam_id, domfam, cat_group, cat_id, domfam_name, ec_id, domfam_desc] = line.split(\"\\t\")[0:7]\n if domfam_name != '':\n if domfam_desc.startswith(domfam_name):\n combo_name = domfam_desc\n else:\n combo_name = domfam_name+': '+domfam_desc\n else:\n if domfam_desc.startswith(domfam_id):\n combo_name = domfam_desc\n else:\n combo_name = domfam_id+': '+domfam_desc\n if ec_id != '':\n combo_name += ' (EC '+ec_id+')'\n \n domfam2name[namespace][domfam] = combo_name\n\n elif namespace == 'SEED':\n with open (domain_fam_names_path[namespace], 'r', 0) as dom_fam_handle:\n for line in dom_fam_handle.readlines():\n line = line.strip()\n [level1, level2, level3, domfam] = line.split(\"\\t\")[0:4]\n\n domfam_desc = domfam\n domfam = domfam.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n if domfam in domfam2name[namespace]:\n if len(domfam_desc) > len(domfam2name[namespace][domfam]):\n domfam2name[namespace][domfam] = domfam_desc\n else:\n domfam2name[namespace][domfam] = domfam_desc\n\n # just in case\n elif params['namespace'] != 'COG' \\\n and params['namespace'] != 'PF' \\\n and params['namespace'] != 'TIGR' \\\n and params['namespace'] != 'SEED':\n raise ValueError (\"Unknown namespace: '\"+str(params['namespace'])+\"'\")\n\n\n # get speciesTree\n #\n input_ref = params['input_speciesTree_ref']\n speciesTree_name = None\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, 
SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n speciesTree_name = input_obj_info[NAME_I]\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseTrees.Tree\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n speciesTree_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch speciesTree: \"+input_ref)\n\n\n # get genome_refs from speciesTree and instantiate ETE3 tree and order\n #\n genome_refs = []\n genome_id_by_ref = dict()\n genome_ref_by_id = dict()\n for genome_id in speciesTree_obj['default_node_labels'].keys():\n genome_ref = speciesTree_obj['ws_refs'][genome_id]['g'][0]\n genome_id_by_ref[genome_ref] = genome_id\n genome_ref_by_id[genome_id] = genome_ref\n\n species_tree = ete3.Tree(speciesTree_obj['tree'])\n species_tree.ladderize()\n for genome_id in species_tree.get_leaf_names():\n genome_refs.append(genome_ref_by_id[genome_id])\n\n\n # get object names, sci names, protein-coding gene counts, and SEED annot\n #\n genome_obj_name_by_ref = dict()\n genome_sci_name_by_ref = dict()\n genome_sci_name_by_id = dict()\n genome_CDS_count_by_ref = dict()\n uniq_genome_ws_ids = dict()\n\n dom_hits = dict() # initialize dom_hits here because reading SEED within genome\n genes_with_hits_cnt = dict()\n\n for genome_ref in genome_refs:\n\n dom_hits[genome_ref] = dict()\n genes_with_hits_cnt[genome_ref] = dict()\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[genome_ref] = input_name\n\n try:\n genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n # sci name\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n genome_sci_name_by_id[genome_id_by_ref[genome_ref]] = genome_obj['scientific_name']\n \n # CDS cnt\n cds_cnt = 0\n for feature in genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n cds_cnt += 1\n genome_CDS_count_by_ref[genome_ref] = cds_cnt\n\n # SEED annotations\n #\n #f_cnt = 0 # DEBUG\n if 'SEED' in namespaces_reading:\n for feature in genome_obj['features']:\n #if f_cnt % 100 == 0:\n # self.log (console, \"iterating features: \"+str(f_cnt)) # DEBUG\n\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n #if f_cnt % 100 == 0:\n # self.log (console, \"prot: \"+str(feature['protein_translation'])) # DEBUG\n\n if 'function' in feature and feature['function'] != None and feature['function'] != '':\n gene_name = feature['id']\n \n #if f_cnt % 100 == 0:\n # self.log (console, \"fxn: '\"+str(feature['function'])+\"'\") # DEBUG\n\n # store assignments for gene\n for namespace in ['SEED']:\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n dom_hits[genome_ref][gene_name][namespace] = dict()\n\n domfam_list = []\n annot_set = feature['function'].strip().split(';')\n for annot in annot_set:\n annot_set_2 = annot.strip().split('@')\n for annot2 in annot_set_2:\n domfam = annot2.strip()\n domfam = re.sub (' *\\#.*$', '', domfam)\n domfam = re.sub (' *\\(EC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' *\\(TC [\\d\\.\\-\\w]*\\) *$', '', domfam)\n domfam = re.sub (' ', '_', domfam)\n domfam = 'SEED'+domfam\n domfam_list.append(domfam)\n #if f_cnt % 100 == 0:\n # self.log (console, \"domfam: '\"+str(domfam)+\"'\") # DEBUG\n\n if top_hit_flag: # does SEED give more than one function?\n dom_hits[genome_ref][gene_name][namespace][domfam_list[0]] = True\n else:\n for domfam in domfam_list:\n dom_hits[genome_ref][gene_name][namespace][domfam] = True\n\n #f_cnt += 1 # DEBUG\n\n\n # capture domain hits to genes within each namespace\n #\n if params['namespace'] != 'SEED':\n dom_annot_found = dict()\n\n KBASE_DOMAINHIT_GENE_ID_I = 0\n KBASE_DOMAINHIT_GENE_BEG_I = 1 # not used\n KBASE_DOMAINHIT_GENE_END_I = 2 # not used\n KBASE_DOMAINHIT_GENE_STRAND_I = 3 # not used\n KBASE_DOMAINHIT_GENE_HITS_DICT_I = 4\n KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J = 0\n KBASE_DOMAINHIT_GENE_HITS_DICT_END_J = 1\n KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J = 2\n KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J = 3\n KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J = 4\n\n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT A: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n for ws_id in uniq_genome_ws_ids.keys():\n try:\n dom_annot_obj_info_list = wsClient.list_objects({'ids':[ws_id],'type':\"KBaseGeneFamilies.DomainAnnotation\"})\n except Exception as e:\n raise ValueError (\"Unable to list DomainAnnotation objects from workspace: \"+str(ws_id)+\" \"+str(e))\n\n for info in dom_annot_obj_info_list:\n [OBJID_I, 
NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n \n dom_annot_ref = str(info[WSID_I])+'/'+str(info[OBJID_I])+'/'+str(info[VERSION_I])\n try:\n domain_data = wsClient.get_objects([{'ref':dom_annot_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch domain annotation: \"+dom_annot_ref)\n\n # read domain data object\n genome_ref = domain_data['genome_ref']\n if genome_ref not in genome_refs:\n continue\n dom_annot_found[genome_ref] = True\n\n if genome_ref not in dom_hits:\n dom_hits[genome_ref] = dict()\n\n if genome_ref not in genes_with_hits_cnt:\n genes_with_hits_cnt[genome_ref] = dict()\n\n for scaffold_id_iter in domain_data['data'].keys():\n for CDS_domain_list in domain_data['data'][scaffold_id_iter]:\n gene_ID = CDS_domain_list[KBASE_DOMAINHIT_GENE_ID_I]\n #gene_name = re.sub ('^'+genome_object_name+'.', '', gene_ID) \n gene_name = gene_ID\n #(contig_name, gene_name) = (gene_ID[0:gene_ID.index(\".\")], gene_ID[gene_ID.index(\".\")+1:])\n #print (\"DOMAIN_HIT: \"+contig_name+\" \"+gene_name) # DEBUG\n #print (\"DOMAIN_HIT for gene: \"+gene_name) # DEBUG\n #gene_beg = CDS_domain_list[KBASE_DOMAINHIT_GENE_BEG_I]\n #gene_end = CDS_domain_list[KBASE_DOMAINHIT_GENE_END_I]\n #gene_strand = CDS_domain_list[KBASE_DOMAINHIT_GENE_STRAND_I]\n gene_hits_dict = CDS_domain_list[KBASE_DOMAINHIT_GENE_HITS_DICT_I]\n\n dom_hits_by_namespace = dict()\n top_hit_evalue_by_namespace = dict()\n top_hit_dom_by_namespace = dict()\n\n for namespace in namespace_classes:\n dom_hits_by_namespace[namespace] = dict()\n top_hit_evalue_by_namespace[namespace] = 100\n top_hit_dom_by_namespace[namespace] = None\n\n for domfam in gene_hits_dict.keys():\n if domfam.startswith('PF'):\n domfam_clean = re.sub('\\.[^\\.]*$','',domfam)\n else:\n domfam_clean = domfam\n known_namespace = False\n for this_namespace in namespace_classes:\n if domfam.startswith(this_namespace):\n namespace = this_namespace\n known_namespace = True\n if not known_namespace:\n continue\n\n for hit in gene_hits_dict[domfam]:\n beg = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BEG_J])\n end = int(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_END_J])\n e_value = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_EVALUE_J])\n bit_score = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_BITSCORE_J])\n aln_perc = float(hit[KBASE_DOMAINHIT_GENE_HITS_DICT_ALNPERC_J])\n\n if e_value_thresh != None and e_value > e_value_thresh:\n continue\n if top_hit_flag:\n if top_hit_dom_by_namespace[namespace] == None \\\n or top_hit_evalue_by_namespace[namespace] > e_value:\n top_hit_dom_by_namespace[namespace] = domfam_clean\n top_hit_evalue_by_namespace[namespace] = e_value\n \n dom_hits_by_namespace[namespace][domfam_clean] = True\n\n # store assignments for gene\n for namespace in namespace_classes:\n if namespace == 'SEED':\n continue\n if namespace not in genes_with_hits_cnt[genome_ref]:\n genes_with_hits_cnt[genome_ref][namespace] = 0\n if dom_hits_by_namespace[namespace]:\n genes_with_hits_cnt[genome_ref][namespace] += 1\n\n if gene_name not in dom_hits[genome_ref]:\n dom_hits[genome_ref][gene_name] = dict()\n \n if top_hit_flag:\n dom_hits[genome_ref][gene_name][namespace] = { top_hit_dom_by_namespace[namespace]: True }\n else:\n dom_hits[genome_ref][gene_name][namespace] = dom_hits_by_namespace[namespace]\n \n # make sure we have domain annotations for all genomes\n missing_annot = []\n for genome_ref in genome_refs:\n if genome_ref not in dom_annot_found:\n missing_annot.append(\"\\t\"+'MISSING 
DOMAIN ANNOTATION FOR: '+genome_ref)\n if missing_annot:\n error_msg = \"ABORT: You must run the DomainAnnotation App first\\n\"\n error_msg += \"\\n\".join(missing_annot)\n raise ValueError (error_msg)\n \n # DEBUG\n #for genome_ref in genome_refs:\n # self.log (console, \"SEED ANNOT CNT B: '\"+str(genes_with_hits_cnt[genome_ref]['SEED'])+\"'\")\n\n \n # calculate table\n #\n table_data = dict()\n INSANE_VALUE = 10000000000000000\n overall_low_val = INSANE_VALUE\n overall_high_val = -INSANE_VALUE\n\n # count raw\n for genome_ref in genome_refs:\n if genome_ref not in table_data:\n table_data[genome_ref] = dict()\n for cat in cats:\n table_data[genome_ref][cat] = 0\n\n # custom\n if params['namespace'] == 'custom':\n for cat in cats:\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n if cat in dom_hits[genome_ref][gene_name][namespace]:\n table_data[genome_ref][cat] += 1\n\n # high level summation\n else:\n namespace = params['namespace']\n for gene_name in dom_hits[genome_ref].keys():\n if namespace in dom_hits[genome_ref][gene_name]:\n for domfam in dom_hits[genome_ref][gene_name][namespace].keys():\n #self.log(console, \"DOMFAM: '\"+str(domfam)+\"'\") # DEBUG\n\n if domfam in domfam2cat[namespace]:\n cat = domfam2cat[namespace][domfam]\n #self.log(console, \"CAT: '\"+str(cat)+\"'\") # DEBUG\n if cat in cats:\n #self.log(console, \"CAT_FOUND: '\"+str(cat)+\"'\") # DEBUG\n table_data[genome_ref][cat] += 1\n \n # adjust to percs\n if params['count_category'].startswith('perc'):\n for genome_ref in genome_refs:\n\n # DEBUG\n #sci_name = genome_sci_name_by_ref[genome_ref]\n #try:\n # total_genes = genes_with_hits_cnt[genome_ref]['COG']\n # print (sci_name +\" (\"+genome_ref+\"): COG OK\")\n #except:\n # print (sci_name +\" (\"+genome_ref+\"): COG MISSING\")\n\n for cat in cats:\n if params['count_category'] == 'perc_annot':\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n else:\n namespace = params['namespace']\n total_genes = genes_with_hits_cnt[genome_ref][namespace]\n else:\n total_genes = genome_CDS_count_by_ref[genome_ref]\n\n table_data[genome_ref][cat] /= float(total_genes)\n table_data[genome_ref][cat] *= 100.0\n\n # determine high and low val\n for genome_ref in genome_refs:\n for cat in cats:\n val = table_data[genome_ref][cat]\n if val == 0: continue\n #self.log (console, \"HIGH VAL SCAN CAT: '\"+cat+\"' VAL: '\"+str(val)+\"'\") # DEBUG\n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n if val > overall_high_val:\n overall_high_val = val\n if val < overall_low_val:\n overall_low_val = val\n if overall_high_val == -INSANE_VALUE:\n raise ValueError (\"unable to find any counts\")\n\n\n # determine cats with a value and build group\n #\n cat_seen = dict()\n group_size = dict()\n group_size_with_blanks = dict()\n group_order = []\n group_order_with_blanks = []\n for cat in cats:\n cat_seen[cat] = False\n if params['namespace'] == 'custom':\n # get group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = None\n if extra_target_fam_groups:\n if cat in 
domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = None\n if extra_target_fam_groups:\n if cat in domfam2group:\n cat_group = domfam2group[cat]\n else:\n cat_group = 'N/A'\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n else:\n namespace = params['namespace']\n # get group size\n for cat in cats:\n for genome_ref in genome_refs:\n if cat in table_data[genome_ref] and table_data[genome_ref][cat] != None and table_data[genome_ref][cat] != 0:\n cat_seen[cat] = True\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size:\n group_order.append(cat_group)\n group_size[cat_group] = 0\n group_size[cat_group] += 1\n break\n # get group size including blanks\n for cat in cats:\n cat_group = cat2group[namespace][cat]\n if cat_group != None:\n if cat_group not in group_size_with_blanks:\n group_order_with_blanks.append(cat_group)\n group_size_with_blanks[cat_group] = 0\n group_size_with_blanks[cat_group] += 1\n\n\n # Draw tree (we already instantiated Tree above)\n #\n png_file = speciesTree_name+'.png'\n pdf_file = speciesTree_name+'.pdf'\n output_png_file_path = os.path.join(html_output_dir, png_file);\n output_pdf_file_path = os.path.join(html_output_dir, pdf_file);\n\n # init ETE3 accessory objects\n ts = ete3.TreeStyle()\n\n # customize\n ts.show_leaf_name = True\n ts.show_branch_length = False\n ts.show_branch_support = True\n #ts.scale = 50 # 50 pixels per branch length unit\n ts.branch_vertical_margin = 5 # pixels between adjacent branches\n #ts.title.add_face(ete3.TextFace(params['output_name']+\": \"+params['desc'], fsize=10), column=0)\n\n node_style = ete3.NodeStyle()\n node_style[\"fgcolor\"] = \"#606060\" # for node balls\n node_style[\"size\"] = 10 # for node balls (gets reset based on support)\n node_style[\"vt_line_color\"] = \"#606060\"\n node_style[\"hz_line_color\"] = \"#606060\"\n node_style[\"vt_line_width\"] = 2\n node_style[\"hz_line_width\"] = 2\n node_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n node_style[\"hz_line_type\"] = 0\n\n leaf_style = ete3.NodeStyle()\n leaf_style[\"fgcolor\"] = \"#ffffff\" # for node balls\n leaf_style[\"size\"] = 2 # for node balls (we're using it to add space)\n leaf_style[\"vt_line_color\"] = \"#606060\" # unecessary\n leaf_style[\"hz_line_color\"] = \"#606060\"\n leaf_style[\"vt_line_width\"] = 2\n leaf_style[\"hz_line_width\"] = 2\n leaf_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n leaf_style[\"hz_line_type\"] = 0\n\n for n in species_tree.traverse():\n if n.is_leaf():\n style = leaf_style\n genome_id = n.name\n #n.name = genome_sci_name_by_id[genome_id]\n n.name = None\n leaf_name_disp = genome_sci_name_by_id[genome_id]\n n.add_face(ete3.TextFace(leaf_name_disp, fsize=10), column=0, position=\"branch-right\")\n else:\n style = ete3.NodeStyle()\n for k in node_style.keys():\n style[k] = node_style[k]\n\n if n.support > 0.95:\n style[\"size\"] = 6\n elif n.support > 0.90:\n style[\"size\"] = 5\n elif n.support > 0.80:\n style[\"size\"] = 4\n else:\n style[\"size\"] = 2\n\n n.set_style(style)\n\n # save images\n dpi = 300\n img_units = \"in\"\n img_pix_width = 1200\n img_in_width = 
round(float(img_pix_width)/float(dpi), 1)\n img_html_width = img_pix_width // 2\n species_tree.render(output_png_file_path, w=img_in_width, units=img_units, dpi=dpi, tree_style=ts)\n species_tree.render(output_pdf_file_path, w=img_in_width, units=img_units, tree_style=ts) # dpi irrelevant\n\n\n # build report\n #\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n\n\n # build html report\n sp = ' '\n text_color = \"#606060\"\n text_color_2 = \"#606060\"\n head_color_1 = \"#eeeeee\"\n head_color_2 = \"#eeeeee\"\n border_color = \"#cccccc\"\n border_cat_color = \"#ffccff\"\n #graph_color = \"lightblue\"\n #graph_width = 100\n #graph_char = \".\"\n graph_char = sp\n color_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e']\n max_color = len(color_list)-1\n cat_disp_trunc_len = 40\n cell_width = '10px'\n tree_scale_factor = 22.625\n tree_img_height = int(tree_scale_factor*len(genome_refs))\n extra_tree_rows = 3\n if len(genome_refs) > 20:\n graph_gen_fontsize = \"1\"\n elif len(genome_refs) > 10:\n graph_gen_fontsize = \"2\"\n else:\n graph_gen_fontsize = \"3\"\n if len(cats) > 20:\n graph_cat_fontsize = \"1\"\n elif len(cats) > 5:\n graph_cat_fontsize = \"2\"\n else:\n graph_cat_fontsize = \"3\"\n if int(graph_cat_fontsize) < int(graph_gen_fontsize):\n cell_fontsize = graph_gen_fontsize = graph_cat_fontsize\n else:\n cell_fontsize = graph_cat_fontsize = graph_gen_fontsize\n #graph_padding = \"5\"\n graph_padding = \"2\"\n graph_spacing = \"3\"\n #border = \"1\"\n border = \"0\"\n #row_spacing = \"-2\"\n num_rows = len(genome_refs)\n show_groups = False\n if len(group_order) > 0: show_groups = True\n\n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Functional Domain Profile with Species Tree</title>']\n html_report_lines += ['<style>']\n html_report_lines += [\".vertical-text {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 0.65em;\\n}\\n.vertical-text__inner {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.1;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += [\".vertical-text_title {\\ndisplay: inline-block;\\noverflow: hidden;\\nwidth: 1.0em;\\n}\\n.vertical-text__inner_title {\\ndisplay: inline-block;\\nwhite-space: nowrap;\\nline-height: 1.0;\\ntransform: translate(0,100%) rotate(-90deg);\\ntransform-origin: 0 0;\\n}\\n.vertical-text__inner_title:after {\\ncontent: \\\"\\\";\\ndisplay: block;\\nmargin: 0.0em 0 100%;\\n}\"]\n html_report_lines += ['</style>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n\n # genomes as rows\n if 'vertical' in params and params['vertical'] == \"1\":\n # table header\n html_report_lines += ['<table cellpadding='+graph_padding+' cellspacing='+graph_spacing+' border='+border+'>']\n corner_rowspan = \"1\"\n if show_groups: corner_rowspan = \"2\"\n label = ''\n if params['namespace'] != 'custom':\n label = params['namespace']\n if label == 'PF':\n label = 'PFAM'\n elif label == 'TIGR':\n label = 'TIGRFAM'\n html_report_lines += ['<tr><td 
valign=bottom align=right rowspan='+corner_rowspan+'><div class=\"vertical-text_title\"><div class=\"vertical-text__inner_title\"><font color=\"'+text_color+'\">'+label+'</font></div></div></td>']\n \n # group headers\n if show_groups:\n for cat_group in group_order:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n max_group_width = 3*group_size[cat_group]\n if len(cat_group) > max_group_width:\n new_cat_group_words = []\n sentence_len = 0\n for w_i,word in enumerate(cat_group_words):\n new_cat_group_words.append(word)\n sentence_len += len(word)\n if w_i < len(cat_group_words)-1:\n if sentence_len + 1 + len(cat_group_words[w_i+1]) > max_group_width:\n new_cat_group_words[w_i] += '<br>'\n sentence_len = 0\n cat_group_words = new_cat_group_words\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" \".join(cat_group_words)\n\n # DEBUG\n #if cat_group not in group_size:\n # self.log(console, \"CAT_GROUP: '\"+str(cat_group)+\"'\") # DEBUG\n # self.log(console, \"CAT_GROUP_DISP: '\"+str(cat_group_disp)+\"'\") # DEBUG\n # for cg in group_size:\n # self.log(console, \"CG: '\"+str(cg)+\"'\") # DEBUG\n\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white colspan='+str(group_size[cat_group])+'></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_1+'\"valign=middle align=center colspan='+str(group_size[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n html_report_lines += ['</tr><tr>']\n\n # column headers\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n if params['namespace'] == 'custom':\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub (\"\\d*$\", \"\", cat)\n cell_title = domfam2name[namespace][cat].strip()\n cat_disp = cat\n cat_disp = re.sub ('^SEED', 'SEED:', cat_disp)\n else:\n cell_title = cat2name[params['namespace']][cat].strip()\n cat_disp = cat\n cat_disp = re.sub (\"TIGR_\", \"\", cat_disp)\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<td style=\"border-right:solid 2px '+border_cat_color+'; border-bottom:solid 2px '+border_cat_color+'\" bgcolor=\"'+head_color_2+'\"title=\"'+cell_title+'\" valign=bottom align=center>']\n if params['namespace'] != 'COG':\n html_report_lines += ['<div class=\"vertical-text\"><div class=\"vertical-text__inner\">']\n html_report_lines += ['<font color=\"'+text_color_2+'\" size='+graph_cat_fontsize+'><b>']\n #for c_i,c in enumerate(cat_disp):\n # if c_i < len(cat_disp)-1:\n # html_report_lines += [c+'<br>']\n # else:\n # html_report_lines += [c]\n html_report_lines += [cat_disp]\n html_report_lines += ['</b></font>']\n if params['namespace'] != 'COG':\n html_report_lines += ['</div></div>']\n html_report_lines += ['</td>']\n html_report_lines += ['</tr>']\n \n # add tree image\n html_report_lines += ['<tr><td align=\"left\" valign=\"top\" rowspan='+str(len(genome_refs)+extra_tree_rows)+'><img src=\"'+png_file+'\" height='+str(tree_img_height)+'></td>']\n\n # rest of rows\n for row_i,genome_ref in enumerate(genome_refs):\n genome_sci_name = genome_sci_name_by_ref[genome_ref]\n if row_i > 0:\n html_report_lines += ['<tr>']\n #html_report_lines += ['<td align=right><font 
color=\"'+text_color+'\" size='+graph_gen_fontsize+'><b><nobr>'+genome_sci_name+'</nobr></b></font></td>']\n for cat in cats:\n if not cat_seen[cat] and not show_blanks:\n continue\n val = table_data[genome_ref][cat]\n if val == 0:\n cell_color = 'white'\n else: \n if 'log_base' in params and params['log_base'] != None and params['log_base'] != '':\n log_base = float(params['log_base'])\n if log_base <= 1.0:\n raise ValueError (\"log base must be > 1.0\")\n val = math.log(val, log_base)\n cell_color_i = max_color - int(round(max_color * (val-overall_low_val) / float(overall_high_val-overall_low_val)))\n c = color_list[cell_color_i]\n cell_color = '#'+c+c+c+c+'FF'\n\n if params['count_category'].startswith('perc'):\n cell_val = str(\"%.3f\"%table_data[genome_ref][cat])\n cell_val += '%'\n else:\n cell_val = str(table_data[genome_ref][cat])\n\n if 'heatmap' in params and params['heatmap'] == '1':\n if table_data[genome_ref][cat] == 0:\n this_text_color = text_color\n #this_graph_char = \"0\"\n this_graph_char = sp\n else:\n this_text_color = cell_color\n this_graph_char = graph_char\n html_report_lines += ['<td align=center valign=middle title=\"'+cell_val+'\" style=\"width:'+cell_width+'\" bgcolor=\"'+cell_color+'\"><font color=\"'+this_text_color+'\" size='+cell_fontsize+'>'+this_graph_char+'</font></td>']\n else:\n html_report_lines += ['<td align=center valign=middle style=\"'+cell_width+'; border-right:solid 2px '+border_color+'; border-bottom:solid 2px '+border_color+'\"><font color=\"'+text_color+'\" size='+cell_fontsize+'>'+cell_val+'</font></td>']\n\n html_report_lines += ['</tr>']\n # add extra blank rows to extend tree rule below grid\n for row_i in range(extra_tree_rows):\n html_report_lines += ['<tr><td bgcolor=\"white\" style=\"width:10px\"><font color=\"white\" size='+cell_fontsize+'>'+sp+'</font></td></tr>']\n\n html_report_lines += ['</table>']\n\n # genomes as columns\n else:\n raise ValueError (\"Do not yet support Genomes as columns\")\n\n\n # key table\n html_report_lines += ['<p>']\n html_report_lines += ['<table cellpadding=3 cellspacing=2 border='+border+'>']\n html_report_lines += ['<tr><td valign=middle align=left colspan=3 style=\"border-bottom:solid 4px '+border_color+'\"><font color=\"'+text_color+'\"><b>KEY</b></font></td></tr>']\n\n if show_groups:\n group_cat_i = 0\n for cat_group in group_order_with_blanks:\n if cat_group.startswith('SEED'):\n cat_group_disp = re.sub ('_',' ',cat_group)\n else:\n cat_group_disp = cat_group\n cat_group_words = cat_group_disp.split()\n if cat_group_words[0] == 'N/A':\n cat_group_disp = ''\n else:\n cat_group_disp = \" <br>\".join(cat_group_words)\n cat_group_disp += sp\n\n html_report_lines += ['<tr>']\n if cat_group_disp == '':\n html_report_lines += ['<td bgcolor=white rowspan='+str(group_size_with_blanks[cat_group])+' style=\"border-right:solid 4px '+border_color+'\"></td>']\n else:\n html_report_lines += ['<td style=\"border-right:solid 4px '+border_color+'\" valign=top align=right rowspan='+str(group_size_with_blanks[cat_group])+'><font color=\"'+text_color+'\" size='+str(graph_cat_fontsize)+'><b>'+cat_group_disp+'</b></font></td>']\n\n\n # DEBUG\n #self.log (console, \"CAT GROUP: '\"+cat_group+\"' SIZE: '\"+str(group_size_with_blanks[cat_group])+\"'\")\n\n # add first cat for group\n first_cat = cats[group_cat_i]\n cell_color = 'white'\n #if not cat_seen[first_cat] and not show_blanks:\n if not cat_seen[first_cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = first_cat\n if 
first_cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', first_cat)\n cat_disp = re.sub ('^SEED', 'SEED:', first_cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = first_cat\n desc = cat2name[namespace][first_cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n # add rest of cats in group\n for c_i in range(group_cat_i, group_cat_i+group_size_with_blanks[cat_group]-1):\n cat = cats[c_i]\n cell_color = 'white'\n #if not cat_seen[cat] and not show_blanks:\n if not cat_seen[cat]:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', cat)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n cat_disp = sp+cat_disp\n \n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+sp+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n group_cat_i += 1\n\n else:\n for cat in cats:\n cell_color = 'white'\n if not cat_seen[cat] and not show_blanks:\n cell_color = \"#eeeeee\"\n if params['namespace'] == 'custom':\n domfam = cat\n if cat.startswith('SEED'):\n namespace = 'SEED'\n else:\n namespace = re.sub ('\\d*$', '', domfam)\n cat_disp = re.sub ('^SEED', 'SEED:', cat)\n desc = domfam2name[namespace][domfam]\n else:\n namespace = params['namespace']\n cat_disp = cat\n desc = cat2name[namespace][cat]\n if len(cat_disp) > cat_disp_trunc_len+1:\n cat_disp = cat_disp[0:cat_disp_trunc_len]+'*'\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\" style=\"border-right:solid 4px '+border_color+'><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+cat_disp+'</font></td>']\n html_report_lines += ['<td valign=middle align=left bgcolor=\"'+cell_color+'\"><font color=\"'+text_color+'\" size='+graph_cat_fontsize+'>'+desc+'</font></td>']\n html_report_lines += ['</tr>']\n\n\n html_report_lines += ['</table>']\n\n # close\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n html_file = os.path.join (html_output_dir, 'domain_profile_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n #upload_ret = dfu.file_to_shock({'file_path': html_file,\n upload_ret = 
dfu.file_to_shock({'file_path': html_output_dir,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading html_report to shock')\n\n reportObj['html_links'] = [{'shock_id': upload_ret['shock_id'],\n 'name': 'domain_profile_report.html',\n 'label': 'Functional Domain Profile report'}\n ]\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_fxn_profile_phylo\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_fxn_profile_phylo return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_genome_circle_plot(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_genome_circle_plot_Input\"\n (view_genome_circle_plot() ** ** build a circle plot of a\n microbial genome) -> structure: parameter \"workspace_name\" of type\n \"workspace_name\" (** Common types), parameter \"input_genome_ref\"\n of type \"data_obj_ref\"\n :returns: instance of type \"view_genome_circle_plot_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_genome_circle_plot\n #END view_genome_circle_plot\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_genome_circle_plot return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_pan_circle_plot(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_circle_plot_Input\"\n (view_pan_circle_plot() ** ** build a circle plot of a microbial\n genome with its pangenome members) -> structure: parameter\n \"workspace_name\" of type \"workspace_name\" (** Common types),\n parameter \"input_genome_ref\" of type \"data_obj_ref\", parameter\n \"input_pangenome_ref\" of type \"data_obj_ref\", parameter\n \"input_compare_genome_refs\" of type \"data_obj_ref\", parameter\n \"input_outgroup_genome_refs\" of type \"data_obj_ref\", parameter\n \"save_featuresets\" of type \"bool\"\n :returns: instance of type \"view_pan_circle_plot_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_pan_circle_plot\n\n ### STEP 0: basic init\n console = []\n invalid_msgs = []\n self.log(console, 'Running view_pan_circle_plot(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_genome_ref',\n 'input_pangenome_ref'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n 
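# Record the refs of the input objects (base genome + pangenome) so that objects\n            # saved by this run (e.g. the FeatureSets below) carry provenance pointing\n            # back to their inputs in the Workspace.\n            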
provenance[0]['input_ws_objects']=[str(params['input_genome_ref']),\n str(params['input_pangenome_ref'])\n ]\n\n\n # set the output paths\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n html_output_dir = os.path.join(output_dir,'html_output')\n if not os.path.exists(html_output_dir):\n os.makedirs(html_output_dir)\n\n\n # get base genome\n #\n self.log(console, \"GETTING BASE GENOME OBJECT\")\n genome_sci_name_by_ref = dict()\n base_genome_ref = input_ref = params['input_genome_ref']\n base_genome_obj_name = None\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n base_genome_obj_name = input_obj_info[NAME_I]\n base_genome_obj_name = base_genome_obj_name.replace(\" \",\"_\")\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n try:\n base_genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n genome_sci_name_by_ref[base_genome_ref] = base_genome_obj['scientific_name']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n\n # get pangenome\n #\n self.log(console, \"GETTING PANGENOME OBJECT\")\n input_ref = params['input_pangenome_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n pg_obj_name = input_obj_info[NAME_I]\n pg_obj_name = pg_obj_name.replace(\" \", \"_\")\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Pangenome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n try:\n pg_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n\n # get genome_refs from pangenome and make sure requested genomes are found\n #\n self.log(console, \"READING GENOME REFS IN PANGENOME\")\n pg_genome_refs = pg_obj['genome_refs']\n compare_genome_refs = []\n compare_genomes_cnt = 0\n if 'input_compare_genome_refs' not in params or not params['input_compare_genome_refs']:\n for g_ref in pg_genome_refs:\n if g_ref == base_genome_ref:\n continue\n compare_genome_refs.append(g_ref)\n compare_genomes_cnt += 1\n else:\n for g_ref in params['input_compare_genome_refs']:\n if g_ref == base_genome_ref:\n continue\n compare_genome_refs.append(g_ref)\n compare_genomes_cnt += 1\n\n\n # get outgroup genomes and remove from compare_genomes\n #\n self.log(console, \"REMOVING OUTGROUP GENOME(s) FROM TARGETS\")\n outgroup_genome_refs = []\n outgroup_genome_refs_cnt = 0\n if 'input_outgroup_genome_refs' in params and params['input_outgroup_genome_refs']:\n for genome_ref in params['input_outgroup_genome_refs']:\n outgroup_genome_refs.append(genome_ref)\n outgroup_genome_refs_cnt += 1\n new_compare_genome_refs = []\n compare_genomes_cnt = 0\n for genome_ref in compare_genome_refs:\n if genome_ref not in outgroup_genome_refs:\n new_compare_genome_refs.append(genome_ref)\n compare_genomes_cnt += 1\n compare_genome_refs = new_compare_genome_refs\n\n\n # Make sure all requested genomes are in pangenome\n #\n self.log(console, \"CHECKING FOR REQUESTED GENOMES IN PANGENOME\")\n missing_genomes = []\n for genome_ref in [base_genome_ref] + compare_genome_refs + outgroup_genome_refs:\n if genome_ref not in pg_genome_refs:\n missing_genomes.append(genome_ref)\n if missing_genomes:\n msg = ''\n for genome_ref in missing_genomes:\n msg += \"genome \"+genome_ref+\" not found in pangenome\\n\"\n raise ValueError (msg)\n\n\n # Reorder compare genomes by fractional overlap to base by pangenome\n #\n self.log(console, \"ORDERING TARGET GENOMES BY OVERLAP WITH BASE\")\n compare_genome_cluster_overlap_cnt = dict()\n for genome_ref in compare_genome_refs:\n compare_genome_cluster_overlap_cnt[genome_ref] = 0\n\n for cluster in pg_obj['orthologs']:\n genomes_seen = dict()\n for cluster_member in cluster['orthologs']:\n feature_id = cluster_member[0]\n feature_len_maybe = cluster_member[1]\n genome_ref = cluster_member[2]\n genomes_seen[genome_ref] = True\n if base_genome_ref in genomes_seen:\n for genome_ref in compare_genome_refs:\n if genome_ref in genomes_seen:\n compare_genome_cluster_overlap_cnt[genome_ref] += 1\n\n sorted_compare_genome_refs = sorted(compare_genome_cluster_overlap_cnt, key=compare_genome_cluster_overlap_cnt.__getitem__, reverse=True)\n compare_genome_refs = sorted_compare_genome_refs\n\n\n # Get genome sci names\n #\n self.log(console, \"GETTING GENOME SCIENTIFIC NAMES\")\n for genome_ref in compare_genome_refs + outgroup_genome_refs:\n try:\n genome_obj = wsClient.get_objects([{'ref':genome_ref}])[0]['data']\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n except:\n raise ValueError (\"unable to fetch genome: \"+genome_ref)\n\n\n # Determine singleton, clade-core, universal, and partial pangenome\n # feature sets for base+compare genome set\n # (but not including outgroup genome features)\n #\n self.log(console, \"DETERMINING PANGENOME CATEGORIES OF FEATURES\")\n singleton_featureSet_elements = dict()\n partial_featureSet_elements = 
dict()\n core_featureSet_elements = dict()\n univ_featureSet_elements = dict()\n for cluster in pg_obj['orthologs']:\n genomes_seen = dict()\n fids_by_genome_ref = dict()\n for cluster_member in cluster['orthologs']:\n feature_id = cluster_member[0]\n feature_len_maybe = cluster_member[1]\n genome_ref = cluster_member[2]\n genomes_seen[genome_ref] = True\n try:\n fid_list = fids_by_genome_ref[genome_ref]\n except:\n fids_by_genome_ref[genome_ref] = []\n fids_by_genome_ref[genome_ref].append(feature_id)\n\n # determine categorization\n hit_cnt = 0\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n if genome_ref in genomes_seen:\n hit_cnt += 1\n if hit_cnt == 0: # nothing within requested genome set\n continue\n elif hit_cnt == 1: # singleton\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n if genome_ref in genomes_seen:\n for fid in fids_by_genome_ref[genome_ref]:\n #featureSet_element_id = genome_ref + self.genome_feature_id_delim + fid\n #singleton_featureSet_elements[featureSet_element_id] = [genome_ref]\n if fid in singleton_featureSet_elements:\n singleton_featureSet_elements[fid].append(genome_ref)\n else:\n singleton_featureSet_elements[fid] = [genome_ref]\n elif hit_cnt < compare_genomes_cnt + 1: # +1: include base genome\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n if genome_ref in genomes_seen:\n for fid in fids_by_genome_ref[genome_ref]:\n #featureSet_element_id = genome_ref + self.genome_feature_id_delim + fid\n #partial_featureSet_elements[featureSet_element_id] = [genome_ref]\n if fid in partial_featureSet_elements:\n partial_featureSet_elements[fid].append(genome_ref)\n else:\n partial_featureSet_elements[fid] = [genome_ref]\n else: # core\n outgroup_hit = False\n for genome_ref in outgroup_genome_refs:\n if genome_ref in genomes_seen:\n outgroup_hit = True\n break\n if outgroup_hit: # universal core\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n #if genome_ref in genomes_seen: # implicit\n for fid in fids_by_genome_ref[genome_ref]:\n #featureSet_element_id = genome_ref + self.genome_feature_id_delim + fid\n #univ_featureSet_elements[featureSet_element_id] = [genome_ref]\n if fid in univ_featureSet_elements:\n univ_featureSet_elements[fid].append(genome_ref)\n else:\n univ_featureSet_elements[fid] = [genome_ref]\n else: # clade-specific core\n for genome_ref in [base_genome_ref]+compare_genome_refs:\n #if genome_ref in genomes_seen: # implicit\n for fid in fids_by_genome_ref[genome_ref]:\n #featureSet_element_id = genome_ref + self.genome_feature_id_delim + fid\n #core_featureSet_elements[featureSet_element_id] = [genome_ref]\n if fid in core_featureSet_elements:\n core_featureSet_elements[fid].append(genome_ref)\n else:\n core_featureSet_elements[fid] = [genome_ref]\n\n\n # Create and save featureSets\n #\n objects_created = []\n if 'save_featuresets' not in params or params['save_featuresets'] == None or params['save_featuresets'] == '' or int(params['save_featuresets']) != 1:\n self.log(console, \"SKIPPING FEATURESETS\")\n else:\n self.log(console, \"SAVING FEATURESETS\")\n\n if singleton_featureSet_elements:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".singleton_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" singleton pangenome features\"\n singleton_obj = { 'description': fs_desc,\n 'elements': singleton_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 
'KBaseCollections.FeatureSet',\n 'data': singleton_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n singleton_featureSet_elements = {} # free memory\n singleton_obj = {} # free memory\n \n if partial_featureSet_elements:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".non-core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" non-core pangenome features\"\n partial_obj = { 'description': fs_desc,\n 'elements': partial_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': partial_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n partial_featureSet_elements = {} # free memory\n partial_obj = {} # free memory\n\n if core_featureSet_elements:\n if outgroup_genome_refs_cnt == 0:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" core pangenome features\"\n else:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".clade-specific_core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" clade-specific core pangenome features\"\n core_obj = { 'description': fs_desc,\n 'elements': core_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': core_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n core_featureSet_elements = {} # free memory\n core_obj = {} # free memory\n\n if univ_featureSet_elements:\n fs_name = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\".non-specific_core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".base_genome-\"+base_genome_obj_name+\" non-specific core pangenome features\"\n univ_obj = { 'description': fs_desc,\n 'elements': univ_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': univ_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n univ_featureSet_elements = {} # free memory\n univ_obj = {} # free memory\n\n\n # Get mapping of base genes to pangenome\n #\n self.log(console, \"DETERMINING MAPPING OF BASE GENES TO PANGENOME\")\n base_to_compare_redundant_map = dict()\n base_singletons = dict()\n base_cores = dict()\n base_universals = dict()\n for cluster in pg_obj['orthologs']:\n genomes_seen = dict()\n base_fids = []\n compare_genomes_seen = []\n outgroup_genomes_seen = []\n for cluster_member in cluster['orthologs']:\n feature_id = cluster_member[0]\n feature_len_maybe = cluster_member[1]\n genome_ref = cluster_member[2]\n genomes_seen[genome_ref] = True\n if genome_ref == base_genome_ref:\n base_fids.append(feature_id)\n if base_genome_ref in genomes_seen:\n universal = 
True\n core = True\n singleton = True\n for genome_ref in compare_genome_refs:\n if genome_ref in genomes_seen:\n singleton = False\n compare_genomes_seen.append(True)\n else:\n universal = False\n core = False\n compare_genomes_seen.append(False)\n for genome_ref in outgroup_genome_refs:\n if genome_ref in genomes_seen:\n singleton = False\n core = False\n else:\n universal = False\n for base_fid in base_fids:\n base_to_compare_redundant_map[base_fid] = compare_genomes_seen\n if universal:\n base_universals[base_fid] = True\n elif core:\n base_cores[base_fid] = True\n elif singleton:\n base_singletons[base_fid] = True\n \n\n # Get positions of genes in base genome\n #\n self.log(console, \"READING BASE GENOME COORDS\")\n sorted_base_contig_ids = []\n sorted_base_contig_lens = []\n unsorted_contig_lens = dict()\n sorted_contig_order = dict()\n feature_contig_id = dict()\n feature_pos_in_contig = dict()\n feature_order = []\n sum_contig_lens = 0\n\n # hopefully info sitting in Genome obj\n if 'contig_ids' in base_genome_obj and base_genome_obj['contig_ids'] != None:\n for contig_i,contig_id in enumerate(base_genome_obj['contig_ids']):\n contig_len = base_genome_obj['contig_lengths'][contig_i]\n unsorted_contig_lens[contig_id] = contig_len\n sum_contig_lens += contig_len\n\n # otherwise have to get contig ids from Assembly or ContigSet obj\n else: \n # Get genome_assembly_refs\n base_genome_assemby_ref = None\n base_genome_assembly_type = None\n if ('contigset_ref' not in base_genome_obj or base_genome_obj['contigset_ref'] == None) \\\n and ('assembly_ref' not in base_genome_obj or base_genome_obj['assembly_ref'] == None):\n msg = \"Genome \"+base_genome_obj_name+\" (ref:\"+base_genome_ref+\") \"+genome_sci_name_by_ref[base_genome_ref]+\" MISSING BOTH contigset_ref AND assembly_ref. Cannot process. 
Exiting.\"\n self.log(console, msg)\n #self.log(invalid_msgs, msg)\n #continue\n raise ValueError (msg)\n elif 'assembly_ref' in base_genome_obj and base_genome_obj['assembly_ref'] != None:\n msg = \"Genome \"+base_genome_obj_name+\" (ref:\"+base_genome_ref+\") \"+genome_sci_name_by_ref[base_genome_ref]+\" USING assembly_ref: \"+str(base_genome_obj['assembly_ref'])\n self.log (console, msg)\n base_genome_assembly_ref = base_genome_obj['assembly_ref']\n base_genome_assembly_type = 'assembly'\n elif 'contigset_ref' in base_genome_obj and base_genome_obj['contigset_ref'] != None:\n msg = \"Genome \"+base_genome_obj_name+\" (ref:\"+base_genome_ref+\") \"+genome_sci_name_by_ref[base_genome_ref]+\" USING contigset_ref: \"+str(base_genome_obj['contigset_ref'])\n self.log (console, msg)\n base_genome_assembly_ref = base_genome_obj['contigset_ref']\n base_genome_assembly_type = 'contigset'\n\n # get assembly obj and read contig ids and lengths (both contigset obj and assembly obj have list of contigs that \n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n #objects_list = wsClient.get_objects2({'objects':[{'ref':input_ref}]})['data']\n ass_obj = wsClient.get_objects([{'ref':base_genome_assembly_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch assembly: \"+base_genome_assembly_ref)\n\n if base_genome_assembly_type == 'assembly':\n for contig_key in ass_obj['contigs'].keys():\n contig_id = ass_obj['contigs'][contig_key]['contig_id']\n contig_len = ass_obj['contigs'][contig_key]['length']\n #print (\"CONTIG_ID: '\"+str(contig_id)+\"' CONTIG_LEN: '\"+str(contig_len)+\"'\\n\") # DEBUG\n unsorted_contig_lens[contig_id] = contig_len\n sum_contig_lens += contig_len\n else: # contigset obj\n for contig in ass_obj['contigs']:\n contig_id = contig['id']\n contig_len = contig['length']\n unsorted_contig_lens[contig_id] = contig_len\n sum_contig_lens += contig_len\n \n # order contigs by length and store by contig_id\n for order_i,contig_id in enumerate(sorted(unsorted_contig_lens, key=unsorted_contig_lens.__getitem__, reverse=True)):\n #print (\"STORING CONTIG ORDER: '\"+str(order_i)+\"' for CONTIG_ID: '\"+str(contig_id)+\"'\\n\") # DEBUG\n sorted_contig_order[contig_id] = order_i\n sorted_base_contig_ids.append(contig_id)\n sorted_base_contig_lens.append(unsorted_contig_lens[contig_id])\n feature_order.append([])\n\n for feature in base_genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n fid = feature['id']\n #print (\"FEATURE_ID: '\"+str(fid)+\"'\\n\") # DEBUG\n feature_contig_id[fid] = feature['location'][0][0]\n beg = feature['location'][0][1]\n strand = feature['location'][0][2]\n len = feature['location'][0][3]\n if strand == '-':\n feature_pos_in_contig[fid] = beg - int(len/2)\n else:\n feature_pos_in_contig[fid] = beg + int(len/2)\n contig_i = sorted_contig_order[feature_contig_id[fid]]\n feature_order[contig_i].append(fid)\n\n\n # Draw Circle Plot with matplotlib\n #\n self.log(console, \"CREATING CIRCLE PLOT\")\n img_dpi = 200\n img_units = \"in\"\n img_pix_width = 2000\n img_in_width = round(float(img_pix_width) / float(img_dpi), 2)\n img_html_width = img_pix_width // 4\n\n #genome_ring_scale_factor = 0.8\n genome_ring_scale_factor = 1.0 / compare_genomes_cnt\n #img_pix_width = img_dpi * compare_genomes_cnt * genome_ring_scale_factor\n\n origin_gap_angle = 20\n mark_width = 0.1\n 
ellipse_to_circle_scaling = 1.0\n ellipse_center_x = 0.50\n ellipse_center_y = 0.50\n ellipse_center = (ellipse_center_x, ellipse_center_y)\n lw_to_coord_scale = 0.005\n max_unscaled_rings = 4\n unscaled_ring_lw = 30\n outer_ring_radius = 0.8\n min_inner_radius = 0.3\n\n if compare_genomes_cnt <= max_unscaled_rings:\n gene_bar_lw = unscaled_ring_lw\n inner_radius = outer_ring_radius - lw_to_coord_scale * compare_genomes_cnt * gene_bar_lw\n else:\n inner_radius = min_inner_radius\n gene_bar_lw = genome_ring_scale_factor * (outer_ring_radius - min_inner_radius) / lw_to_coord_scale\n #genome_ring_spacing = 0.05 * gene_bar_lw\n genome_ring_spacing = 0.0\n gene_bar_lw -= genome_ring_spacing\n #self.log(console, \"gene_bar_lw: \"+str(gene_bar_lw)) # DEBUG\n #self.log(console, \"genome_ring_spacing: \"+str(genome_ring_spacing)) # DEBUG\n #self.log(console, \"inner_radius: \"+str(inner_radius)) # DEBUG\n #genome_ring_spacing = 0.05 * gene_bar_lw\n #genome_ring_spacing = 0.3 * gene_bar_lw\n #lw_to_coord_scale = 0.1\n base_singleton_color = \"red\"\n base_core_color = \"magenta\"\n #hit_core_color = \"darkmagenta\"\n hit_core_color = \"magenta\"\n #base_univ_color = \"blue\"\n base_univ_color = \"darkblue\"\n hit_univ_color = \"darkblue\"\n #base_nonspecific_core_color = \"purple\"\n #hit_nonspecific_core_color = \"purple\"\n base_nonspecific_core_color = \"darkblue\"\n hit_nonspecific_core_color = \"darkblue\"\n #base_partial_color = \"cyan\"\n #hit_partial_color = \"deepskyblue\"\n base_partial_color = \"deepskyblue\"\n #hit_partial_color = \"gray\" # too dark\n hit_partial_color = \"lightgray\"\n\n # Build image\n fig = pyplot.figure()\n fig.set_size_inches(img_in_width, img_in_width)\n ax = pyplot.subplot2grid( (1,1), (0,0), rowspan=1, colspan=1 )\n\n # Let's turn off visibility of all tic labels and boxes here\n for ax in fig.axes:\n ax.xaxis.set_visible(False) # remove axis labels and tics\n ax.yaxis.set_visible(False)\n for t in ax.get_xticklabels()+ax.get_yticklabels(): # remove tics\n t.set_visible(False)\n ax.spines['top'].set_visible(False) # Get rid of top axis line\n ax.spines['bottom'].set_visible(False) # bottom axis line\n ax.spines['left'].set_visible(False) # left axis line\n ax.spines['right'].set_visible(False) # right axis line\n\n # Add marks for genomes\n ax = fig.axes[0]\n base_contig_pos = 0\n for contig_i,contig_feature_order in enumerate(feature_order):\n if contig_i > 0:\n base_contig_pos += sorted_base_contig_lens[contig_i-1]\n\n # use base genome for angle\n #\n for fid in contig_feature_order:\n\n # base genome ring color\n if fid in base_singletons:\n gene_color = base_singleton_color\n this_mark_width = 2* mark_width\n z_level = 4\n elif fid in base_cores:\n gene_color = base_core_color\n hit_gene_color = hit_core_color\n this_mark_width = mark_width\n z_level = 3\n elif fid in base_universals:\n if outgroup_genome_refs_cnt == 0:\n gene_color = base_nonspecific_core_color\n hit_gene_color = hit_nonspecific_core_color\n else:\n gene_color = base_univ_color\n hit_gene_color = hit_univ_color\n this_mark_width = mark_width\n z_level = 2\n else:\n gene_color = base_partial_color\n hit_gene_color = hit_partial_color\n this_mark_width = mark_width\n z_level = 1\n gene_pos = base_contig_pos + feature_pos_in_contig[fid]\n \n arc_beg = 90 - origin_gap_angle/2.0 - (360-origin_gap_angle) * (float(gene_pos) / float(sum_contig_lens)) - this_mark_width\n arc_end = 90 - origin_gap_angle/2.0 - (360-origin_gap_angle) * (float(gene_pos) / float(sum_contig_lens)) + this_mark_width\n\n\n 
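# Ring placement (reflects the radius arithmetic below): the base genome occupies\n                # the outermost ring, and compare genome i (already sorted by cluster overlap\n                # with the base) is drawn i+1 ring-widths further in, so the most similar\n                # genome sits directly inside the base genome's ring.\n                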
# draw base genome gene\n #gene_bar_radius = inner_radius + 0.5*gene_bar_lw*lw_to_coord_scale\n\n # old (with base in center)\n #gene_bar_radius = inner_radius\n # new (with base on outside)\n #gene_bar_radius = inner_radius + 0.5*(compare_genomes_cnt)*(gene_bar_lw+genome_ring_spacing)*lw_to_coord_scale\n #gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt + 0.5) * (gene_bar_lw+genome_ring_spacing)\n #gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-1) * (gene_bar_lw+genome_ring_spacing) + lw_to_coord_scale * (this_gene_bar_lw+genome_ring_spacing)\n this_gene_bar_lw = unscaled_ring_lw\n if gene_bar_lw == unscaled_ring_lw:\n gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt) * (gene_bar_lw+genome_ring_spacing)\n else:\n gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-1) * (gene_bar_lw+genome_ring_spacing) + 0.5 * lw_to_coord_scale * (this_gene_bar_lw+genome_ring_spacing)\n\n #self.log(console, str('BASE')+\" gene_bar_radius: \"+str(gene_bar_radius)) # DEBUG\n gene_x_radius = 1.0 * gene_bar_radius\n gene_y_radius = ellipse_to_circle_scaling * gene_bar_radius\n \n gene_arc = Arc (ellipse_center, gene_x_radius, gene_y_radius, \\\n theta1=arc_beg, theta2=arc_end, \\\n edgecolor=gene_color, lw=this_gene_bar_lw, alpha=1.0, zorder=z_level) # facecolor does nothing (no fill for Arc)\n ax.add_patch (gene_arc) \n\n # add homolog rings\n for genome_i,hit_flag in enumerate(base_to_compare_redundant_map[fid]):\n if not hit_flag:\n continue\n# if fid in base_cores:\n# #gene_color = \"darkmagenta\"\n# gene_color = \"magenta\"\n# z_level = 3\n# elif fid in base_universals:\n# gene_color = \"darkblue\"\n# z_level = 2\n# else:\n# gene_color = \"deepskyblue\"\n# z_level = 1\n #gene_bar_radius = inner_radius + 0.5*(compare_genomes_cnt-(genome_i+1))*(gene_bar_lw+genome_ring_spacing)*lw_to_coord_scale\n #gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt - (genome_i+1) + 0.5) * (gene_bar_lw+genome_ring_spacing)\n gene_bar_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt - (genome_i+1)) * (gene_bar_lw+genome_ring_spacing)\n #self.log(console, str(genome_i)+\" gene_bar_radius: \"+str(gene_bar_radius)) # DEBUG\n gene_x_radius = 1.0 * gene_bar_radius\n gene_y_radius = ellipse_to_circle_scaling * gene_bar_radius\n gene_arc = Arc (ellipse_center, gene_x_radius, gene_y_radius, \\\n theta1=arc_beg, theta2=arc_end, \\\n edgecolor=hit_gene_color, lw=gene_bar_lw, alpha=1.0, zorder=z_level) # facecolor does nothing (no fill for Arc)\n ax.add_patch (gene_arc) \n\n # Add labels\n base_text_fontsize = 10\n if gene_bar_lw < unscaled_ring_lw:\n text_fontsize = int(max_unscaled_rings * base_text_fontsize * gene_bar_lw / unscaled_ring_lw)\n if text_fontsize > base_text_fontsize:\n text_fontsize = base_text_fontsize\n else:\n text_fontsize = base_text_fontsize\n text_color = \"#606060\"\n label_margin = 0.005\n y_downshift = 0.0075 * ellipse_to_circle_scaling\n #text_y_delta = 0.25\n #label_margin = 0.0\n #y_downshift = 0.0\n #text_y_delta = 0.0\n\n label_angle = (math.pi/180) * (90 - origin_gap_angle/2.0 - (360-origin_gap_angle))\n #label_radius = inner_radius + 0.5*gene_bar_lw*lw_to_coord_scale\n #label_radius = 0.5*inner_radius\n #label_radius = 0.5*inner_radius + text_y_delta*compare_genomes_cnt*(gene_bar_lw+genome_ring_spacing)*lw_to_coord_scale\n #label_radius = inner_radius + text_y_delta * lw_to_coord_scale * (compare_genomes_cnt + 0.5) * (gene_bar_lw+genome_ring_spacing)\n #label_radius = 
inner_radius + lw_to_coord_scale * (compare_genomes_cnt + 0.5) * (gene_bar_lw+genome_ring_spacing)\n this_gene_bar_lw = unscaled_ring_lw\n if gene_bar_lw == unscaled_ring_lw:\n label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt) * (gene_bar_lw+genome_ring_spacing)\n else:\n label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-1)*(gene_bar_lw+genome_ring_spacing) + 0.5*lw_to_coord_scale * (this_gene_bar_lw+genome_ring_spacing)\n label_radius *= 0.5 # why is this necessary?\n x_shift = label_radius * math.cos(label_angle)\n y_shift = label_radius * math.sin(label_angle)\n label_x_pos = ellipse_center_x + x_shift + label_margin\n label_y_pos = ellipse_center_y + y_shift - y_downshift\n label = str(0)\n ax.text (label_x_pos, label_y_pos, label, verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n for genome_i,genome_ref in enumerate(compare_genome_refs):\n #label_radius = 0.5*inner_radius + text_y_delta*(compare_genomes_cnt-(genome_i+1))*(gene_bar_lw+genome_ring_spacing)*lw_to_coord_scale\n #label_radius = inner_radius + text_y_delta * lw_to_coord_scale * (compare_genomes_cnt - (genome_i+1) + 0.5) * (gene_bar_lw+genome_ring_spacing)\n #label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt - (genome_i+1) + 0.5) * (gene_bar_lw+genome_ring_spacing)\n #label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-(genome_i+1+1))*(gene_bar_lw+genome_ring_spacing) + 0.5*lw_to_coord_scale * (gene_bar_lw+genome_ring_spacing)\n label_radius = inner_radius + lw_to_coord_scale * (compare_genomes_cnt-(genome_i+1))*(gene_bar_lw+genome_ring_spacing)\n label_radius *= 0.5 # why is this necessary?\n x_shift = label_radius * math.cos(label_angle)\n y_shift = label_radius * math.sin(label_angle)\n label_x_pos = ellipse_center_x + x_shift + label_margin\n label_y_pos = ellipse_center_y + y_shift - y_downshift\n label = str(genome_i+1)\n ax.text (label_x_pos, label_y_pos, label, verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n # Add color key\n key_x_margin = 0.01\n key_y_margin = 0.01\n key_line_spacing = 0.015\n key_x_label_offset = 0.018\n box_gap = key_line_spacing/6.0\n box_h = key_line_spacing - box_gap\n box_w = box_h\n\n # base genome key\n ax.text (key_x_margin/2.0, 1.0-key_y_margin, genome_sci_name_by_ref[base_genome_ref], verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n key_config = [ { 'name': 'base singletons',\n 'y_shift': 1,\n 'color': base_singleton_color\n },\n { 'name': 'non-core',\n 'y_shift': 2,\n 'color': base_partial_color\n }\n ]\n if outgroup_genome_refs_cnt == 0:\n key_config.extend(\n [ { 'name': 'core',\n 'y_shift': 3,\n 'color': base_nonspecific_core_color\n }\n ])\n else:\n key_config.extend(\n [ { 'name': 'clade-specific core',\n 'y_shift': 3,\n 'color': base_core_color\n },\n { 'name': 'core + outgroup',\n 'y_shift': 4,\n 'color': base_univ_color\n }\n ])\n for k_config in key_config:\n key_box = Rectangle ((key_x_margin, 1.0-(key_y_margin+k_config['y_shift']*key_line_spacing)), box_w, box_h, facecolor=k_config['color'], edgecolor=text_color, alpha=1.0, zorder=1)\n ax.add_patch(key_box)\n ax.text (key_x_margin+key_x_label_offset, 1.0-(key_y_margin+box_gap+k_config['y_shift']*key_line_spacing), k_config['name'], verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n # 
rest of pangenome key\n ax.text (key_x_margin/2.0, 1.0-(key_y_margin+5.5*key_line_spacing), \"Pangenome\", verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n key_config = [ { 'name': 'non-core',\n 'y_shift': 6.5,\n 'color': hit_partial_color\n }\n ]\n if outgroup_genome_refs_cnt == 0:\n key_config.extend ([\n { 'name': 'core',\n 'y_shift': 7.5,\n 'color': hit_nonspecific_core_color\n }\n ])\n else:\n key_config.extend ([\n { 'name': 'clade-specific core',\n 'y_shift': 7.5,\n 'color': hit_core_color\n },\n { 'name': 'core + outgroup',\n 'y_shift': 8.5,\n 'color': hit_univ_color\n }\n ])\n for k_config in key_config:\n key_box = Rectangle ((key_x_margin, 1.0-(key_y_margin+k_config['y_shift']*key_line_spacing)), box_w, box_h, facecolor=k_config['color'], edgecolor=text_color, alpha=1.0, zorder=1)\n ax.add_patch(key_box)\n ax.text (key_x_margin+key_x_label_offset, 1.0-(key_y_margin+box_gap+k_config['y_shift']*key_line_spacing), k_config['name'], verticalalignment=\"bottom\", horizontalalignment=\"left\", color=text_color, fontsize=text_fontsize, zorder=1)\n\n\n # Save circle plot\n #\n self.log(console, \"SAVING CIRCLE PLOT\")\n png_file = base_genome_obj_name+'-pangenome_circle.png'\n pdf_file = base_genome_obj_name+'-pangenome_circle.pdf'\n output_png_file_path = os.path.join(html_output_dir, png_file);\n output_pdf_file_path = os.path.join(html_output_dir, pdf_file);\n fig.savefig(output_png_file_path, dpi=img_dpi)\n fig.savefig(output_pdf_file_path, format='pdf')\n\n\n # build report object\n #\n self.log(console, \"CREATING HTML REPORT\")\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n reportObj['objects_created'] = objects_created\n\n\n # build html report\n #\n circle_img_height = 1000\n cell_padding = 0\n cell_spacing = 10\n #cell_spacing = 0\n cell_border = 0\n sp = ' '\n text_color = \"#606060\"\n font_size = '3'\n space_fontsize = '1'\n bar_char = '.'\n bar_fontsize = '1'\n bar_width = 50\n num_bars_per_node = 2 + 1\n \n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Pangenome Homolog Circle Plot</title>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n html_report_lines += ['<table cellpadding=\"'+str(cell_padding)+'\" cellspacing=\"'+str(cell_spacing)+'\" border=\"'+str(cell_border)+'\">']\n\n # add circle image\n circle_rowspan = 2 * (compare_genomes_cnt+outgroup_genome_refs_cnt+1)\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=\"middle\" align=\"left\" rowspan=\"'+str(circle_rowspan)+'\">']\n html_report_lines += ['<img src=\"'+png_file+'\" height='+str(circle_img_height)+'>']\n html_report_lines += ['</td>']\n\n # add labels\n for filler_line_i in range((compare_genomes_cnt+outgroup_genome_refs_cnt+1)//2):\n if filler_line_i > 0:\n html_report_lines += ['<tr>']\n html_report_lines += ['<td>'+sp+'</td></tr>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr><b>'+\"genome \"+str(0)+'</b></nobr></font></td>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font 
color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr><b>'+str(genome_sci_name_by_ref[base_genome_ref])+'</b></nobr></font></td>']\n html_report_lines += ['</tr>']\n for genome_i,genome_ref in enumerate(compare_genome_refs):\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr>'+\"genome \"+str(genome_i+1)+'</nobr></font></td>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr>'+str(genome_sci_name_by_ref[genome_ref])+'</nobr></font></td>']\n html_report_lines += ['</tr>']\n for genome_i,genome_ref in enumerate(outgroup_genome_refs):\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr><i>'+\"outgroup\"+'</i></nobr></font></td>']\n html_report_lines += ['<td valign=\"top\" align=\"left\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr><i>'+str(genome_sci_name_by_ref[genome_ref])+'</i></nobr></font></td>']\n html_report_lines += ['</tr>']\n for filler_line_i in range((compare_genomes_cnt+outgroup_genome_refs_cnt+1)//2):\n html_report_lines += ['<tr><td>'+sp+'</td></tr>']\n\n # close\n html_report_lines += ['</table>']\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n self.log(console, \"SAVING AND UPLOADING HTML REPORT\")\n html_file = os.path.join (html_output_dir, 'pan_circle_plot_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n png_upload_ret = dfu.file_to_shock({'file_path': output_png_file_path,\n 'make_handle': 0})\n #'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading png_file to shock')\n\n try:\n pdf_upload_ret = dfu.file_to_shock({'file_path': output_pdf_file_path,\n 'make_handle': 0})\n #'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading pdf_file to shock')\n\n try:\n #upload_ret = dfu.file_to_shock({'file_path': html_file,\n upload_ret = dfu.file_to_shock({'file_path': html_output_dir,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading html_report to shock')\n\n reportObj['file_links'] = [{'shock_id': png_upload_ret['shock_id'],\n 'name': 'pan_circle_plot.png',\n 'label': 'Pangenome Circle Plot PNG'},\n {'shock_id': pdf_upload_ret['shock_id'],\n 'name': 'pan_circle_plot.pdf',\n 'label': 'Pangenome Circle Plot PDF'}\n ]\n reportObj['html_links'] = [{'shock_id': upload_ret['shock_id'],\n 'name': 'pan_circle_plot_report.html',\n 'label': 'Pangenome Circle Plot Report'}\n ]\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_pan_circle_plot\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_circle_plot return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def 
view_pan_accumulation_plot(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_accumulation_plot_Input\"\n (view_pan_accumulation_plot() ** ** build an accumulation plot of\n a pangenome) -> structure: parameter \"workspace_name\" of type\n \"workspace_name\" (** Common types), parameter \"input_genome_ref\"\n of type \"data_obj_ref\", parameter \"input_pangenome_ref\" of type\n \"data_obj_ref\"\n :returns: instance of type \"view_pan_accumulation_plot_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_pan_accumulation_plot\n #END view_pan_accumulation_plot\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_accumulation_plot return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_pan_flower_venn(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_flower_venn_Input\"\n (view_pan_flower_venn() ** ** build a multi-member pangenome\n flower venn diagram) -> structure: parameter \"workspace_name\" of\n type \"workspace_name\" (** Common types), parameter\n \"input_genome_ref\" of type \"data_obj_ref\", parameter\n \"input_pangenome_ref\" of type \"data_obj_ref\"\n :returns: instance of type \"view_pan_flower_venn_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_pan_flower_venn\n #END view_pan_flower_venn\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_flower_venn return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_pan_pairwise_overlap(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_pairwise_overlap_Input\"\n (view_pan_pairwise_overlap() ** ** build a multi-member pangenome\n pairwise overlap plot) -> structure: parameter \"workspace_name\" of\n type \"workspace_name\" (** Common types), parameter\n \"input_genome_ref\" of type \"data_obj_ref\", parameter\n \"input_pangenome_ref\" of type \"data_obj_ref\"\n :returns: instance of type \"view_pan_pairwise_overlap_Output\" ->\n structure: parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN view_pan_pairwise_overlap\n #END view_pan_pairwise_overlap\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_pairwise_overlap return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def view_pan_phylo(self, ctx, params):\n \"\"\"\n :param params: instance of type \"view_pan_phylo_Input\"\n (view_pan_phylo() ** ** show the pangenome accumulation using a\n tree) -> structure: parameter \"workspace_name\" of type\n \"workspace_name\" (** Common types), parameter\n \"input_pangenome_ref\" of type \"data_obj_ref\", parameter\n \"input_speciesTree_ref\" of type \"data_obj_ref\", parameter\n \"save_featuresets\" of type \"bool\"\n :returns: instance of type \"view_pan_phylo_Output\" -> structure:\n parameter \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN 
view_pan_phylo\n\n ### STEP 0: basic init\n console = []\n self.log(console, 'Running view_pan_phylo(): ')\n self.log(console, \"\\n\"+pformat(params))\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n headers = {'Authorization': 'OAuth '+token}\n env = os.environ.copy()\n env['KB_AUTH_TOKEN'] = token\n\n #SERVICE_VER = 'dev' # DEBUG\n SERVICE_VER = 'release'\n\n # param checks\n required_params = ['input_speciesTree_ref',\n 'input_pangenome_ref'\n ]\n for arg in required_params:\n if arg not in params or params[arg] == None or params[arg] == '':\n raise ValueError (\"Must define required param: '\"+arg+\"'\")\n\n\n # load provenance\n provenance = [{}]\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n provenance[0]['input_ws_objects']=[str(params['input_speciesTree_ref']),\n str(params['input_pangenome_ref'])\n ]\n\n\n # set the output paths\n timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)\n output_dir = os.path.join(self.scratch,'output.'+str(timestamp))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n html_output_dir = os.path.join(output_dir,'html_output')\n if not os.path.exists(html_output_dir):\n os.makedirs(html_output_dir)\n\n\n # get speciesTree\n #\n input_ref = params['input_speciesTree_ref']\n speciesTree_name = None\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n speciesTree_name = input_obj_info[NAME_I]\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseTrees.Tree\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n # get set obj\n try:\n speciesTree_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch speciesTree: \"+input_ref)\n\n\n # get genome_refs from speciesTree and instantiate ETE3 tree and order\n #\n genome_refs = []\n genome_id_by_ref = dict()\n genome_ref_by_id = dict()\n for genome_id in speciesTree_obj['default_node_labels'].keys():\n genome_ref = speciesTree_obj['ws_refs'][genome_id]['g'][0]\n genome_id_by_ref[genome_ref] = genome_id\n genome_ref_by_id[genome_id] = genome_ref\n\n species_tree = ete3.Tree(speciesTree_obj['tree']) # instantiate ETE3 tree\n species_tree.ladderize()\n for genome_id in species_tree.get_leaf_names():\n genome_refs.append(genome_ref_by_id[genome_id])\n\n\n # get internal node ids based on sorted genome_refs of children\n #\n node_ids_by_refs = dict()\n genome_ref_to_node_ids_by_refs = dict()\n node_size = dict()\n node_order_by_ref = []\n node_num_id = -1\n for n in species_tree.traverse(\"preorder\"):\n if n.is_leaf():\n continue\n\n node_num_id += 1\n leaf_refs = []\n for genome_id in n.get_leaf_names():\n leaf_refs.append(genome_ref_by_id[genome_id])\n node_ref_id = \"+\".join(sorted(leaf_refs))\n node_size[node_ref_id] = len(leaf_refs)\n node_order_by_ref.append(node_ref_id)\n node_ids_by_refs[node_ref_id] = node_num_id\n\n # point each genome at its nodes\n for genome_ref in leaf_refs:\n if genome_ref not in genome_ref_to_node_ids_by_refs:\n genome_ref_to_node_ids_by_refs[genome_ref] = []\n genome_ref_to_node_ids_by_refs[genome_ref].append(node_ref_id)\n \n\n # get object names, sci names, protein-coding gene counts, and SEED annot\n #\n genome_obj_name_by_ref = dict()\n genome_sci_name_by_ref = dict()\n genome_sci_name_by_id = dict()\n genome_CDS_count_by_ref = dict()\n uniq_genome_ws_ids = dict()\n\n dom_hits = dict() # initialize dom_hits here because reading SEED within genome\n genes_with_hits_cnt = dict()\n\n for genome_ref in genome_refs:\n\n dom_hits[genome_ref] = dict()\n genes_with_hits_cnt[genome_ref] = dict()\n\n # get genome object name\n input_ref = genome_ref\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n input_name = input_obj_info[NAME_I]\n uniq_genome_ws_ids[input_obj_info[WSID_I]] = True\n\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Genome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. 
Must be one of \"+\", \".join(accepted_input_types))\n\n genome_obj_name_by_ref[genome_ref] = input_name\n\n try:\n genome_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n # sci name\n genome_sci_name_by_ref[genome_ref] = genome_obj['scientific_name']\n genome_sci_name_by_id[genome_id_by_ref[genome_ref]] = genome_obj['scientific_name']\n \n # CDS cnt\n cds_cnt = 0\n for feature in genome_obj['features']:\n if 'protein_translation' in feature and feature['protein_translation'] != None and feature['protein_translation'] != '':\n cds_cnt += 1\n genome_CDS_count_by_ref[genome_ref] = cds_cnt\n\n\n # get pangenome\n #\n self.log(console, \"GETTING PANGENOME OBJECT\")\n input_ref = params['input_pangenome_ref']\n try:\n [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple\n input_obj_info = wsClient.get_object_info_new ({'objects':[{'ref':input_ref}]})[0]\n input_obj_type = re.sub ('-[0-9]+\\.[0-9]+$', \"\", input_obj_info[TYPE_I]) # remove trailing version\n pg_obj_name = input_obj_info[NAME_I]\n pg_obj_name = pg_obj_name.replace(\" \", \"_\")\n except Exception as e:\n raise ValueError('Unable to get object from workspace: (' + input_ref +')' + str(e))\n accepted_input_types = [\"KBaseGenomes.Pangenome\" ]\n if input_obj_type not in accepted_input_types:\n raise ValueError (\"Input object of type '\"+input_obj_type+\"' not accepted. Must be one of \"+\", \".join(accepted_input_types))\n\n try:\n pg_obj = wsClient.get_objects([{'ref':input_ref}])[0]['data']\n except:\n raise ValueError (\"unable to fetch genome: \"+input_ref)\n\n\n\n # make sure species tree genomes are found in pangenome (reverse not required)\n for genome_ref in genome_refs:\n if genome_ref not in pg_obj['genome_refs']:\n raise ValueError (\"genome: '\"+str(genome_ref)+\"' from SpeciesTree not present in Pangenome object\")\n\n\n # determine pangenome accumulations of core, partial, and singleton\n #\n cluster_hits = dict()\n nodes_hit_by_gene = dict()\n for node_ref_id in node_ids_by_refs.keys():\n cluster_hits[node_ref_id] = []\n\n cluster_num = -1 # cluster ids themselves start from 1\n for homolog_cluster in pg_obj['orthologs']:\n cluster_num += 1\n for node_ref_id in node_ids_by_refs.keys():\n cluster_hits[node_ref_id].append(0)\n\n nodes_hit = dict()\n for gene in homolog_cluster['orthologs']:\n gene_id = gene[0]\n probably_gene_len_dont_need = gene[1]\n genome_ref = gene[2]\n\n if genome_ref not in genome_ref_to_node_ids_by_refs:\n continue\n for node_ref_id in genome_ref_to_node_ids_by_refs[genome_ref]:\n if node_ref_id not in nodes_hit:\n nodes_hit[node_ref_id] = dict()\n nodes_hit[node_ref_id][genome_ref] = True\n\n # store features\n if node_ref_id not in nodes_hit_by_gene:\n nodes_hit_by_gene[node_ref_id] = dict()\n if cluster_num not in nodes_hit_by_gene[node_ref_id]:\n nodes_hit_by_gene[node_ref_id][cluster_num] = dict()\n if genome_ref not in nodes_hit_by_gene[node_ref_id][cluster_num]:\n nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref] = []\n\n nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref].append(gene_id)\n \n # sum counts\n for node_ref_id in nodes_hit.keys():\n for genome_ref in nodes_hit[node_ref_id].keys():\n cluster_hits[node_ref_id][cluster_num] += 1\n\n # calc accumulations\n clusters_total = dict()\n clusters_singletons = dict()\n clusters_core = dict()\n clusters_partial = dict()\n 
clusters_singletons_by_node_and_cluster_flag = dict()\n clusters_core_by_node_and_cluster_flag = dict()\n clusters_partial_by_node_and_cluster_flag = dict()\n for node_ref_id in node_ids_by_refs.keys():\n clusters_total[node_ref_id] = 0\n clusters_singletons[node_ref_id] = 0\n clusters_core[node_ref_id] = 0\n clusters_partial[node_ref_id] = 0\n clusters_singletons_by_node_and_cluster_flag[node_ref_id] = dict()\n clusters_core_by_node_and_cluster_flag[node_ref_id] = dict()\n clusters_partial_by_node_and_cluster_flag[node_ref_id] = dict()\n\n for cluster_num,hit_cnt in enumerate(cluster_hits[node_ref_id]):\n if hit_cnt > 0:\n clusters_total[node_ref_id] += 1\n if hit_cnt == 1:\n clusters_singletons[node_ref_id] += 1\n clusters_singletons_by_node_and_cluster_flag[node_ref_id][cluster_num] = True\n \n elif hit_cnt == node_size[node_ref_id]:\n clusters_core[node_ref_id] += 1\n clusters_core_by_node_and_cluster_flag[node_ref_id][cluster_num] = True\n else:\n clusters_partial[node_ref_id] += 1\n clusters_partial_by_node_and_cluster_flag[node_ref_id][cluster_num] = True\n\n # get min and max cluster cnts\n INSANE_VALUE = 10000000000000000\n max_clusters_cnt = -INSANE_VALUE\n min_clusters_cnt = INSANE_VALUE\n for node_ref_id in node_ids_by_refs.keys():\n if clusters_total[node_ref_id] > max_clusters_cnt:\n max_clusters_cnt = clusters_total[node_ref_id]\n if clusters_total[node_ref_id] < min_clusters_cnt:\n min_clusters_cnt = clusters_total[node_ref_id]\n\n self.log(console, \"NODE: \"+node_ref_id+\" MIN: \"+str(min_clusters_cnt)+\" MAX: \"+str(max_clusters_cnt)) # DEBUG\n\n\n # Create FeatureSet objects for nodes\n #\n objects_created = []\n if 'save_featuresets' not in params or params['save_featuresets'] == None or params['save_featuresets'] == '' or int(params['save_featuresets']) != 1:\n self.log(console, \"SKIPPING FEATURESETS\")\n else:\n self.log(console, \"SAVING FEATURESETS\")\n\n for node_ref_id in sorted(node_ids_by_refs, key=node_ids_by_refs.get):\n\n node_num_id = str(node_ids_by_refs[node_ref_id])\n\n self.log(console, \"calculating feature sets for node \"+str(node_num_id))\n\n # Core\n if clusters_core[node_ref_id] > 0:\n\n self.log(console, \"\\t\"+\"adding CORE. Num clusters: \"+str(clusters_core[node_ref_id]))\n\n # build core featureset elements\n core_featureSet_elements = {}\n for cluster_num in sorted(clusters_core_by_node_and_cluster_flag[node_ref_id].keys()):\n for genome_ref in nodes_hit_by_gene[node_ref_id][cluster_num].keys():\n for gene_id in nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref]:\n if gene_id in core_featureSet_elements:\n core_featureSet_elements[gene_id].append(genome_ref)\n else:\n core_featureSet_elements[gene_id] = [genome_ref]\n\n # build object\n fs_name = pg_obj_name+\".node-\"+str(node_num_id)+\".core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".node-\"+str(node_num_id)+\" core pangenome features\"\n core_obj = { 'description': fs_desc,\n 'elements': core_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': core_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n core_featureSet_elements = {} # free memory\n core_obj = {} # free memory\n\n\n # Singletons\n if clusters_singletons[node_ref_id] > 0:\n\n self.log(console, \"\\t\"+\"adding SINGLETON. 
Num clusters: \"+str(clusters_singletons[node_ref_id]))\n\n # build singleton featureset elements\n singleton_featureSet_elements = {}\n for cluster_num in sorted(clusters_singletons_by_node_and_cluster_flag[node_ref_id].keys()):\n for genome_ref in nodes_hit_by_gene[node_ref_id][cluster_num].keys():\n for gene_id in nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref]:\n if gene_id in singleton_featureSet_elements:\n singleton_featureSet_elements[gene_id].append(genome_ref)\n else:\n singleton_featureSet_elements[gene_id] = [genome_ref]\n\n # build object\n fs_name = pg_obj_name+\".node-\"+str(node_num_id)+\".singleton_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".node-\"+str(node_num_id)+\" singleton pangenome features\"\n singleton_obj = { 'description': fs_desc,\n 'elements': singleton_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': singleton_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n singleton_featureSet_elements = {} # free memory\n singleton_obj = {} # free memory\n\n\n # Partial pangenome\n if clusters_partial[node_ref_id] > 0:\n\n self.log(console, \"\\t\"+\"adding PARTIAL. Num clusters: \"+str(clusters_partial[node_ref_id]))\n\n # build partial featureset elements\n partial_featureSet_elements = {}\n for cluster_num in sorted(clusters_partial_by_node_and_cluster_flag[node_ref_id].keys()):\n for genome_ref in nodes_hit_by_gene[node_ref_id][cluster_num].keys():\n for gene_id in nodes_hit_by_gene[node_ref_id][cluster_num][genome_ref]:\n if gene_id in partial_featureSet_elements:\n partial_featureSet_elements[gene_id].append(genome_ref)\n else:\n partial_featureSet_elements[gene_id] = [genome_ref]\n\n # build object\n fs_name = pg_obj_name+\".node-\"+str(node_num_id)+\".non-core_pangenome.FeatureSet\"\n fs_desc = pg_obj_name+\".node-\"+str(node_num_id)+\" non-core pangenome features\"\n partial_obj = { 'description': fs_desc,\n 'elements': partial_featureSet_elements\n }\n new_obj_info = wsClient.save_objects({\n 'workspace':params['workspace_name'],\n 'objects':[\n { 'type': 'KBaseCollections.FeatureSet',\n 'data': partial_obj,\n 'name': fs_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n objects_created.append({'ref':str(new_obj_info[6])+'/'+str(new_obj_info[0])+'/'+str(new_obj_info[4]), 'description': fs_desc})\n partial_featureSet_elements = {} # free memory\n partial_obj = {} # free memory\n\n\n # Draw tree (we already instantiated Tree above)\n #\n png_file = speciesTree_name+'-pangenome.png'\n pdf_file = speciesTree_name+'-pangenome.pdf'\n output_png_file_path = os.path.join(html_output_dir, png_file);\n output_pdf_file_path = os.path.join(html_output_dir, pdf_file);\n\n # init ETE3 accessory objects\n ts = ete3.TreeStyle()\n\n # customize\n min_pie_size = 1000\n max_pie_size = 2000\n leaf_fontsize = 500 # scale of everything is goofy in circle tree mode, and pie size affects type size and line thickness. 
ugh.\n node_fontsize = 500\n ts.mode = \"c\" # circular tree graph\n #ts.arc_start = -180 # 0 degrees = 3 o'clock\n #ts.arc_span = 180\n ts.show_leaf_name = True\n ts.show_branch_length = False\n ts.show_branch_support = True\n #ts.scale = 50 # 50 pixels per branch length unit\n ts.branch_vertical_margin = 5 # pixels between adjacent branches\n #ts.title.add_face(ete3.TextFace(params['output_name']+\": \"+params['desc'], fsize=10), column=0)\n\n node_style = ete3.NodeStyle()\n node_style[\"fgcolor\"] = \"#606060\" # for node balls\n node_style[\"size\"] = 10 # for node balls (gets reset based on support)\n node_style[\"vt_line_color\"] = \"#606060\"\n node_style[\"hz_line_color\"] = \"#606060\"\n node_style[\"vt_line_width\"] = 100 # 2\n node_style[\"hz_line_width\"] = 100 # 2\n node_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n node_style[\"hz_line_type\"] = 0\n\n leaf_style = ete3.NodeStyle()\n leaf_style[\"fgcolor\"] = \"#ffffff\" # for node balls\n leaf_style[\"size\"] = 100 # for node balls (we're using it to add space)\n leaf_style[\"vt_line_color\"] = \"#606060\" # unecessary\n leaf_style[\"hz_line_color\"] = \"#606060\"\n leaf_style[\"vt_line_width\"] = 100 # 2\n leaf_style[\"hz_line_width\"] = 100 # 2\n leaf_style[\"vt_line_type\"] = 0 # 0 solid, 1 dashed, 2 dotted\n leaf_style[\"hz_line_type\"] = 0\n\n for n in species_tree.traverse(\"preorder\"):\n if n.is_leaf():\n style = leaf_style\n genome_id = n.name\n #n.name = genome_sci_name_by_id[genome_id]\n n.name = None\n leaf_name_disp = genome_sci_name_by_id[genome_id]\n n.add_face (ete3.TextFace(leaf_name_disp, fsize=leaf_fontsize), column=0, position=\"branch-right\")\n\n else:\n leaf_refs = []\n for genome_id in n.get_leaf_names():\n leaf_refs.append(genome_ref_by_id[genome_id])\n node_ref_id = \"+\".join(sorted (leaf_refs))\n node_num_id = node_ids_by_refs[node_ref_id]\n node_name_disp = str(node_num_id)\n #n.add_face (ete3.TextFace(node_name_disp, fsize=node_fontsize),column=0, position=\"branch-right\")\n n.add_face (ete3.TextFace(' '+node_name_disp+' ', fsize=node_fontsize),column=0)\n\n style = ete3.NodeStyle()\n for k in node_style.keys():\n style[k] = node_style[k]\n\n if n.support > 0.95:\n style[\"size\"] = 6\n elif n.support > 0.90:\n style[\"size\"] = 5\n elif n.support > 0.80:\n style[\"size\"] = 4\n else:\n style[\"size\"] = 2\n\n # yum! 
pie!\n pie_size = int(min_pie_size + float(max_pie_size-min_pie_size) * float(clusters_total[node_ref_id]-min_clusters_cnt) / float(max_clusters_cnt-min_clusters_cnt))\n singleton_perc = round(100.0*float(clusters_singletons[node_ref_id]) / float(clusters_total[node_ref_id]), 1)\n core_perc = round(100.0*float(clusters_core[node_ref_id]) / float(clusters_total[node_ref_id]), 1)\n partial_perc = round (100.0 - core_perc - singleton_perc, 1)\n\n pie_w = pie_h = pie_size\n pie_percs = [singleton_perc, partial_perc, core_perc]\n pie_colors = [\"IndianRed\", \"Orchid\", \"DodgerBlue\"]\n pie_line_color = \"White\"\n\n this_pieFace = ete3.PieChartFace(pie_percs, pie_w, pie_h, pie_colors, pie_line_color)\n n.add_face (this_pieFace, column=1)\n\n n.set_style(style)\n\n # save images\n dpi = 300\n img_units = \"in\"\n img_pix_width = 1200\n img_in_width = round(float(img_pix_width)/float(dpi), 1)\n img_html_width = img_pix_width // 2\n species_tree.render(output_png_file_path, w=img_in_width, units=img_units, dpi=dpi, tree_style=ts)\n species_tree.render(output_pdf_file_path, w=img_in_width, units=img_units, tree_style=ts) # dpi irrelevant\n\n\n # build report object\n #\n reportName = 'kb_phylogenomics_report_'+str(uuid.uuid4())\n reportObj = {'objects_created': [],\n #'text_message': '', # or is it 'message'?\n 'message': '', # or is it 'text_message'?\n 'direct_html': None,\n 'direct_html_link_index': 0,\n 'file_links': [],\n 'html_links': [],\n 'workspace_name': params['workspace_name'],\n 'report_object_name': reportName\n }\n\n\n # build html report\n #\n tree_img_height = 1000\n cell_padding = 0\n #cell_spacing = 5\n cell_spacing = 0\n cell_border = 0\n sp = ' '\n horiz_sp = sp+sp+sp+sp\n text_color = \"#606060\"\n font_size = '2'\n space_fontsize = '1'\n bar_char = '.'\n bar_fontsize = '1'\n bar_width = 50\n cat_order = ['TOTAL', 'singleton', 'partial', 'perfect core']\n cat_colors = [text_color] + pie_colors\n #num_bars_per_node = 2*len(cat_order) + 1\n num_bars_per_node = len(cat_order) + 1\n \n html_report_lines = []\n html_report_lines += ['<html>']\n html_report_lines += ['<head>']\n html_report_lines += ['<title>KBase Pangenome Phylogenetic Context</title>']\n html_report_lines += ['</head>']\n html_report_lines += ['<body bgcolor=\"white\">']\n html_report_lines += ['<table cellpadding=\"'+str(cell_padding)+'\" cellspacing=\"'+str(cell_spacing)+'\" border=\"'+str(cell_border)+'\">']\n\n # add tree image\n html_report_lines += ['<tr>']\n html_report_lines += ['<td valign=\"top\" align=\"left\" rowspan=\"'+str(num_bars_per_node*len(node_ids_by_refs))+'\">']\n html_report_lines += ['<img src=\"'+png_file+'\" height='+str(tree_img_height)+'>']\n html_report_lines += ['</td>']\n\n # add key and bar graph\n max_cnt = 0\n for node_ref_id in node_order_by_ref:\n if clusters_total[node_ref_id] > max_cnt:\n max_cnt = clusters_total[node_ref_id]\n\n for node_i,node_ref_id in enumerate(node_order_by_ref):\n node_id = node_ids_by_refs[node_ref_id]\n if node_i > 0:\n html_report_lines += ['<tr>']\n\n # vals\n cat_cnts = dict()\n cat_percs = dict()\n cat_cnts['TOTAL'] = clusters_total[node_ref_id]\n cat_cnts['singleton'] = clusters_singletons[node_ref_id]\n cat_cnts['perfect core'] = clusters_core[node_ref_id]\n cat_cnts['partial'] = clusters_total[node_ref_id] - clusters_singletons[node_ref_id] - clusters_core[node_ref_id]\n cat_percs['TOTAL'] = '100'\n cat_percs['singleton'] = round (100.0*float(clusters_singletons[node_ref_id]) / float(clusters_total[node_ref_id]), 1)\n cat_percs['perfect core'] 
= round (100.0*float(clusters_core[node_ref_id]) / float(clusters_total[node_ref_id]), 1)\n if cat_cnts['partial'] == 0:\n cat_percs['partial'] = 0.0\n else:\n cat_percs['partial'] = round (100.0 - cat_percs['perfect core'] - cat_percs['singleton'], 1)\n\n # node id\n node_label = 'NODE '+str(node_id)\n html_report_lines += ['<td rowspan=\"'+str(num_bars_per_node)+'\" valign=\"top\" align=\"right\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><b><nobr>'+str(node_label)+'</nobr></b></font></td>']\n html_report_lines += ['<td rowspan=\"'+str(num_bars_per_node)+'\"><font size=\"'+str(space_fontsize)+'\">'+horiz_sp+'</font></td>']\n\n for cat_i,cat in enumerate(cat_order):\n if cat_i > 0:\n html_report_lines += ['<tr>']\n # cat name\n html_report_lines += ['<td valign=\"top\" align=\"right\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\"><nobr>'+cat+'</nobr></font></td>']\n html_report_lines += ['<td><font size=\"'+str(space_fontsize)+'\">'+horiz_sp+'</font></td>']\n \n # cnt\n html_report_lines += ['<td valign=\"top\" align=\"right\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\">'+str(cat_cnts[cat])+'</font></td>']\n html_report_lines += ['<td><font size=\"'+str(space_fontsize)+'\">'+horiz_sp+'</font></td>']\n\n #perc\n html_report_lines += ['<td valign=\"top\" align=\"right\"><font color=\"'+str(text_color)+'\" size=\"'+str(font_size)+'\">'+str(cat_percs[cat])+'%'+'</font></td>']\n html_report_lines += ['<td><font size=\"'+str(space_fontsize)+'\">'+horiz_sp+'</font></td>']\n\n # bar\n this_width = int(round(float(bar_width) * (float(cat_cnts[cat])/float(max_cnt)), 0))\n for cell_i in range(this_width):\n html_report_lines += ['<td bgcolor=\"'+str(cat_colors[cat_i])+'\"><font size=\"'+str(bar_fontsize)+'\" color=\"'+str(cat_colors[cat_i])+'\">'+bar_char+'</font></td>']\n\n html_report_lines += ['</tr>']\n #html_report_lines += ['<tr><td><font size=\"'+str(space_fontsize)+'\">'+sp+'</font></td></tr>'] # space for blank row\n html_report_lines += ['<tr><td><font size=\"'+str(space_fontsize)+'\">'+sp+'</font></td></tr>'] # space for blank row\n \n\n # close\n html_report_lines += ['</table>']\n html_report_lines += ['</body>']\n html_report_lines += ['</html>']\n \n html_report_str = \"\\n\".join(html_report_lines)\n #reportObj['direct_html'] = html_report_str\n\n\n # write html to file and upload\n html_file = os.path.join (html_output_dir, 'pan_phylo_report.html')\n with open (html_file, 'w', 0) as html_handle:\n html_handle.write(html_report_str)\n dfu = DFUClient(self.callbackURL)\n try:\n png_upload_ret = dfu.file_to_shock({'file_path': output_png_file_path,\n 'make_handle': 0})\n #'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading png_file to shock')\n\n try:\n pdf_upload_ret = dfu.file_to_shock({'file_path': output_pdf_file_path,\n 'make_handle': 0})\n #'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading pdf_file to shock')\n\n try:\n #upload_ret = dfu.file_to_shock({'file_path': html_file,\n upload_ret = dfu.file_to_shock({'file_path': html_output_dir,\n 'make_handle': 0,\n 'pack': 'zip'})\n except:\n raise ValueError ('Logging exception loading html_report to shock')\n\n reportObj['file_links'] = [{'shock_id': png_upload_ret['shock_id'],\n 'name': 'phylogenetic_pangenome.png',\n 'label': 'Phylogenetic Pangenome PNG'},\n {'shock_id': pdf_upload_ret['shock_id'],\n 'name': 'phylogenetic_pangenome.pdf',\n 'label': 'Phylogenetic Pangenome PDF'}\n ]\n reportObj['html_links'] = [{'shock_id': 
upload_ret['shock_id'],\n 'name': 'pan_phylo_report.html',\n 'label': 'Phylogenetic Pangenome report'}\n ]\n\n # attach any created objects\n reportObj['objects_created'] = objects_created\n\n\n # save report object\n #\n reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)\n #report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})\n report_info = reportClient.create_extended_report(reportObj)\n\n output = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }\n\n #END view_pan_phylo\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method view_pan_phylo return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n def status(self, ctx):\n #BEGIN_STATUS\n returnVal = {'state': \"OK\",\n 'message': \"\",\n 'version': self.VERSION,\n 'git_url': self.GIT_URL,\n 'git_commit_hash': self.GIT_COMMIT_HASH}\n #END_STATUS\n return [returnVal]\n"
] | [
[
"matplotlib.patches.Rectangle",
"matplotlib.patches.Arc",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mbzhu1/ludwig | [
"13c35ec79f930e7dac295e642d92abe82f8c8046",
"201678b1d25b8010ba51c472c6e32ee014d5a4b0",
"13c35ec79f930e7dac295e642d92abe82f8c8046",
"201678b1d25b8010ba51c472c6e32ee014d5a4b0"
] | [
"tests/integration_tests/test_model_training_options.py",
"ludwig/utils/data_utils.py",
"ludwig/data/dataframe/pandas.py",
"ludwig/modules/recurrent_modules.py"
] | [
"import json\nimport os.path\nimport re\nfrom collections import namedtuple\nimport logging\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\nfrom ludwig import globals as global_vars\nfrom ludwig.api import LudwigModel\nfrom ludwig.backend import LOCAL_BACKEND\nfrom ludwig.experiment import experiment_cli\nfrom ludwig.features.numerical_feature import numeric_transformation_registry\nfrom ludwig.modules.optimization_modules import optimizers_registry\nfrom ludwig.utils.data_utils import load_json, replace_file_extension\nfrom ludwig.utils.misc_utils import get_from_registry\nfrom tests.integration_tests.utils import category_feature, generate_data\n\nRANDOM_SEED = 42\nNUMBER_OBSERVATIONS = 500\n\nGeneratedData = namedtuple('GeneratedData',\n 'train_df validation_df test_df')\n\n\ndef get_feature_configs():\n input_features = [\n {'name': 'x', 'type': 'numerical'},\n ]\n output_features = [\n {'name': 'y', 'type': 'numerical',\n 'loss': {'type': 'mean_squared_error'},\n 'num_fc_layers': 5, 'fc_size': 64}\n ]\n\n return input_features, output_features\n\n\[email protected](scope='module')\ndef generated_data():\n # function generates simple training data that guarantee convergence\n # within 30 epochs for suitable config\n\n # generate data\n np.random.seed(RANDOM_SEED)\n x = np.array(range(NUMBER_OBSERVATIONS)).reshape(-1, 1)\n y = 2 * x + 1 + np.random.normal(size=x.shape[0]).reshape(-1, 1)\n raw_df = pd.DataFrame(np.concatenate((x, y), axis=1), columns=['x', 'y'])\n\n # create training data\n train, valid_test = train_test_split(raw_df, train_size=0.7)\n\n # create validation and test data\n validation, test = train_test_split(valid_test, train_size=0.5)\n\n return GeneratedData(train, validation, test)\n\n\[email protected](scope='module')\ndef generated_data_for_optimizer():\n # function generates simple training data that guarantee convergence\n # within 30 epochs for suitable config\n\n # generate data\n np.random.seed(RANDOM_SEED)\n x = np.array(range(NUMBER_OBSERVATIONS)).reshape(-1, 1)\n y = 2 * x + 1 + np.random.normal(size=x.shape[0]).reshape(-1, 1)\n raw_df = pd.DataFrame(np.concatenate((x, y), axis=1), columns=['x', 'y'])\n raw_df['x'] = (raw_df['x'] - raw_df['x'].min()) / \\\n (raw_df['x'].max() - raw_df['x'].min())\n raw_df['y'] = (raw_df['y'] - raw_df['y'].min()) / \\\n (raw_df['y'].max() - raw_df['y'].min())\n\n # create training data\n train, valid_test = train_test_split(raw_df, train_size=0.7)\n\n # create validation and test data\n validation, test = train_test_split(valid_test, train_size=0.5)\n\n return GeneratedData(train, validation, test)\n\n\[email protected]('early_stop', [3, 5])\ndef test_early_stopping(early_stop, generated_data, tmp_path):\n input_features, output_features = get_feature_configs()\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {\n 'type': 'concat'\n },\n 'training': {\n 'epochs': 30,\n 'early_stop': early_stop,\n 'batch_size': 16\n }\n }\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n # run experiment\n _, _, _, _, output_dir = experiment_cli(\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n output_directory=str(results_dir),\n config=config,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n skip_save_model=True,\n 
skip_save_log=True\n )\n\n # test existence of required files\n train_stats_fp = os.path.join(output_dir, 'training_statistics.json')\n metadata_fp = os.path.join(output_dir, 'description.json')\n assert os.path.isfile(train_stats_fp)\n assert os.path.isfile(metadata_fp)\n\n # retrieve results so we can validate early stopping\n with open(train_stats_fp, 'r') as f:\n train_stats = json.load(f)\n with open(metadata_fp, 'r') as f:\n metadata = json.load(f)\n\n # get early stopping value\n early_stop_value = metadata['config']['training']['early_stop']\n\n # retrieve validation losses\n vald_losses = np.array(train_stats['validation']['combined']['loss'])\n last_epoch = vald_losses.shape[0]\n best_epoch = np.argmin(vald_losses)\n\n # confirm early stopping\n assert (last_epoch - best_epoch - 1) == early_stop_value\n\n\[email protected]('skip_save_progress', [False, True])\[email protected]('skip_save_model', [False, True])\ndef test_model_progress_save(\n skip_save_progress,\n skip_save_model,\n generated_data,\n tmp_path\n):\n input_features, output_features = get_feature_configs()\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {'type': 'concat'},\n 'training': {'epochs': 5}\n }\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n # run experiment\n _, _, _, _, output_dir = experiment_cli(\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n output_directory=str(results_dir),\n config=config,\n skip_save_processed_input=True,\n skip_save_progress=skip_save_progress,\n skip_save_unprocessed_output=True,\n skip_save_model=skip_save_model,\n skip_save_log=True\n )\n\n # ========== Check for required result data sets =============\n if skip_save_model:\n model_dir = os.path.join(output_dir, 'model')\n files = [f for f in os.listdir(model_dir) if\n re.match(r'model_weights', f)]\n assert len(files) == 0\n else:\n model_dir = os.path.join(output_dir, 'model')\n files = [f for f in os.listdir(model_dir) if\n re.match(r'model_weights', f)]\n # at least one .index and one .data file, but .data may be more\n assert len(files) >= 2\n assert os.path.isfile(\n os.path.join(output_dir, 'model', 'checkpoint'))\n\n if skip_save_progress:\n assert not os.path.isdir(\n os.path.join(output_dir, 'model', 'training_checkpoints')\n )\n else:\n assert os.path.isdir(\n os.path.join(output_dir, 'model', 'training_checkpoints')\n )\n\n\[email protected]('optimizer', ['sgd', 'adam'])\ndef test_resume_training(optimizer, generated_data, tmp_path):\n input_features, output_features = get_feature_configs()\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {'type': 'concat'},\n 'training': {\n 'epochs': 2,\n 'early_stop': 1000,\n 'batch_size': 16,\n 'optimizer': {'type': optimizer}\n }\n }\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n _, _, _, _, output_dir1 = experiment_cli(\n config,\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n )\n\n config['training']['epochs'] = 4\n\n experiment_cli(\n config,\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n model_resume_path=output_dir1,\n )\n\n _, _, _, _, output_dir2 = experiment_cli(\n config,\n training_set=generated_data.train_df,\n 
validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n )\n\n # compare learning curves with and without resuming\n ts1 = load_json(os.path.join(output_dir1, 'training_statistics.json'))\n ts2 = load_json(os.path.join(output_dir2, 'training_statistics.json'))\n print('ts1', ts1)\n print('ts2', ts2)\n assert ts1['training']['combined']['loss'] == ts2['training']['combined'][\n 'loss']\n\n # compare predictions with and without resuming\n y_pred1 = np.load(os.path.join(output_dir1, 'y_predictions.npy'))\n y_pred2 = np.load(os.path.join(output_dir2, 'y_predictions.npy'))\n print('y_pred1', y_pred1)\n print('y_pred2', y_pred2)\n assert np.all(np.isclose(y_pred1, y_pred2))\n\n\[email protected]('optimizer_type', optimizers_registry)\ndef test_optimizers(optimizer_type, generated_data_for_optimizer, tmp_path):\n input_features, output_features = get_feature_configs()\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {\n 'type': 'concat'\n },\n 'training': {\n 'epochs': 5,\n 'batch_size': 16,\n 'optimizer': {'type': optimizer_type}\n }\n }\n\n # special handling for adadelta, break out of local minima\n if optimizer_type == 'adadelta':\n config['training']['learning_rate'] = 0.1\n\n model = LudwigModel(config)\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n # run experiment\n train_stats, preprocessed_data, output_directory = model.train(\n training_set=generated_data_for_optimizer.train_df,\n output_directory=str(results_dir),\n config=config,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n skip_save_model=True,\n skip_save_log=True\n )\n\n # retrieve training losses for first and last epochs\n train_losses = np.array(train_stats['training']['combined']['loss'])\n last_epoch = train_losses.shape[0]\n\n # ensure train loss for last epoch is less than first epoch\n assert train_losses[last_epoch - 1] < train_losses[0]\n\n\ndef test_regularization(generated_data, tmp_path):\n input_features, output_features = get_feature_configs()\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'combiner': {\n 'type': 'concat'\n },\n 'training': {\n 'epochs': 1,\n 'batch_size': 16,\n 'regularization_lambda': 1\n }\n }\n\n # create sub-directory to store results\n results_dir = tmp_path / 'results'\n results_dir.mkdir()\n\n regularization_losses = []\n for regularizer in [None, 'l1', 'l2', 'l1_l2']:\n tf.keras.backend.clear_session()\n np.random.seed(RANDOM_SEED)\n tf.random.set_seed(RANDOM_SEED)\n\n # setup regularization parameters\n config['output_features'][0][\n 'weights_regularizer'] = regularizer\n config['output_features'][0][\n 'bias_regularizer'] = regularizer\n config['output_features'][0][\n 'activity_regularizer'] = regularizer\n\n # run experiment\n _, _, _, _, output_dir = experiment_cli(\n training_set=generated_data.train_df,\n validation_set=generated_data.validation_df,\n test_set=generated_data.test_df,\n output_directory=str(results_dir),\n config=config,\n experiment_name='regularization',\n model_name=str(regularizer),\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n skip_save_model=True,\n skip_save_log=True\n )\n\n # test existence of required files\n train_stats_fp = os.path.join(output_dir, 'training_statistics.json')\n metadata_fp = os.path.join(output_dir, 'description.json')\n assert 
os.path.isfile(train_stats_fp)\n assert os.path.isfile(metadata_fp)\n\n # retrieve results so we can compare training loss with regularization\n with open(train_stats_fp, 'r') as f:\n train_stats = json.load(f)\n\n # retrieve training losses for all epochs\n train_losses = np.array(train_stats['training']['combined']['loss'])\n regularization_losses.append(train_losses[0])\n\n # create a set of losses\n regularization_losses_set = set(regularization_losses)\n\n # ensure all losses obtained with the different methods are different\n assert len(regularization_losses) == len(regularization_losses_set)\n\n\n# test cache checksum function\ndef test_cache_checksum(csv_filename, tmp_path):\n # setup for training\n input_features = [category_feature(vocab_size=5)]\n output_features = [category_feature(vocab_size=2)]\n\n source_dataset = os.path.join(tmp_path, csv_filename)\n source_dataset = generate_data(input_features, output_features,\n source_dataset)\n\n config = {\n 'input_features': input_features,\n 'output_features': output_features,\n 'preprocessing': {'text': {'most_common_word': 1000}},\n 'training': {'epochs': 2}\n }\n\n # conduct initial training\n output_directory = os.path.join(tmp_path, 'results')\n model = LudwigModel(config)\n _, _, train_output_directory1 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n first_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # conduct second training, should not force recreating hdf5\n model = LudwigModel(config)\n _, _, train_output_directory2 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # time stamps should be the same\n assert first_training_timestamp == current_training_timestamp\n\n # force recreating cache file by changing checksum\n prior_training_timestamp = current_training_timestamp\n config['preprocessing']['text']['most_common_word'] = 2000\n model = LudwigModel(config)\n _, _, train_output_directory3 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamp should differ\n assert prior_training_timestamp < current_training_timestamp\n\n # force recreating cache by updating modification time of source dataset\n prior_training_timestamp = current_training_timestamp\n os.utime(source_dataset)\n model = LudwigModel(config)\n _, _, train_output_directory4 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamps should be different\n assert prior_training_timestamp < current_training_timestamp\n\n # force change in feature preprocessing\n prior_training_timestamp = current_training_timestamp\n input_features = config['input_features'].copy()\n input_features[0]['preprocessing'] = {'lowercase': True}\n config['input_features'] = input_features\n model = LudwigModel(config)\n _, _, train_output_directory5 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamps should be different\n assert prior_training_timestamp < current_training_timestamp\n\n # force change in features names (and properties)\n prior_training_timestamp 
= current_training_timestamp\n input_features = [category_feature(vocab_size=5), category_feature()]\n source_dataset = generate_data(input_features, output_features,\n source_dataset)\n config['input_features'] = input_features\n model = LudwigModel(config)\n _, _, train_output_directory5 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamps should be different\n assert prior_training_timestamp < current_training_timestamp\n\n # force change in Ludwig version\n prior_training_timestamp = current_training_timestamp\n global_vars.LUDWIG_VERSION = 'new_version'\n model = LudwigModel(config)\n _, _, train_output_directory5 = \\\n model.train(dataset=source_dataset, output_directory=output_directory)\n current_training_timestamp = \\\n os.path.getmtime(replace_file_extension(source_dataset, 'hdf5'))\n\n # timestamps should be different\n assert prior_training_timestamp < current_training_timestamp\n\n\[email protected](\n 'transformer_key', list(numeric_transformation_registry.keys())\n)\ndef test_numeric_transformer(transformer_key, tmpdir):\n Transformer = get_from_registry(transformer_key,\n numeric_transformation_registry)\n transformer_name = Transformer().__class__.__name__\n if transformer_name == 'Log1pTransformer':\n raw_values = np.random.lognormal(5, 2, size=100)\n else:\n raw_values = np.random.normal(5, 2, size=100)\n\n backend = LOCAL_BACKEND\n parameters = Transformer.fit_transform_params(raw_values, backend)\n if transformer_name in {'Log1pTransformer', 'IdentityTransformer'}:\n # should be empty\n assert not bool(parameters)\n else:\n # should not be empty\n assert bool(parameters)\n\n # instantiate numeric transformer\n numeric_transfomer = Transformer(**parameters)\n\n # transform values\n transformed_values = numeric_transfomer.transform(raw_values)\n\n # inverse transform the prior transformed values\n reconstructed_values = \\\n numeric_transfomer.inverse_transform(transformed_values)\n\n # should now match\n assert np.allclose(raw_values, reconstructed_values)\n\n # now test numeric transformer with output feature\n df = pd.DataFrame(np.array([raw_values, raw_values]).T, columns=['x', 'y'])\n config = {\n 'input_features': [\n {'name': 'x', 'type': 'numerical'}\n ],\n 'output_features': [\n {'name': 'y', 'type': 'numerical',\n 'preprocessing': {'normalization': transformer_key}}\n ],\n 'combiner': {\n 'type': 'concat',\n },\n 'training': {\n 'epochs': 2,\n 'batch_size': 16,\n }\n }\n\n args = {\n 'config': config,\n 'skip_save_processed_input': True,\n 'output_directory': os.path.join(tmpdir, 'results'),\n 'logging_level': logging.WARN\n }\n\n # ensure no exceptions are raised\n experiment_cli(dataset=df, **args)\n",
"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport collections\nimport csv\nimport functools\nimport json\nimport logging\nimport os.path\nimport pickle\nimport random\nimport re\n\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom pandas.errors import ParserError\nfrom sklearn.model_selection import KFold\n\nfrom ludwig.constants import PREPROCESSING, SPLIT, PROC_COLUMN\nfrom ludwig.globals import (MODEL_HYPERPARAMETERS_FILE_NAME,\n MODEL_WEIGHTS_FILE_NAME,\n TRAIN_SET_METADATA_FILE_NAME)\n\nlogger = logging.getLogger(__name__)\n\nDATASET_SPLIT_URL = 'dataset_{}_fp'\nDATA_PROCESSED_CACHE_DIR = 'data_processed_cache_dir'\nDATA_TRAIN_HDF5_FP = 'data_train_hdf5_fp'\nHDF5_COLUMNS_KEY = 'columns'\nDICT_FORMATS = {'dict', 'dictionary', dict}\nDATAFRAME_FORMATS = {'dataframe', 'df', pd.DataFrame}\nCSV_FORMATS = {'csv'}\nTSV_FORMATS = {'tsv'}\nJSON_FORMATS = {'json'}\nJSONL_FORMATS = {'jsonl'}\nEXCEL_FORMATS = {'excel'}\nPARQUET_FORMATS = {'parquet'}\nPICKLE_FORMATS = {'pickle'}\nFEATHER_FORMATS = {'feather'}\nFWF_FORMATS = {'fwf'}\nHTML_FORMATS = {'html'}\nORC_FORMATS = {'orc'}\nSAS_FORMATS = {'sas'}\nSPSS_FORMATS = {'spss'}\nSTATA_FORMATS = {'stata'}\nHDF5_FORMATS = {'hdf5', 'h5'}\nCACHEABLE_FORMATS = set.union(*(CSV_FORMATS, TSV_FORMATS,\n JSON_FORMATS, JSONL_FORMATS,\n EXCEL_FORMATS, PARQUET_FORMATS, PICKLE_FORMATS,\n FEATHER_FORMATS, FWF_FORMATS, HTML_FORMATS,\n ORC_FORMATS, SAS_FORMATS, SPSS_FORMATS,\n STATA_FORMATS))\n\nPANDAS_DF = pd\n\n\ndef get_split_path(dataset_fp):\n return os.path.splitext(dataset_fp)[0] + '.split.csv'\n\n\ndef get_abs_path(data_csv_path, file_path):\n if data_csv_path is not None:\n return os.path.join(data_csv_path, file_path)\n else:\n return file_path\n\n\ndef load_csv(data_fp):\n data = []\n with open(data_fp, 'rb') as f:\n data = list(csv.reader(f))\n return data\n\n\ndef read_xsv(data_fp, df_lib=PANDAS_DF, separator=',', header=0, nrows=None, skiprows=None):\n \"\"\"\n Helper method to read a csv file. Wraps around pd.read_csv to handle some\n exceptions. 
Can extend to cover cases as necessary\n :param data_fp: path to the xsv file\n :param df_lib: DataFrame library used to read in the CSV\n :param separator: defaults separator to use for splitting\n :param header: header argument for pandas to read the csv\n :param nrows: number of rows to read from the csv, None means all\n :param skiprows: number of rows to skip from the csv, None means no skips\n :return: Pandas dataframe with the data\n \"\"\"\n with open(data_fp, 'r', encoding=\"utf8\") as csvfile:\n try:\n dialect = csv.Sniffer().sniff(csvfile.read(1024 * 100),\n delimiters=[',', '\\t', '|'])\n separator = dialect.delimiter\n except csv.Error:\n # Could not conclude the delimiter, defaulting to user provided\n pass\n\n try:\n df = df_lib.read_csv(data_fp, sep=separator, header=header,\n nrows=nrows, skiprows=skiprows)\n except ParserError:\n logger.warning('Failed to parse the CSV with pandas default way,'\n ' trying \\\\ as escape character.')\n df = df_lib.read_csv(data_fp, sep=separator, header=header,\n escapechar='\\\\',\n nrows=nrows, skiprows=skiprows)\n\n return df\n\n\nread_csv = functools.partial(read_xsv, separator=',')\nread_tsv = functools.partial(read_xsv, separator='\\t')\n\n\ndef read_json(data_fp, df_lib, normalize=False):\n if normalize:\n return df_lib.json_normalize(load_json(data_fp))\n else:\n return df_lib.read_json(data_fp)\n\n\ndef read_jsonl(data_fp, df_lib):\n return df_lib.read_json(data_fp, lines=True)\n\n\ndef read_excel(data_fp, df_lib):\n fp_split = os.path.splitext(data_fp)\n if fp_split[1] == '.xls':\n excel_engine = 'xlrd'\n else:\n excel_engine = 'openpyxl'\n return df_lib.read_excel(data_fp, engine=excel_engine)\n\n\ndef read_parquet(data_fp, df_lib):\n return df_lib.read_parquet(data_fp)\n\n\ndef read_pickle(data_fp, df_lib):\n return df_lib.read_pickle(data_fp)\n\n\ndef read_fwf(data_fp, df_lib):\n return df_lib.read_fwf(data_fp)\n\n\ndef read_feather(data_fp, df_lib):\n return df_lib.read_feather(data_fp)\n\n\ndef read_html(data_fp, df_lib):\n return df_lib.read_html(data_fp)[0]\n\n\ndef read_orc(data_fp, df_lib):\n return df_lib.read_orc(data_fp)\n\n\ndef read_sas(data_fp, df_lib):\n return df_lib.read_sas(data_fp)\n\n\ndef read_spss(data_fp, df_lib):\n return df_lib.read_spss(data_fp)\n\n\ndef read_stata(data_fp, df_lib):\n return df_lib.read_stata(data_fp)\n\n\ndef save_csv(data_fp, data):\n with open(data_fp, 'w', encoding='utf-8') as csv_file:\n writer = csv.writer(csv_file)\n for row in data:\n if not isinstance(row, collections.Iterable) or isinstance(row,\n str):\n row = [row]\n writer.writerow(row)\n\n\ndef csv_contains_column(data_fp, column_name):\n return column_name in read_csv(data_fp, nrows=0) # only loads header\n\n\ndef load_json(data_fp):\n with open(data_fp, 'r') as input_file:\n data = json.load(input_file)\n return data\n\n\ndef save_json(data_fp, data, sort_keys=True, indent=4):\n with open(data_fp, 'w') as output_file:\n json.dump(data, output_file, cls=NumpyEncoder, sort_keys=sort_keys,\n indent=indent)\n\n\ndef to_numpy_dataset(df):\n dataset = {}\n for col in df.columns:\n dataset[col] = np.stack(df[col].to_numpy())\n return dataset\n\n\ndef from_numpy_dataset(dataset):\n col_mapping = {}\n for k, v in dataset.items():\n if len(v.shape) > 1:\n # unstacking, needed for ndarrays of dimension 2 and more\n *vals, = v\n else:\n # not unstacking. 
Needed because otherwise pandas casts types\n # the way it wants, like converting a list of float32 scalats\n # to a column of float64\n vals = v\n col_mapping[k] = vals\n return pd.DataFrame.from_dict(col_mapping)\n\n\ndef save_hdf5(data_fp, data):\n mode = 'w'\n if os.path.isfile(data_fp):\n mode = 'r+'\n\n numpy_dataset = to_numpy_dataset(data)\n with h5py.File(data_fp, mode) as h5_file:\n h5_file.create_dataset(HDF5_COLUMNS_KEY, data=np.array(data.columns.values, dtype='S'))\n for column in data.columns:\n h5_file.create_dataset(column, data=numpy_dataset[column])\n\n\ndef load_hdf5(data_fp):\n hdf5_data = h5py.File(data_fp, 'r')\n columns = [s.decode('utf-8') for s in hdf5_data[HDF5_COLUMNS_KEY][()].tolist()]\n\n numpy_dataset = {}\n for column in columns:\n numpy_dataset[column] = hdf5_data[column][()]\n\n return from_numpy_dataset(numpy_dataset)\n\n\ndef load_object(object_fp):\n with open(object_fp, 'rb') as f:\n return pickle.load(f)\n\n\ndef save_object(object_fp, obj):\n with open(object_fp, 'wb') as f:\n pickle.dump(obj, f)\n\n\ndef load_array(data_fp, dtype=float):\n list_num = []\n with open(data_fp, 'r') as input_file:\n for x in input_file:\n list_num.append(dtype(x.strip()))\n return np.array(list_num)\n\n\ndef load_matrix(data_fp, dtype=float):\n list_num = []\n with open(data_fp, 'r') as input_file:\n for row in input_file:\n list_num.append([dtype(elem) for elem in row.strip().split()])\n return np.squeeze(np.array(list_num))\n\n\ndef save_array(data_fp, array):\n with open(data_fp, 'w') as output_file:\n for x in np.nditer(array):\n output_file.write(str(x) + '\\n')\n\n\ndef load_pretrained_embeddings(embeddings_path, vocab):\n embeddings = load_glove(embeddings_path)\n\n # find out the size of the embeddings\n embeddings_size = len(next(iter(embeddings.values())))\n\n # calculate an average embedding, to use for initializing missing words\n avg_embedding = np.zeros(embeddings_size)\n count = 0\n for word in vocab:\n if word in embeddings:\n avg_embedding += embeddings[word]\n count += 1\n if count > 0:\n avg_embedding /= count\n\n # create the embedding matrix\n embeddings_vectors = []\n for word in vocab:\n if word in embeddings:\n embeddings_vectors.append(embeddings[word])\n else:\n embeddings_vectors.append(\n avg_embedding + np.random.uniform(-0.01, 0.01, embeddings_size)\n )\n embeddings_matrix = np.stack(embeddings_vectors)\n\n # let's help the garbage collector free some memory\n embeddings = None\n\n return embeddings_matrix\n\n\[email protected]_cache(1)\ndef load_glove(file_path):\n logger.info(' Loading Glove format file {}'.format(file_path))\n embeddings = {}\n embedding_size = 0\n\n # collect embeddings size assuming the first line is correct\n with open(file_path, 'r', encoding='utf-8') as f:\n found_line = False\n while not found_line:\n line = f.readline()\n if line:\n embedding_size = len(line.split()) - 1\n found_line = True\n\n # collect embeddings\n with open(file_path, 'r', encoding='utf-8') as f:\n for line_number, line in enumerate(f):\n if line:\n try:\n split = line.split()\n if len(split) != embedding_size + 1:\n raise ValueError\n word = split[0]\n embedding = np.array(\n [float(val) for val in split[-embedding_size:]]\n )\n embeddings[word] = embedding\n except ValueError:\n logger.warning(\n 'Line {} in the GloVe file {} is malformed, '\n 'skipping it'.format(\n line_number, file_path\n )\n )\n logger.info(' {0} embeddings loaded'.format(len(embeddings)))\n return embeddings\n\n\ndef split_data(split, data):\n # type: (float, list) -> 
(list, list)\n split_length = int(round(split * len(data)))\n random.shuffle(data)\n return data[:split_length], data[split_length:]\n\n\ndef shuffle_unison_inplace(list_of_lists, random_state=None):\n if list_of_lists:\n assert all(len(l) == len(list_of_lists[0]) for l in list_of_lists)\n if random_state is not None:\n p = random_state.permutation(len(list_of_lists[0]))\n else:\n p = np.random.permutation(len(list_of_lists[0]))\n return [l[p] for l in list_of_lists]\n return None\n\n\ndef shuffle_dict_unison_inplace(np_dict, random_state=None):\n keys = list(np_dict.keys())\n list_of_lists = list(np_dict.values())\n\n # shuffle up the list of lists according to previous fct\n shuffled_list = shuffle_unison_inplace(list_of_lists, random_state)\n\n recon = {}\n for ii in range(len(keys)):\n dkey = keys[ii]\n recon[dkey] = shuffled_list[ii]\n\n # we've shuffled the dictionary in place!\n return recon\n\n\ndef split_dataset_ttv(dataset, split):\n training_set = split_dataset(dataset, split, 0)\n validation_set = split_dataset(dataset, split, 1)\n test_set = split_dataset(dataset, split, 2)\n return training_set, test_set, validation_set\n\n\ndef split_dataset(dataset, split, value_to_split=0):\n split_df = dataset[dataset[split] == value_to_split]\n if len(split_df) == 0:\n return None\n return split_df.reset_index()\n\n\ndef collapse_rare_labels(labels, labels_limit):\n if labels_limit > 0:\n labels[labels >= labels_limit] = labels_limit\n return labels\n\n\ndef class_counts(dataset, labels_field):\n return np.bincount(dataset[labels_field].flatten()).tolist()\n\n\ndef text_feature_data_field(text_feature):\n return text_feature[PROC_COLUMN] + '_' + text_feature['level']\n\n\ndef load_from_file(file_name, field=None, dtype=int, ground_truth_split=2):\n \"\"\"Load experiment data from supported file formats.\n\n Experiment data can be test/train statistics, model predictions,\n probability, ground truth, ground truth metadata.\n :param file_name: Path to file to be loaded\n :param field: Target Prediction field.\n :param dtype:\n :param ground_truth_split: Ground truth split filter where 0 is train 1 is\n validation and 2 is test split. By default test split is used when loading\n ground truth from hdf5.\n :return: Experiment data as array\n \"\"\"\n if file_name.endswith('.hdf5') and field is not None:\n dataset = pd.read_hdf(file_name, key=HDF5_COLUMNS_KEY)\n column = dataset[field]\n array = column[dataset[SPLIT] == ground_truth_split].values # ground truth\n elif file_name.endswith('.npy'):\n array = np.load(file_name)\n elif file_name.endswith('.csv'):\n array = read_csv(file_name, header=None).values\n else:\n array = load_matrix(file_name, dtype)\n return array\n\n\ndef replace_file_extension(file_path, extension):\n \"\"\"\n Return a file path for a file with same name but different format.\n a.csv, json -> a.json\n a.csv, hdf5 -> a.hdf5\n :param file_path: original file path\n :param extension: file extension\n :return: file path with same name but different format\n \"\"\"\n if file_path is None:\n return None\n extension = extension.strip()\n if extension.startswith('.'):\n # Handle the case if the user calls with '.hdf5' instead of 'hdf5'\n extension = extension[1:]\n\n return os.path.splitext(file_path)[0] + '.' 
+ extension\n\n\ndef file_exists_with_diff_extension(file_path, extension):\n return file_path is None or \\\n os.path.isfile(replace_file_extension(file_path, extension))\n\n\ndef add_sequence_feature_column(df, col_name, seq_length):\n \"\"\"\n Adds a new column to the dataframe computed from an existing column.\n Values in the new column are space-delimited strings composed of preceding\n values of the same column up to seq_length.\n For example values of the i-th row of the new column will be a\n space-delimited string of df[col_name][i-seq_length].\n :param df: input dataframe\n :param col_name: column name containing sequential data\n :param seq_length: length of an array of preceeding column values to use\n \"\"\"\n if col_name not in df.columns.values:\n logger.error('{} column does not exist'.format(col_name))\n return\n\n new_col_name = col_name + '_feature'\n if new_col_name in df.columns.values:\n logger.warning(\n '{} column already exists, values will be overridden'.format(\n new_col_name\n )\n )\n\n new_data = [None] * seq_length\n old_data = np.array(df[col_name])\n\n for i in range(seq_length, len(df)):\n new_data.append(' '.join(\n str(j) for j in old_data[i - seq_length: i]\n ))\n\n df[new_col_name] = new_data\n df[new_col_name] = df[new_col_name].fillna(method='backfill')\n\n\ndef override_in_memory_flag(input_features, override_value):\n num_overrides = 0\n for feature in input_features:\n if PREPROCESSING in feature:\n if 'in_memory' in feature[PREPROCESSING]:\n feature[PREPROCESSING]['in_memory'] = override_value\n num_overrides += 1\n return num_overrides\n\n\ndef normalize_numpy(obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return normalize_numpy(obj.tolist())\n elif isinstance(obj, list):\n return [normalize_numpy(v) for v in obj]\n else:\n return obj\n\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, (set, tuple)):\n return list(obj)\n elif isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return json.JSONEncoder.default(self, obj)\n\n\ndef generate_kfold_splits(data_df, num_folds, random_state):\n kf = KFold(n_splits=num_folds, shuffle=True, random_state=random_state)\n fold_num = 0\n for train_indices, test_indices in kf.split(data_df):\n fold_num += 1\n yield train_indices, test_indices, fold_num\n\n\ndef get_path_size(\n start_path,\n regex_accept=None,\n regex_reject=None\n):\n total_size = 0\n pattern_accept = re.compile(regex_accept) if regex_accept else None\n pattern_reject = re.compile(regex_reject) if regex_reject else None\n\n for dirpath, dirnames, filenames in os.walk(start_path):\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n if not os.path.islink(filepath):\n accepted = True\n if pattern_accept:\n accepted = accepted and pattern_accept.match(filename)\n if pattern_reject:\n accepted = accepted and not pattern_reject.match(filename)\n if accepted:\n total_size += os.path.getsize(filepath)\n\n return total_size\n\n\ndef clear_data_cache():\n \"\"\"Clears any cached data objects (e.g., embeddings)\"\"\"\n load_glove.cache_clear()\n\n\ndef figure_data_format_dataset(dataset):\n if isinstance(dataset, pd.DataFrame):\n return pd.DataFrame\n elif isinstance(dataset, dict):\n return dict\n elif isinstance(dataset, str):\n dataset = dataset.lower()\n if 
dataset.endswith('.csv'):\n return 'csv'\n elif dataset.endswith('.tsv'):\n return 'tsv'\n elif dataset.endswith('.json'):\n return 'json'\n elif dataset.endswith('.jsonl'):\n return 'jsonl'\n elif (dataset.endswith('.xls') or dataset.endswith('.xlsx') or\n dataset.endswith('.xlsm') or dataset.endswith('.xlsb') or\n dataset.endswith('.odf') or dataset.endswith('.ods') or\n dataset.endswith('.odt')):\n return 'excel'\n elif dataset.endswith('.parquet'):\n return 'parquet'\n elif dataset.endswith('.pickle') or dataset.endswith('.p'):\n return 'pickle'\n elif dataset.endswith('.feather'):\n return 'feather'\n elif dataset.endswith('.fwf'):\n return 'fwf'\n elif dataset.endswith('.html'):\n return 'html'\n elif dataset.endswith('.orc'):\n return 'orc'\n elif dataset.endswith('.sas'):\n return 'sas'\n elif dataset.endswith('.spss'):\n return 'spss'\n elif dataset.endswith('.dta') or dataset.endswith('.stata'):\n return 'stata'\n elif dataset.endswith('.h5') or dataset.endswith('.hdf5'):\n return 'hdf5'\n else:\n raise ValueError(\n \"Dataset path string {} \"\n \"does not contain a valid extension\".format(dataset)\n )\n else:\n raise ValueError(\n \"Cannot figure out the format of dataset {}\".format(dataset)\n )\n\n\ndef figure_data_format(\n dataset=None, training_set=None, validation_set=None, test_set=None\n):\n if dataset is not None:\n data_format = figure_data_format_dataset(dataset)\n elif training_set is not None:\n data_formats = [figure_data_format_dataset(training_set)]\n if validation_set is not None:\n data_formats.append(figure_data_format_dataset(validation_set))\n if test_set is not None:\n data_formats.append(figure_data_format_dataset(test_set))\n data_formats_set = set(data_formats)\n if len(data_formats_set) > 1:\n error_message = \"Datasets have different formats. Training: \"\n error_message += str(data_formats[0])\n if validation_set:\n error_message = \", Validation: \"\n error_message += str(data_formats[1])\n if test_set:\n error_message = \", Test: \"\n error_message += str(data_formats[-1])\n raise ValueError(error_message)\n else:\n data_format = next(iter(data_formats_set))\n else:\n raise ValueError(\n \"At least one between dataset and training_set must be not None\"\n )\n return data_format\n\n\ndef is_model_dir(path: str) -> bool:\n hyperparameters_fn = os.path.join(path, MODEL_HYPERPARAMETERS_FILE_NAME)\n ts_metadata_fn = os.path.join(path, TRAIN_SET_METADATA_FILE_NAME)\n is_model_dir = False\n if (os.path.isdir(path)\n and os.path.isfile(hyperparameters_fn)\n and os.path.isfile(ts_metadata_fn)):\n weights_files_count = 0\n for file_name in os.listdir(path):\n if file_name.startswith(MODEL_WEIGHTS_FILE_NAME):\n weights_files_count += 1\n if weights_files_count >= 2:\n is_model_dir = True\n return is_model_dir\n\n\nexternal_data_reader_registry = {\n **{fmt: read_csv for fmt in CSV_FORMATS},\n **{fmt: read_tsv for fmt in TSV_FORMATS},\n **{fmt: read_json for fmt in JSON_FORMATS},\n **{fmt: read_jsonl for fmt in JSONL_FORMATS},\n **{fmt: read_excel for fmt in EXCEL_FORMATS},\n **{fmt: read_parquet for fmt in PARQUET_FORMATS},\n **{fmt: read_pickle for fmt in PICKLE_FORMATS},\n **{fmt: read_fwf for fmt in FWF_FORMATS},\n **{fmt: read_feather for fmt in FEATHER_FORMATS},\n **{fmt: read_html for fmt in HTML_FORMATS},\n **{fmt: read_orc for fmt in ORC_FORMATS},\n **{fmt: read_sas for fmt in SAS_FORMATS},\n **{fmt: read_spss for fmt in SPSS_FORMATS},\n **{fmt: read_stata for fmt in STATA_FORMATS}\n}\n",
"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2020 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nimport pandas as pd\n\nfrom ludwig.data.dataset import Dataset\nfrom ludwig.data.dataframe.base import DataFrameEngine\nfrom ludwig.utils.data_utils import DATA_TRAIN_HDF5_FP\nfrom ludwig.utils.misc_utils import get_proc_features\n\n\nclass PandasEngine(DataFrameEngine):\n def empty_df_like(self, df):\n return pd.DataFrame(index=df.index)\n\n def parallelize(self, data):\n return data\n\n def persist(self, data):\n return data\n\n def compute(self, data):\n return data\n\n def from_pandas(self, df):\n return df\n\n def map_objects(self, series, map_fn):\n return series.map(map_fn)\n\n def reduce_objects(self, series, reduce_fn):\n return reduce_fn(series)\n\n def create_dataset(self, dataset, tag, config, training_set_metadata):\n return Dataset(\n dataset,\n get_proc_features(config),\n training_set_metadata.get(DATA_TRAIN_HDF5_FP)\n )\n\n @property\n def array_lib(self):\n return np\n\n @property\n def df_lib(self):\n return pd\n\n @property\n def use_hdf5_cache(self):\n return True\n\n\nPANDAS = PandasEngine()\n",
"# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport inspect\nimport logging\n\nfrom tensorflow.keras.layers import GRU, LSTM, Bidirectional, Layer, SimpleRNN\n\nfrom ludwig.utils.misc_utils import get_from_registry\n\nlogger = logging.getLogger(__name__)\n\nrnn_layers_registry = {\n 'rnn': SimpleRNN,\n 'gru': GRU,\n 'lstm': LSTM,\n}\n\n\nclass RecurrentStack(Layer):\n def __init__(\n self,\n state_size=256,\n cell_type='rnn',\n num_layers=1,\n bidirectional=False,\n activation='tanh',\n recurrent_activation='sigmoid',\n use_bias=True,\n unit_forget_bias=True,\n weights_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n weights_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n # kernel_constraint=kernel_constraint,\n # recurrent_constraint=recurrent_constraint,\n # bias_constraint=bias_constraint,\n dropout=0.0,\n recurrent_dropout=0.0,\n **kwargs\n ):\n super(RecurrentStack, self).__init__()\n self.supports_masking = True\n\n rnn_layer_class = get_from_registry(cell_type, rnn_layers_registry)\n self.layers = []\n\n rnn_params = {\n 'units': state_size,\n 'activation': activation,\n 'recurrent_activation': recurrent_activation,\n 'use_bias': use_bias,\n 'kernel_initializer': weights_initializer,\n 'recurrent_initializer': recurrent_initializer,\n 'bias_initializer': bias_initializer,\n 'unit_forget_bias': unit_forget_bias,\n 'kernel_regularizer': weights_regularizer,\n 'recurrent_regularizer': recurrent_regularizer,\n 'bias_regularizer': bias_regularizer,\n 'activity_regularizer': activity_regularizer,\n # 'kernel_constraint': weights_constraint,\n # 'recurrent_constraint': recurrent_constraint,\n # 'bias_constraint': bias_constraint,\n 'dropout': dropout,\n 'recurrent_dropout': recurrent_dropout,\n 'return_sequences': True,\n 'return_state': True,\n }\n signature = inspect.signature(rnn_layer_class.__init__)\n valid_args = set(signature.parameters.keys())\n rnn_params = {k: v for k, v in rnn_params.items() if k in valid_args}\n\n for _ in range(num_layers):\n layer = rnn_layer_class(**rnn_params)\n\n if bidirectional:\n layer = Bidirectional(layer)\n\n self.layers.append(layer)\n\n for layer in self.layers:\n logger.debug(' {}'.format(layer.name))\n\n def call(self, inputs, training=None, mask=None):\n hidden = inputs\n final_state = None\n for layer in self.layers:\n outputs = layer(hidden, training=training, mask=mask)\n hidden = outputs[0]\n final_state = outputs[1:]\n if final_state and len(final_state) == 1:\n final_state = final_state[0]\n return hidden, final_state\n"
] | [
[
"numpy.random.lognormal",
"numpy.allclose",
"numpy.random.seed",
"sklearn.model_selection.train_test_split",
"numpy.concatenate",
"numpy.random.normal",
"numpy.argmin",
"tensorflow.keras.backend.clear_session",
"numpy.array",
"tensorflow.random.set_seed",
"numpy.isclose"
],
[
"pandas.read_hdf",
"numpy.nditer",
"sklearn.model_selection.KFold",
"numpy.stack",
"numpy.random.uniform",
"pandas.DataFrame.from_dict",
"numpy.load",
"numpy.array",
"numpy.zeros"
],
[
"pandas.DataFrame"
],
[
"tensorflow.keras.layers.Bidirectional"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
ous8292/arviz | [
"3d788cc7157b764130ee6f84bb2f42021e5ab258",
"3d788cc7157b764130ee6f84bb2f42021e5ab258"
] | [
"arviz/plots/backends/bokeh/posteriorplot.py",
"arviz/plots/backends/bokeh/loopitplot.py"
] | [
"\"\"\"Bokeh Plot posterior densities.\"\"\"\nfrom numbers import Number\nfrom typing import Optional\n\nimport numpy as np\nfrom bokeh.models.annotations import Title\n\nfrom ....stats import hdi\nfrom ....stats.density_utils import get_bins, histogram\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import (\n _scale_fig_size,\n calculate_point_estimate,\n format_sig_figs,\n make_label,\n round_num,\n)\nfrom .. import show_layout\nfrom . import backend_kwarg_defaults, create_axes_grid\n\n\ndef plot_posterior(\n ax,\n length_plotters,\n rows,\n cols,\n figsize,\n plotters,\n bw,\n circular,\n bins,\n kind,\n point_estimate,\n round_to,\n hdi_prob,\n multimodal,\n skipna,\n textsize,\n ref_val,\n rope,\n kwargs,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh posterior plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(\n (\"dpi\", \"plot.bokeh.figure.dpi\"),\n ),\n **backend_kwargs,\n }\n\n (figsize, ax_labelsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, rows, cols)\n\n if ax is None:\n ax = create_axes_grid(\n length_plotters,\n rows,\n cols,\n figsize=figsize,\n backend_kwargs=backend_kwargs,\n )\n else:\n ax = np.atleast_2d(ax)\n idx = 0\n for (var_name, selection, x), ax_ in zip(\n plotters, (item for item in ax.flatten() if item is not None)\n ):\n _plot_posterior_op(\n idx,\n x.flatten(),\n var_name,\n selection,\n ax=ax_,\n bw=bw,\n circular=circular,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n hdi_prob=hdi_prob,\n multimodal=multimodal,\n skipna=skipna,\n linewidth=linewidth,\n ref_val=ref_val,\n rope=rope,\n ax_labelsize=ax_labelsize,\n **kwargs,\n )\n idx += 1\n _title = Title()\n _title.text = make_label(var_name, selection)\n ax_.title = _title\n\n show_layout(ax, show)\n\n return ax\n\n\ndef _plot_posterior_op(\n idx,\n values,\n var_name,\n selection,\n ax,\n bw,\n circular,\n linewidth,\n bins,\n kind,\n point_estimate,\n hdi_prob,\n multimodal,\n skipna,\n ref_val,\n rope,\n ax_labelsize,\n round_to: Optional[int] = None,\n **kwargs,\n): # noqa: D202\n \"\"\"Artist to draw posterior.\"\"\"\n\n def format_as_percent(x, round_to=0):\n return \"{0:.{1:d}f}%\".format(100 * x, round_to)\n\n def display_ref_val(max_data):\n if ref_val is None:\n return\n elif isinstance(ref_val, dict):\n val = None\n for sel in ref_val.get(var_name, []):\n if all(\n k in selection and selection[k] == v for k, v in sel.items() if k != \"ref_val\"\n ):\n val = sel[\"ref_val\"]\n break\n if val is None:\n return\n elif isinstance(ref_val, list):\n val = ref_val[idx]\n elif isinstance(ref_val, Number):\n val = ref_val\n else:\n raise ValueError(\n \"Argument `ref_val` must be None, a constant, a list or a \"\n 'dictionary like {\"var_name\": [{\"ref_val\": ref_val}]}'\n )\n less_than_ref_probability = (values < val).mean()\n greater_than_ref_probability = (values >= val).mean()\n ref_in_posterior = \"{} <{:g}< {}\".format(\n format_as_percent(less_than_ref_probability, 1),\n val,\n format_as_percent(greater_than_ref_probability, 1),\n )\n ax.line([val, val], [0, 0.8 * max_data], line_color=\"blue\", line_alpha=0.65)\n\n ax.text(x=[values.mean()], y=[max_data * 0.6], text=[ref_in_posterior], text_align=\"center\")\n\n def display_rope(max_data):\n if rope is None:\n return\n elif isinstance(rope, dict):\n vals = None\n for sel in rope.get(var_name, []):\n # pylint: disable=line-too-long\n if all(k in selection and selection[k] == v for k, v in sel.items() if k != \"rope\"):\n vals = 
sel[\"rope\"]\n break\n if vals is None:\n return\n elif len(rope) == 2:\n vals = rope\n else:\n raise ValueError(\n \"Argument `rope` must be None, a dictionary like\"\n '{\"var_name\": {\"rope\": (lo, hi)}}, or an'\n \"iterable of length 2\"\n )\n rope_text = [f\"{val:.{format_sig_figs(val, round_to)}g}\" for val in vals]\n\n ax.line(\n vals,\n (max_data * 0.02, max_data * 0.02),\n line_width=linewidth * 5,\n line_color=\"red\",\n line_alpha=0.7,\n )\n\n text_props = dict(\n text_font_size=\"{}pt\".format(ax_labelsize), text_color=\"black\", text_align=\"center\"\n )\n\n ax.text(x=vals, y=[max_data * 0.2, max_data * 0.2], text=rope_text, **text_props)\n\n def display_point_estimate(max_data):\n if not point_estimate:\n return\n point_value = calculate_point_estimate(point_estimate, values, bw, circular)\n sig_figs = format_sig_figs(point_value, round_to)\n point_text = \"{point_estimate}={point_value:.{sig_figs}g}\".format(\n point_estimate=point_estimate, point_value=point_value, sig_figs=sig_figs\n )\n\n ax.text(x=[point_value], y=[max_data * 0.8], text=[point_text], text_align=\"center\")\n\n def display_hdi(max_data):\n # np.ndarray with 2 entries, min and max\n # pylint: disable=line-too-long\n hdi_probs = hdi(\n values, hdi_prob=hdi_prob, circular=circular, multimodal=multimodal, skipna=skipna\n ) # type: np.ndarray\n\n for hdi_i in np.atleast_2d(hdi_probs):\n ax.line(\n hdi_i,\n (max_data * 0.02, max_data * 0.02),\n line_width=linewidth * 2,\n line_color=\"black\",\n )\n\n ax.text(\n x=list(hdi_i) + [(hdi_i[0] + hdi_i[1]) / 2],\n y=[max_data * 0.07, max_data * 0.07, max_data * 0.3],\n text=list(map(str, map(lambda x: round_num(x, round_to), hdi_i)))\n + [format_as_percent(hdi_prob) + \" HDI\"],\n text_align=\"center\",\n )\n\n def format_axes():\n ax.yaxis.visible = False\n ax.yaxis.major_tick_line_color = None\n ax.yaxis.minor_tick_line_color = None\n ax.yaxis.major_label_text_font_size = \"0pt\"\n ax.xgrid.grid_line_color = None\n ax.ygrid.grid_line_color = None\n\n if skipna:\n values = values[~np.isnan(values)]\n\n if kind == \"kde\" and values.dtype.kind == \"f\":\n kwargs.setdefault(\"line_width\", linewidth)\n plot_kde(\n values,\n bw=bw,\n circular=circular,\n fill_kwargs={\"fill_alpha\": kwargs.pop(\"fill_alpha\", 0)},\n plot_kwargs=kwargs,\n ax=ax,\n rug=False,\n backend=\"bokeh\",\n backend_kwargs={},\n show=False,\n )\n _, hist, edges = histogram(values, bins=\"auto\")\n else:\n if bins is None:\n if values.dtype.kind == \"i\":\n bins = get_bins(values)\n else:\n bins = \"auto\"\n kwargs.setdefault(\"align\", \"left\")\n kwargs.setdefault(\"color\", \"blue\")\n _, hist, edges = histogram(values, bins=bins)\n ax.quad(\n top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_alpha=0.35, line_alpha=0.35\n )\n\n format_axes()\n max_data = hist.max()\n if hdi_prob != \"hide\":\n display_hdi(max_data)\n display_point_estimate(max_data)\n display_ref_val(max_data)\n display_rope(max_data)\n",
"\"\"\"Bokeh loopitplot.\"\"\"\nimport numpy as np\nfrom bokeh.models import BoxAnnotation\nfrom matplotlib.colors import hsv_to_rgb, rgb_to_hsv, to_hex, to_rgb\nfrom xarray import DataArray\n\nfrom ....stats.density_utils import kde\nfrom ...plot_utils import _scale_fig_size\nfrom .. import show_layout\nfrom . import backend_kwarg_defaults, create_axes_grid\n\n\ndef plot_loo_pit(\n ax,\n figsize,\n ecdf,\n loo_pit,\n loo_pit_ecdf,\n unif_ecdf,\n p975,\n p025,\n fill_kwargs,\n ecdf_fill,\n use_hdi,\n x_vals,\n hdi_kwargs,\n hdi_odds,\n n_unif,\n unif,\n plot_unif_kwargs,\n loo_pit_kde,\n legend, # pylint: disable=unused-argument\n y_hat,\n y,\n color,\n textsize,\n hdi_prob,\n plot_kwargs,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh loo pit plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(),\n **backend_kwargs,\n }\n\n (figsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, 1, 1)\n\n if ax is None:\n backend_kwargs.setdefault(\"x_range\", (0, 1))\n ax = create_axes_grid(\n 1,\n figsize=figsize,\n squeeze=True,\n backend_kwargs=backend_kwargs,\n )\n\n plot_kwargs = {} if plot_kwargs is None else plot_kwargs\n plot_kwargs.setdefault(\"color\", to_hex(color))\n plot_kwargs.setdefault(\"linewidth\", linewidth * 1.4)\n if isinstance(y, str):\n label = (\"{} LOO-PIT ECDF\" if ecdf else \"{} LOO-PIT\").format(y)\n elif isinstance(y, DataArray) and y.name is not None:\n label = (\"{} LOO-PIT ECDF\" if ecdf else \"{} LOO-PIT\").format(y.name)\n elif isinstance(y_hat, str):\n label = (\"{} LOO-PIT ECDF\" if ecdf else \"{} LOO-PIT\").format(y_hat)\n elif isinstance(y_hat, DataArray) and y_hat.name is not None:\n label = (\"{} LOO-PIT ECDF\" if ecdf else \"{} LOO-PIT\").format(y_hat.name)\n else:\n label = \"LOO-PIT ECDF\" if ecdf else \"LOO-PIT\"\n\n plot_kwargs.setdefault(\"legend_label\", label)\n\n plot_unif_kwargs = {} if plot_unif_kwargs is None else plot_unif_kwargs\n light_color = rgb_to_hsv(to_rgb(plot_kwargs.get(\"color\")))\n light_color[1] /= 2 # pylint: disable=unsupported-assignment-operation\n light_color[2] += (1 - light_color[2]) / 2 # pylint: disable=unsupported-assignment-operation\n plot_unif_kwargs.setdefault(\"color\", to_hex(hsv_to_rgb(light_color)))\n plot_unif_kwargs.setdefault(\"alpha\", 0.5)\n plot_unif_kwargs.setdefault(\"linewidth\", 0.6 * linewidth)\n\n if ecdf:\n n_data_points = loo_pit.size\n plot_kwargs.setdefault(\"drawstyle\", \"steps-mid\" if n_data_points < 100 else \"default\")\n plot_unif_kwargs.setdefault(\"drawstyle\", \"steps-mid\" if n_data_points < 100 else \"default\")\n\n if ecdf_fill:\n if fill_kwargs is None:\n fill_kwargs = {}\n fill_kwargs.setdefault(\"color\", to_hex(hsv_to_rgb(light_color)))\n fill_kwargs.setdefault(\"alpha\", 0.5)\n fill_kwargs.setdefault(\n \"step\", \"mid\" if plot_kwargs[\"drawstyle\"] == \"steps-mid\" else None\n )\n fill_kwargs.setdefault(\"legend_label\", \"{:.3g}% credible interval\".format(hdi_prob))\n elif use_hdi:\n if hdi_kwargs is None:\n hdi_kwargs = {}\n hdi_kwargs.setdefault(\"color\", to_hex(hsv_to_rgb(light_color)))\n hdi_kwargs.setdefault(\"alpha\", 0.35)\n\n if ecdf:\n if plot_kwargs.get(\"drawstyle\") == \"steps-mid\":\n ax.step(\n np.hstack((0, loo_pit, 1)),\n np.hstack((0, loo_pit - loo_pit_ecdf, 0)),\n line_color=plot_kwargs.get(\"color\", \"black\"),\n line_alpha=plot_kwargs.get(\"alpha\", 1.0),\n line_width=plot_kwargs.get(\"linewidth\", 3.0),\n mode=\"center\",\n )\n else:\n ax.line(\n np.hstack((0, loo_pit, 1)),\n np.hstack((0, 
loo_pit - loo_pit_ecdf, 0)),\n line_color=plot_kwargs.get(\"color\", \"black\"),\n line_alpha=plot_kwargs.get(\"alpha\", 1.0),\n line_width=plot_kwargs.get(\"linewidth\", 3.0),\n )\n\n if ecdf_fill:\n if fill_kwargs.get(\"drawstyle\") == \"steps-mid\":\n # use step patch when you find out how to do that\n ax.patch(\n np.concatenate((unif_ecdf, unif_ecdf[::-1])),\n np.concatenate((p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),\n fill_color=fill_kwargs.get(\"color\"),\n fill_alpha=fill_kwargs.get(\"alpha\", 1.0),\n )\n else:\n ax.patch(\n np.concatenate((unif_ecdf, unif_ecdf[::-1])),\n np.concatenate((p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),\n fill_color=fill_kwargs.get(\"color\"),\n fill_alpha=fill_kwargs.get(\"alpha\", 1.0),\n )\n else:\n if fill_kwargs is not None and fill_kwargs.get(\"drawstyle\") == \"steps-mid\":\n ax.step(\n unif_ecdf,\n p975 - unif_ecdf,\n line_color=plot_unif_kwargs.get(\"color\", \"black\"),\n line_alpha=plot_unif_kwargs.get(\"alpha\", 1.0),\n line_width=plot_kwargs.get(\"linewidth\", 1.0),\n mode=\"center\",\n )\n ax.step(\n unif_ecdf,\n p025 - unif_ecdf,\n line_color=plot_unif_kwargs.get(\"color\", \"black\"),\n line_alpha=plot_unif_kwargs.get(\"alpha\", 1.0),\n line_width=plot_unif_kwargs.get(\"linewidth\", 1.0),\n mode=\"center\",\n )\n else:\n ax.line(\n unif_ecdf,\n p975 - unif_ecdf,\n line_color=plot_unif_kwargs.get(\"color\", \"black\"),\n line_alpha=plot_unif_kwargs.get(\"alpha\", 1.0),\n line_width=plot_unif_kwargs.get(\"linewidth\", 1.0),\n )\n ax.line(\n unif_ecdf,\n p025 - unif_ecdf,\n line_color=plot_unif_kwargs.get(\"color\", \"black\"),\n line_alpha=plot_unif_kwargs.get(\"alpha\", 1.0),\n line_width=plot_unif_kwargs.get(\"linewidth\", 1.0),\n )\n else:\n if use_hdi:\n patch = BoxAnnotation(\n bottom=hdi_odds[1],\n top=hdi_odds[0],\n fill_alpha=hdi_kwargs.pop(\"alpha\"),\n fill_color=hdi_kwargs.pop(\"color\"),\n **hdi_kwargs\n )\n patch.level = \"underlay\"\n ax.add_layout(patch)\n\n # Adds horizontal reference line\n ax.line([0, 1], [1, 1], line_color=\"white\", line_width=1.5)\n else:\n for idx in range(n_unif):\n x_s, unif_density = kde(unif[idx, :])\n ax.line(\n x_s,\n unif_density,\n line_color=plot_unif_kwargs.get(\"color\", \"black\"),\n line_alpha=plot_unif_kwargs.get(\"alpha\", 0.1),\n line_width=plot_unif_kwargs.get(\"linewidth\", 1.0),\n )\n ax.line(\n x_vals,\n loo_pit_kde,\n line_color=plot_kwargs.get(\"color\", \"black\"),\n line_alpha=plot_kwargs.get(\"alpha\", 1.0),\n line_width=plot_kwargs.get(\"linewidth\", 3.0),\n )\n\n # Sets xlim(0, 1)\n ax.line(0, 0)\n ax.line(1, 0)\n show_layout(ax, show)\n\n return ax\n"
] | [
[
"numpy.isnan",
"numpy.atleast_2d"
],
[
"matplotlib.colors.hsv_to_rgb",
"numpy.hstack",
"numpy.concatenate",
"matplotlib.colors.to_hex"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ERhamat/opendrr-data-store | [
"34a737e8636707f85191e2f97a4ae78e8469e317"
] | [
"scripts/combines_all_csvs.py"
] | [
"#script found online to combine all csvs into one\nimport os\nimport glob\nimport pandas as pd\n#directory link\nos.chdir(\"C:/Workspace/eRisk_CA/PSRA_sample_data/baseline/c-damage\")\nextension = 'csv'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n#combine all files in the list\ncombined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])\n#export to csv\ncombined_csv.to_csv( \"damages-structural-mean_merge_baseline.csv\", index=False, encoding='utf-8-sig')"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
yweweler/ctc-asr | [
"4b24c658b43a28a4f939c95041953ad7a283ff1b"
] | [
"python/dataset/sd_estimator.py"
] | [
"\"\"\"\nCalculate mean and standard deviation for a given training txt file.\n\"\"\"\n\nimport os\nimport sys\nimport random\n\nfrom multiprocessing import Pool, Lock, cpu_count\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom python.load_sample import load_sample\nfrom python.params import BASE_PATH\n\n\n__DATASETS_PATH = os.path.join(BASE_PATH, '../datasets/speech_data')\n__FEATURE_TYPE = 'mel'\n\n\ndef calculate_dataset_stats(txt_path):\n \"\"\"Gather mean and standard deviation values. Averaged for every file in the\n training txt data file.\n\n Args:\n txt_path (str): Path to the `train.txt`.\n\n Returns:\n Nothing.\n \"\"\"\n # Read train.txt file.\n with open(txt_path, 'r') as f:\n lines = f.readlines()\n random.shuffle(lines)\n lines = lines[: int(2.0e5)] # To fit in RAM and not crash Numpy.\n\n # Setup thread pool.\n lock = Lock()\n features = [] # Output buffer.\n\n with Pool(processes=cpu_count()) as pool:\n for feature in tqdm(\n pool.imap_unordered(__stat_calculator, lines, chunksize=4),\n desc='Reading audio samples', total=len(lines), file=sys.stdout,\n unit='samples', dynamic_ncols=True):\n lock.acquire()\n features.append(feature)\n lock.release()\n\n # Reduce the [num_samples, time, num_features] to [total_time, num_features] array.\n features = np.concatenate(features)\n\n print('mean = {}'.format(np.mean(features)))\n print('std = {}'.format(np.std(features)))\n print()\n\n means = np.mean(features, axis=0)\n print('__global_mean = [' + ', '.join(map(str, means)) + ']')\n stds = np.std(features, axis=0)\n print('__global_std = [' + ', '.join(map(str, stds)) + ']')\n\n\ndef __stat_calculator(line):\n # Python multiprocessing helper method.\n wav_path, _ = line.split(' ', 1)\n wav_path = os.path.join(__DATASETS_PATH, wav_path)\n\n feature, _ = load_sample(wav_path, feature_type=__FEATURE_TYPE, feature_normalization='none')\n assert len(feature) > 1, 'Empty feature: {}'.format(wav_path)\n\n return feature\n\n\nif __name__ == '__main__':\n # Path to `train.txt` file.\n _test_txt_path = os.path.join(BASE_PATH, 'data', 'train.txt')\n\n # Display dataset stats.\n calculate_dataset_stats(_test_txt_path)\n"
] | [
[
"numpy.concatenate",
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
changhiskhan/virtual-background | [
"0002d85b0a329611926077633163b45e6668f673"
] | [
"fakecam/fake.py"
] | [
"import os\nimport cv2\nimport numpy as np\nimport requests\nimport pyfakewebcam\nimport traceback\nimport time\n\ndef get_mask(frame, bodypix_url=os.environ.get(\"BODYPIX_URL\",\"http://bodypix:9000\")):\n _, data = cv2.imencode(\".jpg\", frame)\n r = requests.post(\n url=bodypix_url,\n data=data.tobytes(),\n headers={'Content-Type': 'application/octet-stream'})\n mask = np.frombuffer(r.content, dtype=np.uint8)\n mask = mask.reshape((frame.shape[0], frame.shape[1]))\n return mask\n\ndef post_process_mask(mask):\n mask = cv2.dilate(mask, np.ones((10,10), np.uint8) , iterations=1)\n mask = cv2.blur(mask.astype(float), (10,10))\n return mask\n\ndef shift_image(img, dx, dy):\n img = np.roll(img, dy, axis=0)\n img = np.roll(img, dx, axis=1)\n if dy>0:\n img[:dy, :] = 0\n elif dy<0:\n img[dy:, :] = 0\n if dx>0:\n img[:, :dx] = 0\n elif dx<0:\n img[:, dx:] = 0\n return img\n\ndef hologram_effect(img):\n # add a blue tint\n holo = cv2.applyColorMap(img, cv2.COLORMAP_WINTER)\n # add a halftone effect\n bandLength, bandGap = 2, 3\n for y in range(holo.shape[0]):\n if y % (bandLength+bandGap) < bandLength:\n holo[y,:,:] = holo[y,:,:] * np.random.uniform(0.1, 0.3)\n # add some ghosting\n holo_blur = cv2.addWeighted(holo, 0.2, shift_image(holo.copy(), 5, 5), 0.8, 0)\n holo_blur = cv2.addWeighted(holo_blur, 0.4, shift_image(holo.copy(), -5, -5), 0.6, 0)\n # combine with the original color, oversaturated\n out = cv2.addWeighted(img, 0.5, holo_blur, 0.6, 0)\n return out\n\ndef get_frame(cap, background_scaled, speed=True, effect=None):\n _, frame = cap.read()\n # fetch the mask with retries (the app needs to warmup and we're lazy)\n # e v e n t u a l l y c o n s i s t e n t\n mask = None\n while mask is None:\n try:\n\n if speed:\n shrinked_frame = cv2.resize(frame, (width//2, height//2)) \n shrinked_mask = get_mask(shrinked_frame)\n mask = cv2.resize(shrinked_mask, (width, height))\n else:\n mask = get_mask(frame)\n\n except requests.RequestException:\n print(\"mask request failed, retrying\")\n traceback.print_exc()\n time.sleep(5)\n \n # post-process mask and frame\n mask = post_process_mask(mask)\n if effect is not None: \n effect_fun = globals()[effect + '_effect']\n frame = effect_fun(frame)\n\n # composite the foreground and background\n inv_mask = 1-mask\n for c in range(frame.shape[2]): \n frame[:,:,c] = frame[:,:,c]*mask + background_scaled[:,:,c]*inv_mask\n\n\n return frame\n\n\nif __name__ == '__main__':\n\n actual_device = os.environ.get('ACTUAL_CAMERA','/dev/video0')\n fake_device = os.environ.get('FAKE_CAMERA','/dev/video20')\n width = int(os.environ.get('CAMERA_WIDTH',640))\n height = int(os.environ.get('CAMERA_HEIGHT',360))\n cam_fps = int(os.environ.get('CAMERA_FPS',24))\n is_background_video = os.environ.get('IS_VID_BACKGROUND', 'false') == 'true'\n background_file_path = os.environ.get('BACKGROUND_FILE', '/data/background.jpg')\n effect = os.environ.get('EFFECT', None)\n\n # setup access to the *real* webcam\n cap = cv2.VideoCapture(actual_device)\n \n cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n cap.set(cv2.CAP_PROP_FPS, cam_fps)\n\n # setup the fake camera\n fake = pyfakewebcam.FakeWebcam(fake_device, width, height)\n\n # load the virtual background\n _background_scaled = {}\n if is_background_video:\n def get_background_scaled(width, height):\n if int(time.time()) % 10 == 0 or len(_background_scaled) == 0:\n updated_background_file_size = os.stat(background_file_path).st_size\n if updated_background_file_size != 
_background_scaled.get('size', None):\n if 'cap' in _background_scaled:\n _background_scaled['cap'].release()\n _background_scaled['cap'] = cv2.VideoCapture(background_file_path)\n _background_scaled['size'] = updated_background_file_size\n background_cap = _background_scaled['cap']\n success, frame = background_cap.read()\n if success:\n return cv2.resize(frame, (width, height))\n background_cap.set(cv2.CAP_PROP_POS_FRAMES, 1)\n return get_background_scaled(width, height)\n else:\n def get_background_scaled(width, height):\n if int(time.time()) % 10 == 0 or len(_background_scaled) == 0:\n updated_background_file_size = os.stat(background_file_path).st_size\n if updated_background_file_size != _background_scaled.get('size', None):\n background = cv2.imread(background_file_path)\n _background_scaled['frame'] = cv2.resize(background,(width, height))\n _background_scaled['size'] = updated_background_file_size\n return _background_scaled['frame']\n\n while True:\n frame = get_frame(cap, get_background_scaled(width, height), effect=effect)\n # fake webcam expects RGB\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n fake.schedule_frame(frame)\n"
] | [
[
"numpy.frombuffer",
"numpy.roll",
"numpy.random.uniform",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AloBer03/MA_Alo-sBerger | [
"93e72f7940a3ea8bab3c72e00c92091c01dc5324"
] | [
"NNFS/nnma.py"
] | [
"## nnma\r\n\r\n## code from NNFS\r\n## My own comments are marked with ##\r\n## My own code start with ##-- and ends with --##\r\n\r\n## Makig a file with only the classes\r\n## This will enable to import nnma and not copy all the function into the new file\r\n\r\nimport matplotlib.gridspec as gridspec\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as cl\r\nimport copy\r\nimport pickle\r\nimport numpy as np\r\nimport nnfs\r\nimport os\r\nimport cv2\r\n\r\n\r\nnnfs.init()\r\n\r\n# Dense Layer\r\nclass Layer_Dense:\r\n\r\n\t# Layer initialization\r\n\tdef __init__(self, n_inputs, n_neuron,\r\n\t\t\t\t weight_regularizer_l1=0, weight_regularizer_l2=0,\r\n\t\t\t\t bias_regularizer_l1=0, bias_regularizer_l2=0):\r\n\t\t# Initialize weights and bias\r\n\t\tself.weights = 0.01 * np.random.randn(n_inputs, n_neuron)\r\n\t\tself.biases = np.zeros((1, n_neuron)) \r\n\t\t# Set regularization strength\r\n\t\tself.weight_regularizer_l1 = weight_regularizer_l1\r\n\t\tself.weight_regularizer_l2 = weight_regularizer_l2\r\n\t\tself.bias_regularizer_l1 = bias_regularizer_l1\r\n\t\tself.bias_regularizer_l2 = bias_regularizer_l2\r\n\t\t# Store stats\r\n\t\tself.stat = 'Layer_Dense: '+str(n_inputs)+', '+str(n_neuron)\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\t# Remember input values\r\n\t\tself.inputs = inputs\r\n\t\t# Calculate output through input, weights, bias\r\n\t\tself.output = np.dot(inputs, self.weights) + self.biases\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues):\r\n\t\t# Gradient on parameters\r\n\t\tself.dweights = np.dot(self.inputs.T, dvalues)\r\n\t\tself.dbiases = np.sum(dvalues, axis=0, keepdims=True)\r\n\t\t\r\n\t\t# Gradient on regularization\r\n\t\t# L1 on weights\r\n\t\tif self.weight_regularizer_l1 > 0:\r\n\t\t\tdL1 = np.ones_like(self.weights)\r\n\t\t\tdL1[self.weights < 0] = -1\r\n\t\t\tself.dweights += self.weight_regularizer_l1 * dL1\r\n\t\t# L2 on weights\r\n\t\tif self.weight_regularizer_l2 > 0:\r\n\t\t\tself.dweights += 2 * self.weight_regularizer_l2 * self.weights\r\n\r\n\t\t# L1 on biases\r\n\t\tif self.bias_regularizer_l1 > 0:\r\n\t\t\tdL1 = np.ones_like(self.biases)\r\n\t\t\tdL1[self.biases < 0] = -1\r\n\t\t\tself.dbiases += self.bias_regularizer_l1 * dL1\r\n\t\t# L2 on biases\r\n\t\tif self.bias_regularizer_l2 > 0:\r\n\t\t\tself.dbiases += 2 * self.bias_regularizer_l2 * self.biases\r\n\r\n\t\t# Gradient on values\r\n\t\tself.dinputs = np.dot(dvalues, self.weights.T)\r\n\r\n\t# Retrieve layer parameters\r\n\tdef get_parameters(self):\r\n\t\treturn self.weights, self.biases\r\n\r\n\t# Set weights and biases in a layer instance\r\n\tdef set_parameters(self, weights, biases):\r\n\t\tself.weights = weights\r\n\t\tself.biases = biases\r\n\r\n# Dropout\r\nclass Layer_Dropout:\r\n\r\n\t# Init\r\n\tdef __init__(self, rate):\r\n\t\t# Store rate, we invert it as for example for dropout of 0.1 we need success rate of 0.9\r\n\t\tself.rate = 1 - rate\r\n\t\t# Store stats\r\n\t\tself.stat = \"Layer_Dropout: rate:\"+str(rate)\r\n\r\n\t# Forward pass \r\n\tdef forward(self, inputs, training):\r\n\r\n\t\t# Save input values\r\n\t\tself.inputs = inputs\r\n\r\n\t\t# If not in the training mode - return values\r\n\t\tif not training:\r\n\t\t\tself.output = inputs.copy()\r\n\t\t\treturn\r\n\r\n\t\t# Generate and save scaled mask\r\n\t\tself.binary_mask = np.random.binomial(1, self.rate, size=inputs.shape) / self.rate\r\n\t\t# Apply mask to output values\r\n\t\tself.output = inputs * self.binary_mask\r\n\r\n\t# Backward pass\r\n\tdef 
backward(self, dvalues):\r\n\t\t# Gradient on values\r\n\t\tself.dinputs = dvalues * self.binary_mask\r\n\r\n# Input \"layer\"\r\nclass Layer_Input:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\tself.output = inputs\r\n\r\nclass Activation_ReLU:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\t# Remember the inputs values\r\n\t\tself.inputs = inputs\r\n\t\t# Calculate ouput value from inputs\r\n\t\tself.output = np.maximum(0, inputs)\r\n\t\t# Store stats\r\n\t\tself.stat = \"Activation_ReLU\"\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues):\r\n\t\t# Since we need to modify the originaal variable, let's make a copy of the values first\r\n\t\tself.dinputs = dvalues.copy()\r\n\r\n\t\t# Zero gradient where input values were nagative\r\n\t\tself.dinputs[self.inputs <= 0] = 0\r\n\r\n\t# Calculate predictions for outputs\r\n\tdef predictions(self, outputs):\r\n\t\treturn outputs\r\n\r\n# Softmax activation\r\nclass Activation_Softmax:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\r\n\t\t# Remember input values\r\n\t\tself.inputs = inputs\r\n\r\n\t\t# Get unnormalized probabilities\t\t \r\n\t\texp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True)) \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \r\n\t\t# Normalize them for each sample \r\n\t\tprobabilities = exp_values / np.sum(exp_values, axis=1, keepdims=True)\r\n\r\n\t\tself.output = probabilities\r\n\r\n\t\t# Store stats\r\n\t\tself.stat = 'Activation_Softmax'\r\n\r\n\t# Backwards pass\r\n\tdef backward(self, dvalues):\r\n\r\n\t\t# Create uninitialized array\r\n\t\tself.dinputs = np.empty_like(dvalues)\r\n\r\n\t\t# Enumerate outputs and gradients\r\n\t\tfor index, (single_output, single_dvalues) in enumerate(zip(self.output, dvalues)):\r\n\t\t\t# Flatten output array\r\n\t\t\tsingle_output = single_output.reshape(-1, 1)\r\n\t\t\t# Calculate Jacobian matrix of the output\r\n\t\t\tjacobian_matrix = np.diagflat(single_output) - np.dot(single_output, single_output.T)\r\n\t\t\t# Calculate sample-wise gradient and add it to the array of sample gradients\r\n\t\t\tself.dinputs[index] = np.dot(jacobian_matrix, single_dvalues)\r\n\r\n\t# Calculate predictions for outputs\r\n\tdef predictions(self, outputs):\r\n\t\treturn np.argmax(outputs, axis=1)\r\n\r\n\t# Return the confidences (Aloïs)\r\n\tdef confidencces(self, outputs):\r\n\t\treturn outputs\r\n\r\n# Sigmoid activation\r\nclass Activation_Sigmoid:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\t# Save input and calculate/save output of the sigmoid function\r\n\t\tself.inputs = inputs\r\n\t\tself.output = 1 / (1 + np.exp(-inputs))\r\n\t\t# Store stats\r\n\t\tself.stat = 'Activation_Sigmoid'\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues):\r\n\t\t# Derivative - calculates form output of the sigmoid function\r\n\t\tself.dinputs = dvalues * (1 - self.output) * self.output\r\n\r\n\t# Calculate predictions for outputs\r\n\tdef predictions(self, outputs):\r\n\t\treturn (outputs > 0.5) * 1\r\n\r\n# Linear activation\r\nclass Activation_Linear:\r\n\r\n\t# Forward pass\r\n\tdef forward(self, inputs, training):\r\n\t\t# Just remember values\r\n\t\tself.inputs = inputs\r\n\t\tself.output = inputs\r\n\t\t# Store stats\r\n\t\tself.stat = 'Activation_Linear'\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues):\r\n\t\t# The derivative is 1, 1 * dvalues = dvalues - the chain rule\r\n\t\tself.dinputs = dvalues.copy()\r\n\r\n\t# Calculate predictions for outputs\r\n\tdef 
predictions(self, outputs):\r\n\t\treturn outputs\r\n\r\n#SGD optimizer\r\nclass Optimizer_SGD:\r\n\r\n\t# Initialize optimizer - set settings,\r\n\t# learning rate of 1. is default for this optimizer\r\n\tdef __init__(self, learning_rate=1.0, decay=0.0, momentum=0.):\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.current_learning_rate = learning_rate\r\n\t\tself.decay = decay\r\n\t\tself.iterations = 0\r\n\t\tself.momentum = momentum\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Optimizer_SGD'\r\n\r\n\t# Call once before any parameter updates \r\n\tdef pre_update_params(self):\r\n\t\tif self.decay:\r\n\t\t\tself.current_learning_rate = self.learning_rate * (1. / (1. + self.decay * self.iterations))\r\n\r\n\t# Update parameters \r\n\tdef update_params(self, layer):\r\n\r\n\t\t# If we use momentum\r\n\t\tif self.momentum:\r\n\t\t\t# If layer does not contain momentum arrays, create them filled with zeros\r\n\t\t\tif not hasattr(layer, 'weight_momentums'):\r\n\t\t\t\tlayer.weight_momentums = np.zeros_like(layer.weights)\r\n\t\t\t\t# If there is no momentum array for weights\r\n\t\t\t\t# The array doesn't exist for biases yet either\r\n\t\t\t\tlayer.bias_momentums = np.zeros_like(layer.biases)\r\n\r\n\t\t\t# Build weight updates with momentum - takes previous updates multiplied by retain factor\r\n\t\t\t# and update with current gradients\r\n\t\t\tweight_updates = self.momentum *layer.weight_momentums - \\\r\n\t\t\t\t\t\t\t self.current_learning_rate * layer.dweights\r\n\t\t\tlayer.weight_momentums = weight_updates\r\n\r\n\t\t\t# Build bias updates\r\n\t\t\tbias_updates = self.momentum * layer.bias_momentums - self.current_learning_rate * layer.dbiases\r\n\t\t\tlayer.bias_momentums = bias_updates\r\n\r\n\t\t# Vannilla SGD updates (as before momentum update)\r\n\t\telse: \r\n\t\t\tweight_updates = -self.current_learning_rate * layer.dweights\r\n\t\t\tbias_updates = -self.current_learning_rate * layer.dbiases\r\n\r\n\t\t# Update weights and biases using either vanilla or momentum updates\r\n\t\tlayer.weights += weight_updates\r\n\t\tlayer.biases += bias_updates\r\n\r\n\t# Call once after any parameter updates \r\n\tdef post_update_params(self):\r\n\t\tself.iterations += 1\r\n\r\n# Adagrad optimizer\r\nclass Optimizer_Adagrad:\r\n\r\n\t# Initialize optimizer - set settings\r\n\tdef __init__(self, learning_rate=1., decay=0., epsilon=1e-7):\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.current_learning_rate = learning_rate\r\n\t\tself.decay = decay\r\n\t\tself.iterations = 0\r\n\t\tself.epsilon = epsilon\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Optimizer_Adagrad'\r\n\t\t\r\n\r\n\t# Call once before any parameter updates \r\n\tdef pre_update_params(self):\r\n\t\tif self.decay:\r\n\t\t\tself.current_learning_rate = self.learning_rate * (1. / (1. 
+ self.decay * self.iterations))\r\n\r\n\t# Update parameters \r\n\tdef update_params(self, layer):\r\n\r\n\t\t# If layer does not contain cache arrays, create them filled with zeros\r\n\t\tif not hasattr(layer, 'weight_cache'):\r\n\t\t\tlayer.weight_cache = np.zeros_like(layer.weights)\r\n\t\t\tlayer.bias_cache = np.zeros_like(layer.biases)\r\n\r\n\t\t# Update cache with squared current gradients\r\n\t\tlayer.weight_cache += layer.dweights**2\r\n\t\tlayer.bias_cache += layer.dbiases**2\r\n\t\t\r\n\t\t# Vanilla SGD parameter update + normalization with quare rooted cache\r\n\t\tlayer.weights += -self.current_learning_rate * \\\r\n\t\t\t\t\t\t layer.dweights / \\\r\n\t\t\t\t\t\t (np.sqrt(layer.weight_cache) + self.epsilon)\r\n\t\tlayer.biases += -self.current_learning_rate * \\\r\n\t\t\t\t\t\tlayer.dbiases / \\\r\n\t\t\t\t\t\t(np.sqrt(layer.bias_cache) + self.epsilon)\r\n\r\n\t# Call once after any parameter updates\r\n\tdef post_update_params(self):\r\n\t\tself.iterations += 1\r\n\r\n# RMSprop optimizer\r\nclass Optimizer_RMSprop:\r\n\r\n\t# Initialize optimizer - set settings\r\n\tdef __init__(self, learning_rate=0.001, decay=0.0, epsilon=1e-7, rho=0.9):\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.current_learning_rate = learning_rate\r\n\t\tself.decay = decay\r\n\t\tself.iterations = 0\r\n\t\tself.epsilon = epsilon\r\n\t\tself.rho = rho\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Optimizer_RMSprop'\r\n\r\n\t# Call once before any parameter updates \r\n\tdef pre_update_params(self):\r\n\t\tif self.decay:\r\n\t\t\tself.current_learning_rate = self.learning_rate * (1. / (1. + self.decay *self.iterations))\r\n\r\n\t# Update parameters \r\n\tdef update_params(self, layer):\r\n\r\n\t\t# If layer does not contain cache arrays, create them filled with zeros\r\n\t\tif not hasattr(layer, 'weight_cache'):\r\n\t\t\tlayer.weight_cache = np.zeros_like(layer.weights)\r\n\t\t\tlayer.bias_cache = np.zeros_like(layer.biases)\r\n\r\n\t\t# Update cache with squared current gradients\r\n\t\tlayer.weight_cache = self.rho * layer.weight_cache + \\\r\n\t\t\t\t\t\t\t (1 - self.rho) * layer.dweights**2\r\n\t\tlayer.bias_cache = self.rho * layer.bias_cache + \\\r\n\t\t\t\t\t\t (1 - self.rho) * layer.dbiases**2\r\n\r\n\t\t# Vanilla SGD parameter update + normalization\r\n\t\t# with square rooted cache\r\n\t\tlayer.weights += -self.current_learning_rate * layer.dweights / \\\r\n\t\t\t\t\t\t (np.sqrt(layer.weight_cache) + self.epsilon)\r\n\t\tlayer.biases += -self.current_learning_rate * layer.dbiases / \\\r\n\t\t\t\t\t\t(np.sqrt(layer.bias_cache) + self.epsilon)\r\n\r\n\t# Call once after any parameter updates\r\n\tdef post_update_params(self):\r\n\t\tself.iterations += 1\r\n\r\n# Adam optimizer\r\nclass Optimizer_Adam:\r\n\r\n\t# Initialize optimizer - set settings\r\n\tdef __init__(self, learning_rate=0.001, decay=0.0, epsilon=1e-7, beta_1=0.9, beta_2 = 0.999):\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.current_learning_rate = learning_rate\r\n\t\tself.decay = decay\r\n\t\tself.iterations = 0\r\n\t\tself.epsilon = epsilon\r\n\t\tself.beta_1 = beta_1\r\n\t\tself.beta_2 = beta_2\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Optimizer_Adam'\r\n\r\n\t# Call once before any parameter updates \r\n\tdef pre_update_params(self):\r\n\t\tif self.decay:\r\n\t\t\tself.current_learning_rate = self.learning_rate * (1. / (1. 
+ self.decay *self.iterations))\r\n\r\n\t# Update parameters \r\n\tdef update_params(self, layer):\r\n\r\n\t\t# If layer does not contain momentum and cache arrays, create them filled with zeros\r\n\t\tif not hasattr(layer, 'weight_cache'):\r\n\t\t\tlayer.weight_momentums = np.zeros_like(layer.weights)\r\n\t\t\tlayer.weight_cache = np.zeros_like(layer.weights)\r\n\t\t\tlayer.bias_momentums = np.zeros_like(layer.biases)\r\n\t\t\tlayer.bias_cache = np.zeros_like(layer.biases)\r\n\r\n\t\t# Update Momentum with with current gradients\r\n\t\tlayer.weight_momentums = self.beta_1 * layer.weight_momentums + \\\r\n\t\t\t\t\t\t\t\t\t(1 - self.beta_1) * layer.dweights\r\n\t\tlayer.bias_momentums = self.beta_1 * layer.bias_momentums + \\\r\n\t\t\t\t\t\t\t\t\t(1 - self.beta_1) * layer.dbiases\t\t\r\n\t\t# Get corrected momentum\r\n\t\t# self.iteration is 0 at first pass and we need to start with 1 here\r\n\t\tweight_momentums_corrected = layer.weight_momentums / \\\r\n\t\t\t\t\t\t\t\t\t\t(1 - self.beta_1 ** (self.iterations + 1))\r\n\t\tbias_momentums_corrected = layer.bias_momentums / \\\r\n\t\t\t\t\t\t\t\t\t\t(1 - self.beta_1 **(self.iterations + 1))\r\n\t\t# Update cache with squared current gradients\r\n\t\tlayer.weight_cache = self.beta_2 * layer.weight_cache + \\\r\n\t\t\t\t\t\t\t (1 - self.beta_2) * layer.dweights ** 2\r\n\t\tlayer.bias_cache = self.beta_2 * layer.bias_cache + \\\r\n\t\t\t\t\t\t (1 - self.beta_2) * layer.dbiases ** 2\r\n\t\t# Get corrected cache\r\n\t\tweight_cache_corrected = layer.weight_cache / (1 - self.beta_2 ** (self.iterations + 1))\r\n\t\tbias_cache_corrected = layer.bias_cache / (1 - self.beta_2 ** (self.iterations + 1))\r\n\r\n\t\t# Vanilla SGD parameter update + normalization with square rooted cache\r\n\t\tlayer.weights += -self.current_learning_rate * weight_momentums_corrected / \\\r\n\t\t\t\t\t\t (np.sqrt(weight_cache_corrected) + self.epsilon)\r\n\t\tlayer.biases += -self.current_learning_rate * bias_momentums_corrected / \\\r\n\t\t\t\t\t\t(np.sqrt(bias_cache_corrected) + self.epsilon)\r\n\r\n\t# Call once after any parameter updates \r\n\tdef post_update_params(self):\r\n\t\tself.iterations += 1\r\n\r\n# Common loss class \r\nclass Loss:\r\n\r\n\t# Regularization loss calculation\r\n\tdef regularization_loss(self):\r\n\r\n\t\t# 0 by default\r\n\t\tregularization_loss = 0\r\n\r\n\t\t# Calculate regularization loss\r\n\t\t# iterate all trainable layers\r\n\t\tfor layer in self.trainable_layers:\r\n\r\n\t\t\t# L1 regularization - weithgts\r\n\t\t\t# calculate only when factor greater than 0\r\n\t\t\tif layer.weight_regularizer_l1 > 0:\r\n\t\t\t\tregularization_loss += layer.weight_regularizer_l1 * np.sum(np.abs(layer.weights))\r\n\r\n\t\t\t# L2 regularization - weights\r\n\t\t\tif layer.weight_regularizer_l2 > 0:\r\n\t\t\t\tregularization_loss += layer.weight_regularizer_l2 * np.sum(layer.weights * layer.weights)\r\n\r\n\t\t\t# L1 regularization - biases\r\n\t\t\t# calculate only when factor is greater than 0\r\n\t\t\tif layer.bias_regularizer_l1 > 0:\r\n\t\t\t\tregularization_loss += layer.bias_regularizer_l1 * np.sum(np.abs(layer.biases))\r\n\r\n\t\t\t# L2 regularization - biases\r\n\t\t\tif layer.bias_regularizer_l2 > 0:\r\n\t\t\t\tregularization_loss += layer.bias_regularizer_l2 * np.sum(layer.biases * layer.biases)\r\n\r\n\t\treturn regularization_loss\r\n\r\n\t# Set/remember trainable layers\r\n\tdef remember_trainable_layers(self, trainable_layers):\r\n\t\tself.trainable_layers = trainable_layers\r\n\r\n\t# Calculates the data and regularization 
losses\r\n\t# given model output and ground truth values\r\n\tdef calculate(self, output, y, *, include_regularization=False):\r\n\r\n\t\t# Calculate sample losses\r\n\t\tsample_losses = self.forward(output, y)\r\n\r\n\t\t# Calculate mean loss\r\n\t\tdata_loss = np.mean(sample_losses)\r\n\r\n\t\t# Add accumulated sum of losses and sample count\r\n\t\tself.accumulated_sum += np.sum(sample_losses)\r\n\t\tself.accumulated_count += len(sample_losses)\r\n\r\n\t\t# If just data loss - return it\r\n\t\tif not include_regularization:\r\n\t\t\treturn data_loss\r\n\r\n\t\t# Return the data and regularization losses\r\n\t\treturn data_loss, self.regularization_loss()\r\n\r\n\t# Calculate accumulated loss\r\n\tdef calculate_accumulated(self, *, include_regularization=False):\r\n\r\n\t\t# Calculate mean loss\r\n\t\tdata_loss = self.accumulated_sum / self.accumulated_count\r\n\r\n\t\t# If just data loss - return it\r\n\t\tif not include_regularization:\r\n\t\t\treturn data_loss\r\n\r\n\t\t# return the data and regularization losses\r\n\t\treturn data_loss, self.regularization_loss()\r\n\r\n\t# Reset variables for accumulated loss\r\n\tdef new_pass(self):\r\n\t\tself.accumulated_sum = 0\r\n\t\tself.accumulated_count = 0\r\n\r\n# Cross-entropy loss\r\nclass Loss_CategoricalCrossentropy(Loss):\r\n\r\n\t# Forward pass\r\n\tdef forward(self, y_pred, y_true):\r\n\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Loss_CategoricalCrossentropy'\r\n\r\n\t\t# Number of samples in a batch\r\n\t\tsamples = len(y_pred)\r\n\r\n\t\t# Clip data to prevent division by 0\r\n\t\t# Clip both sides to not drag mean towards any value\r\n\t\ty_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)\r\n\r\n\t\t# Probabilities for target values - only if categorical labels \r\n\t\tif len(y_true.shape) == 1:\r\n\t\t\tcorrect_confidences = y_pred_clipped[range(samples), y_true]\r\n\r\n\t\t# Mask values - only for one-hot encoded labels \r\n\t\telif len(y_true.shape) == 2:\r\n\t\t\tcorrect_confidences = np.sum(y_pred_clipped * y_true, axis=1)\r\n\r\n\t\t# Losses\r\n\t\tnegative_log_likelihoods = -np.log(correct_confidences)\r\n\t\treturn negative_log_likelihoods\r\n\r\n\t# Backwards pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\t\t# Number of labels in ervery sample\r\n\t\t# We'll use the first sample to count them\r\n\t\tlabels = len(dvalues[0])\r\n\r\n\t\t# If labels are sparse, turn them into one-hot vector\r\n\t\tif len(y_true.shape) == 1:\r\n\t\t\ty_true = np.eye(labels)[y_true]\r\n\r\n\t\t# Calculate gradient\r\n\t\tself.dinputs = -y_true / dvalues\r\n\t\t# Normalize gradient\r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Softmax classifier - combined Softmax activation and cross-entropy loss for faster backward step\r\nclass Activation_Softmax_Loss_CategoricalCrossentropy():\r\n\r\n\t# Backwards pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\r\n\t\t# If labels are one-hot encoded turn them into discrete values\r\n\t\tif len(y_true.shape) == 2:\r\n\t\t\ty_true = np.argmax(y_true, axis=1)\r\n\r\n\t\t# Copy so we can sagely modify \r\n\t\tself.dinputs = dvalues.copy()\r\n\t\t# Calculate gradient\r\n\t\tself.dinputs[range(samples), y_true] -= 1\r\n\t\t# Normalize gradient\r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Binary cross-entropy loss\r\nclass Loss_BinaryCrossentropy(Loss):\r\n\t\r\n\t# Forward pass\r\n\tdef forward(self, y_pred, y_true):\r\n\r\n\t\t# For stats remember name 
(Aloïs)\r\n\t\tself.stat = 'Loss_BinaryCrossentropy'\r\n\r\n\t\t# Clip data to prevent dicision by 0\r\n\t\t# Clip both sides to not drag mean towards any value\r\n\t\ty_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)\r\n\r\n\t\t# Calculate samle-wise loss\r\n\t\tsample_losses = -(y_true * np.log(y_pred_clipped) + (1 - y_true) * np.log(1 - y_pred_clipped))\r\n\t\tsample_losses = np.mean(sample_losses, axis= -1)\r\n\r\n\t\t# Return losses\r\n\t\treturn sample_losses\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\t\t# Number of outputs in every sample\r\n\t\t# We'll use the first sample to count them\r\n\t\toutputs = len(dvalues[0])\r\n\r\n\t\t# Clip data to prevent division by 0\r\n\t\t# Clip both sides to not drag mean towards any value\r\n\t\tclipped_dvalues = np.clip(dvalues, 1e-7, 1 - 1e-7)\r\n\r\n\t\t# Calculate gradient\r\n\t\tself.dinputs = -(y_true / clipped_dvalues - (1 - y_true) / (1 - clipped_dvalues)) / outputs\r\n\t\t# Normalize gradient\r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Mean Squarred Error loss\r\nclass Loss_MeanSquaredError(Loss):\r\n\t\r\n\t# Forward pass\r\n\tdef forward(self, y_pred, y_true):\r\n\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Loss_MeanSquaredError'\r\n\r\n\t\t# Calculate loss\r\n\t\tsample_losses = np.mean((y_true - y_pred)**2, axis=-1)\r\n\r\n\t\t# Return losses\r\n\t\treturn sample_losses\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\t\t# Number of outputs in every sample\r\n\t\t# We'll use the first sample to count them\r\n\t\toutputs = len(dvalues[0])\r\n\r\n\t\t# Gradient on values\r\n\t\tself.dinputs = -2 * (y_true - dvalues) / outputs\r\n\t\t# Normalize gradient\r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Mean Absolute Error loss\r\nclass Loss_MeanAbsoluteError(Loss):\r\n\t\r\n\t# Forward pass\r\n\tdef forward(self, y_pred, y_true):\r\n\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Loss_MeanAbsoluteError'\r\n\r\n\t\t# Calculate loss\r\n\t\tsample_losses = np.mean(np.abs(y_true - y_pred), axis=-1)\r\n\r\n\t\t# Return losses\r\n\t\treturn sample_losses\r\n\r\n\t# Backward pass\r\n\tdef backward(self, dvalues, y_true):\r\n\r\n\t\t# Number of samples\r\n\t\tsamples = len(dvalues)\r\n\t\t# Number of outputs in every sample\r\n\t\t# We'll use the first sample to count them\r\n\t\toutputs = len(dvalues[0])\r\n\r\n\t\t# Calculate gradient\r\n\t\tself.dinputs = np.sign(y_true - dvalues) / outputs\r\n\t\t# Normalize gradient \r\n\t\tself.dinputs = self.dinputs / samples\r\n\r\n# Common accuracy class\r\nclass Accuracy:\r\n\r\n\t# Calculate an accuracy\r\n\t# given predictions and ground truth values\r\n\tdef calculate(self, predictions, y):\r\n\r\n\t\t# Get comparison results\r\n\t\tcomparisons = self.compare(predictions, y)\r\n\r\n\t\t# Calculate an accuracy\r\n\t\taccuracy = np.mean(comparisons)\r\n\r\n\t\t# Add accumulated sum of matching values and sample count\r\n\t\tself.accumulated_sum += np.sum(comparisons)\r\n\t\tself.accumulated_count += len(comparisons)\r\n\r\n\t\t# Return accuracy\r\n\t\treturn accuracy\r\n\r\n\t# Calculate accumulated accuracy\r\n\tdef calculate_accumulated(self):\r\n\r\n\t\t# Calculate an accuracy\r\n\t\taccuracy = self.accumulated_sum / self.accumulated_count\r\n\r\n\t\t# Return the data and regularization losses\r\n\t\treturn accuracy\r\n\r\n\t# Reset variables for accumulated accuracy\r\n\tdef 
new_pass(self):\r\n\t\tself.accumulated_sum = 0\r\n\t\tself.accumulated_count = 0\r\n\r\n# Accuracy calculation for classification model\r\nclass Accuracy_Categorical(Accuracy):\r\n\r\n\tdef __init__(self, *, binary=False):\r\n\t\t# Binary model?\r\n\t\tself.binary = binary\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Accuracy_Categorical'\r\n\r\n\t# No initialization is needed\r\n\tdef init(self, y):\r\n\t\t# Needs to exist because it's called automatically\r\n\t\tpass\r\n\r\n\t# Compares predictions to the ground truth values\r\n\tdef compare(self, predictions, y):\r\n\t\tif not self.binary and len(y.shape) == 2:\r\n\t\t\ty = np.argmax(y, axis=1)\r\n\t\treturn predictions == y\r\n\r\n# Accuracy calculation for regression model\r\nclass Accuracy_Regression(Accuracy):\r\n\r\n\tdef __init__(self):\r\n\t\t# Create precision property\r\n\t\tself.precision = None\r\n\t\t# For stats remember name (Aloïs)\r\n\t\tself.stat = 'Accuracy_Regression'\r\n\r\n\t# Calculates precision value based on passed-in ground truth values\r\n\tdef init(self, y, reinit=False):\r\n\t\tif self.precision is None or reinit:\r\n\t\t\tself.precision = np.std(y) / 250\r\n\r\n\t# Compares predictions to the ground truth values\r\n\tdef compare(self, predictions, y):\r\n\t\treturn np.absolute(predictions - y) < self.precision\r\n\r\n# Model class\r\nclass Model:\r\n\r\n\tdef __init__(self):\r\n\t\t# Create a list of network objects\r\n\t\tself.layers = []\r\n\t\t# Softmax calssifier's output object\r\n\t\tself.softmax_classifier_output = None\r\n\r\n\t# Add objects to the model\r\n\tdef add(self, layer):\r\n\t\tself.layers.append(layer)\r\n\r\n\t# Set loss, optimizer and accuracy\r\n\tdef set(self, *, loss=None, optimizer=None, accuracy=None): \r\n\t\t\r\n\t\tif loss is not None:\r\n\t\t\tself.loss = loss\r\n\r\n\t\tif optimizer is not None:\r\n\t\t\tself.optimizer = optimizer\r\n\t\t\r\n\t\tif accuracy is not None:\r\n\t\t\tself.accuracy = accuracy\r\n\r\n\t# Finalize the model\r\n\tdef finalize(self):\r\n\r\n\t\t# Create and set the input layer\r\n\t\tself.input_layer = Layer_Input()\r\n\r\n\t\t# Count all the objects\r\n\t\tlayer_count = len(self.layers)\r\n\r\n\t\t# Initialize a list containing trainable layers:\r\n\t\tself.trainable_layers = []\r\n\r\n\t\t# Iterate the objects\r\n\t\tfor i in range(layer_count):\r\n\r\n\t\t\t# If it's the first layer\r\n\t\t\t# the previous layer object is the input layer\r\n\t\t\tif i==0:\r\n\t\t\t\tself.layers[i].prev = self.input_layer\r\n\t\t\t\tself.layers[i].next = self.layers[i+1]\r\n\r\n\t\t\t# All layers except for the first and the last\r\n\t\t\telif i < layer_count -1:\r\n\t\t\t\tself.layers[i].prev = self.layers[i-1]\r\n\t\t\t\tself.layers[i].next = self.layers[i+1]\r\n\r\n\t\t\t# The last layer - the next object is the loss\r\n\t\t\t# Also let's save aside the reference to the last object whose output is the model's output\r\n\t\t\telse:\r\n\t\t\t\tself.layers[i].prev = self.layers[i-1]\r\n\t\t\t\tself.layers[i].next = self.loss\r\n\t\t\t\tself.output_layer_activation = self.layers[i]\r\n\r\n\t\t\t# If layer contains an attribute called \"weights\", it's a trainable alyer - \r\n\t\t\t# add it to the list of trainable layers\r\n\t\t\t# We don't need to check for biases - checking for weights is enough\r\n\t\t\tif hasattr(self.layers[i], 'weights'):\r\n\t\t\t\tself.trainable_layers.append(self.layers[i])\r\n\r\n\t\t# Update loss object with trainable layers\r\n\t\tif self.loss is not 
None:\r\n\t\t\tself.loss.remember_trainable_layers(self.trainable_layers)\r\n\r\n\t\t# If output activation is Softmax and loss function is Categorical Cross-Entropy\r\n\t\t# create an object of combined activation and loss function containing\r\n\t\t# faster gradient calculation\r\n\t\tif isinstance(self.layers[-1], Activation_Softmax) and \\\r\n\t\t isinstance(self.loss, Loss_CategoricalCrossentropy):\r\n\t\t\t# Create an object of combined activation and loss functions\r\n\t\t\tself.softmax_classifier_output = \\\r\n\t\t\t\tActivation_Softmax_Loss_CategoricalCrossentropy()\r\n\r\n\t# Train the model\r\n\tdef train(self, X, y, *, epochs=1, batch_size=None, print_every=1, validation_data=None):\r\n\r\n\t\t# Initialize accuracy object\r\n\t\tself.accuracy.init(y)\r\n\r\n\t\t# Default value if batch size is not set\r\n\t\ttrain_steps = 1\r\n\r\n\t\t# If there is validation data passed, set default number of steps for validation as well\r\n\t\tif validation_data is not None:\r\n\t\t\tvalidation_steps = 1\r\n\r\n\t\t\t# For better readability\r\n\t\t\tX_val, y_val = validation_data\r\n\r\n\t\t# Calculate number of steps\r\n\t\tif batch_size is not None:\r\n\t\t\ttrain_steps = len(X) // batch_size\r\n\t\t\t# Dividing rounds down. If there are some remaining data, but not a full batch,\r\n\t\t\t# this won't include it. Add 1 to include this not full batch\r\n\t\t\tif train_steps * batch_size < len(X):\r\n\t\t\t\ttrain_steps += 1\r\n\r\n\t\t\tif validation_data is not None:\r\n\t\t\t\tvalidation_steps = len(X_val) // batch_size\r\n\t\t\t\t# Dividing rounds down. If there are some remaining data, but not a full batch,\r\n\t\t\t\t# this won't include it. Add 1 to include this not full batch\r\n\t\t\t\tif validation_steps * batch_size < len(X_val):\r\n\t\t\t\t\tvalidation_steps += 1\r\n\r\n\t\t# Main training loop\r\n\t\tfor epoch in range(1, epochs+1):\r\n\r\n\t\t\t# Prit epoch number\r\n\t\t\tprint(f'epoch: {epoch}')\r\n\r\n\t\t\t# Reset accumulated values in loss and accuracy objects\r\n\t\t\tself.loss.new_pass()\r\n\t\t\tself.accuracy.new_pass()\r\n\r\n\t\t\t# Iterate over steps\r\n\t\t\tfor step in range(train_steps):\r\n\r\n\t\t\t\t# If batch size is not set - train using one step and full dataset\r\n\t\t\t\tif batch_size is None:\r\n\t\t\t\t\tbatch_X = X\r\n\t\t\t\t\tbatch_y = y \r\n\r\n\t\t\t\t# Otherwise slice a batch\r\n\t\t\t\telse:\r\n\t\t\t\t\t\tbatch_X = X[step*batch_size:(step+1)*batch_size]\r\n\t\t\t\t\t\tbatch_y = y[step*batch_size:(step+1)*batch_size]\r\n\r\n\t\t\t\t# Perform the forward pass\r\n\t\t\t\toutput = self.forward(batch_X, training=True)\r\n\r\n\t\t\t\t# Calculate loss\r\n\t\t\t\tdata_loss, regularization_loss = \\\r\n\t\t\t\t\tself.loss.calculate(output, batch_y, include_regularization=True)\r\n\t\t\t\tloss = data_loss + regularization_loss\r\n\r\n\t\t\t\t# Get predictions and calculate an accuracy\r\n\t\t\t\tpredictions = self.output_layer_activation.predictions(output)\r\n\t\t\t\taccuracy = self.accuracy.calculate(predictions, batch_y)\r\n\t\t\t\t\r\n\t\t\t\t# Perform a backward pass\r\n\t\t\t\tself.backward(output, batch_y)\r\n\r\n\t\t\t\t# Optimize (update parameters)\r\n\t\t\t\tself.optimizer.pre_update_params()\r\n\t\t\t\tfor layer in self.trainable_layers:\r\n\t\t\t\t\tself.optimizer.update_params(layer)\r\n\t\t\t\tself.optimizer.post_update_params()\r\n\r\n\t\t\t\t# Print a summary\r\n\t\t\t\tif not step % print_every or step == train_steps - 1:\r\n\t\t\t\t\tprint(f'step: {step}, ' +\r\n\t\t\t\t\t\t f'acc: {accuracy:.3f}, ' +\r\n\t\t\t\t\t\t f'loss: {loss:.3f} 
(' +\r\n\t\t\t\t\t\t f'data_loss: {data_loss:.3f}, ' +\r\n\t\t\t\t\t\t f'reg_loss: {regularization_loss:.3f}), ' +\r\n\t\t\t\t\t\t f'lr: {self.optimizer.current_learning_rate}')\r\n\r\n\t\t\t\t# Store stats for overall summary\r\n\t\t\t\tloss_list.append(loss)\r\n\t\t\t\taccuracy_list.append(accuracy)\r\n\t\t\t\tlr_list.append(self.optimizer.current_learning_rate)\r\n\r\n\t\t\t# Get and print epoch loss and accuracy\r\n\t\t\tepoch_data_loss, epoch_regularization_loss = \\\r\n\t\t\t\tself.loss.calculate_accumulated(include_regularization=True)\r\n\t\t\tepoch_loss = epoch_data_loss + epoch_regularization_loss\r\n\t\t\tepoch_accuracy = self.accuracy.calculate_accumulated()\r\n\r\n\t\t\tprint(f'training, ' +\r\n\t\t\t\t f'acc: {epoch_accuracy:.3f}, ' +\r\n\t\t\t\t f'loss: {epoch_loss:.3f} (' +\r\n\t\t\t\t f'data_loss: {epoch_data_loss:.3f}, ' +\r\n\t\t\t\t f'reg_loss: {epoch_regularization_loss:.3f}), ' +\r\n\t\t\t\t f'lr: {self.optimizer.current_learning_rate}')\r\n\r\n\r\n\t\t\t# If there is the validation data\r\n\t\t\tif validation_data is not None:\r\n\r\n\t\t\t\t# Evaluate the model\r\n\t\t\t\tself.evaluate(*validation_data, batch_size=batch_size)\r\n\r\n\t# Performs forward pass\r\n\tdef forward(self, X, training):\r\n\r\n\t\t# Call forward method on the input layer this will set the output property that\r\n\t\t# the first layer in \"prev\" object is expecting\r\n\t\tself.input_layer.forward(X, training)\r\n\r\n\t\t# Call forward method of every object in a chain \r\n\t\t# Pass output of the previous object as a parameter\r\n\t\tfor layer in self.layers:\r\n\t\t\tlayer.forward(layer.prev.output, training)\r\n\r\n\t\t# \"layer\" is now the last object from the list\r\n\t\t# return its output\r\n\t\treturn layer.output\r\n\r\n\t# Performs backward pass\r\n\tdef backward(self, output, y):\r\n\r\n\t\t# If softmax classifier\r\n\t\tif self.softmax_classifier_output is not None:\r\n\t\t\t# First call backward method on the combined activation/loss\r\n\t\t\t# this will set dinputs properly\r\n\t\t\tself.softmax_classifier_output.backward(output, y)\r\n\r\n\t\t\t# Since we'll not call backward method of the last layer\r\n\t\t\t# which is Softmax activation as we used combined activation/loss\r\n\t\t\t# object, let's set dinputs in this object \r\n\t\t\tself.layers[-1].dinputs = \\\r\n\t\t\t\tself.softmax_classifier_output.dinputs\r\n\r\n\t\t\t# Call backward method going through all the objects but last\r\n\t\t\t# in reversed order passing dinputs as a parameter\r\n\t\t\tfor layer in reversed(self.layers[:-1]):\r\n\t\t\t\tlayer.backward(layer.next.dinputs)\r\n\r\n\t\t\treturn\r\n\r\n\t\t# First call backward method on the loss this will set dinputs property\r\n\t\t# that the last layer will try to access shortly\r\n\t\tself.loss.backward(output, y)\r\n\r\n\t\t# Call backward method going through all the objects in reversed order\r\n\t\t# passing dipunpts as a parameter\r\n\t\tfor layer in reversed(self.layers):\r\n\t\t\tlayer.backward(layer.next.dinputs)\r\n\r\n\t# Evaluates the model using passed-in dataset\r\n\tdef evaluate(self, X_val, y_val, *, batch_size=None):\r\n\r\n\t\t# Default value if batch size is not being set\r\n\t\tvalidation_steps = 1\r\n\r\n\t\t# Calculate number of steps\r\n\t\tif batch_size is not None:\r\n\t\t\tvalidation_steps = len(X_val) // batch_size\r\n\t\t\t# Dividing rounds down. 
If there are some remaining data,\r\n\t\t\t# but not a full batch, this won't include it \r\n\t\t\t# Add '1' to include this not full batch\r\n\t\t\tif validation_steps * batch_size < len(X_val):\r\n\t\t\t\tvalidation_steps += 1\r\n\r\n\t\t# Reset accumulated values in loss and accuracy objects\r\n\t\tself.loss.new_pass()\r\n\t\tself.accuracy.new_pass()\r\n\r\n\t\t# Iterate over steps\r\n\t\tfor step in range(validation_steps):\r\n\r\n\t\t\t# If batch size is not set - train using one step and full dataset\r\n\t\t\tif batch_size is None:\r\n\t\t\t\tbatch_X = X_val\r\n\t\t\t\tbatch_y = y_val\r\n\r\n\t\t\t# Otherwise slice a batch\r\n\t\t\telse:\r\n\t\t\t\tbatch_X = X_val[step*batch_size:(step+1)*batch_size]\r\n\t\t\t\tbatch_y = y_val[step*batch_size:(step+1)*batch_size]\r\n\r\n\t\t\t# Perform the forward pass\r\n\t\t\toutput = self.forward(batch_X, training=False)\r\n\r\n\t\t\t# Calculate the los\r\n\t\t\tself.loss.calculate(output, batch_y)\r\n\r\n\t\t\t# Get predictions and calculate an accuracy\r\n\t\t\tpredictions = self.output_layer_activation.predictions(output)\r\n\t\t\tself.accuracy.calculate(predictions, batch_y)\r\n\r\n\t\t# Get and print validation loss and accuracy\r\n\t\tvalidation_loss = self.loss.calculate_accumulated()\r\n\t\tvalidation_accuracy = self.accuracy.calculate_accumulated()\r\n\r\n\t\t# Print a summary\r\n\t\tprint(f'validation, ' +\r\n\t\t\t f'acc: {validation_accuracy:.3f}, ' +\r\n\t\t\t f'loss: {validation_loss:.3f}')\r\n\r\n\t# Predicts onthe samples\r\n\tdef predict(self, X, *, batch_size=None):\r\n\r\n\t\t# Default value if batch size is not being set\r\n\t\tprediction_steps = 1\r\n\r\n\t\t# Calculate number of steps\r\n\t\tif batch_size is not None:\r\n\t\t\tprediction_steps = len(X) // batch_size\r\n\t\t\t# Dividing rounds down. 
If there are some remaining data,\r\n\t\t\t# but not a full batch, this won't include it \r\n\t\t\t# Add '1' to include this not full batch\r\n\t\t\tif prediction_steps * batch_size < len(X):\r\n\t\t\t\tprediction_steps += 1\r\n\r\n\t\t# Model outputs\r\n\t\toutput = []\r\n\r\n\t\t# Iterate over steps \r\n\t\tfor step in range(prediction_steps):\r\n\r\n\t\t\t# If batch size is not set - train ussing one step and full dataset\r\n\t\t\tif batch_size is None:\r\n\t\t\t\tbatch_X = X\r\n\r\n\t\t\t# Otherwise slice a batch \r\n\t\t\telse:\r\n\t\t\t\tbatch_X = X[step*batch_size:(step+1)*batch_size]\r\n\r\n\t\t\t# Perform the forward pass \r\n\t\t\tbatch_output = self.forward(batch_X, training=False)\r\n\r\n\t\t\t# Append batch prediciton to the list of predictions\r\n\t\t\toutput.append(batch_output)\r\n\r\n\t\t# Stack and return results\r\n\t\treturn np.vstack(output)\r\n\r\n\t# Retrieves and returns parameters of trainable layers\r\n\tdef get_parameters(self):\r\n\r\n\t\t# Create a list for parameters\r\n\t\tparameters = []\r\n\r\n\r\n\t\t# Iterate trainable layers and get their parameters\r\n\r\n\t\tfor layer in self.trainable_layers:\r\n\t\t\tparameters.append(layer.get_parameters())\r\n\r\n\t\t# Return a list\r\n\t\treturn parameters\r\n\r\n\t# Updates the model with new parameters\r\n\tdef set_parameters(self, parameters):\r\n\r\n\t\t# Iterate over the parameters and layers\r\n\t\t# and update each layers with each set of the parameters\r\n\t\tfor parameters_set, layer in zip(parameters, self.trainable_layers):\r\n\t\t\tlayer.set_parameters(*parameters_set)\r\n\r\n\t# Saves the parameters to a file\r\n\tdef save_parameters(self, path):\r\n\r\n\t\t# Open a file in the binary-write mode and save parameters to it\r\n\t\twith open(path, 'wb') as f:\r\n\t\t\tpickle.dump(self.get_parameters(), f)\r\n\r\n\t# Load the weights and updates a model instance with them\r\n\tdef load_parameters(self, path):\r\n\r\n\t\t# Open file in the binary-read mode, load weights and update trainable layers\r\n\t\twith open(path, 'rb') as f:\r\n\t\t\tself.set_parameters(pickle.load(f))\r\n\r\n\t# Saves the model\r\n\tdef save(self, path):\r\n\r\n\t\t# Make a deep copy of current model instance\r\n\t\tmodel = copy.deepcopy(self)\r\n\r\n\t\t# Reset accumulated values in loss and accuracy objects\r\n\t\tmodel.loss.new_pass()\r\n\t\tmodel.accuracy.new_pass()\r\n\r\n\t\t# Remove data from input layer and gradients from the loss object\r\n\t\tmodel.input_layer.__dict__.pop('output', None)\r\n\t\tmodel.loss.__dict__.pop('dinputs', None)\r\n\r\n\t\t# For each layer remove inputs, output and dinputs properties\r\n\t\tfor layer in model.layers:\r\n\t\t\tfor property in ['inputs', 'output', 'dinputs', 'dweights', 'dbiases']:\r\n\t\t\t\tlayer.__dict__.pop(property, None)\r\n\r\n\t\t# Open a file in the binary-write mode and save the model\r\n\t\twith open(path, 'wb') as f:\r\n\t\t\tpickle.dump(model, f)\r\n\r\n\t# Outputs stats about the model (Own)\r\n\tdef stats(self, sigma, path_name=None):\r\n\r\n\t\t# Print stats\r\n\t\t# Other layers\r\n\t\tlay = [self.loss, self.optimizer] # self.accuracy can be added\r\n\t\tle = len(self.layers) + len(lay)\r\n\t\tl = len(self.layers)\r\n\r\n\r\n\t\t# Set figure up\r\n\t\tfig = plt.figure(constrained_layout=True,figsize=(20,10))\r\n\t\tout_gs = fig.add_gridspec(2,7)\r\n\r\n\t\t# Get weights and biases\r\n\t\tweights = []\r\n\t\tbiases = []\r\n\t\tfor layer in 
self.trainable_layers:\r\n\t\t\tweights.append(layer.weights.tolist())\r\n\t\t\tbiases.append(layer.biases.tolist())\r\n\r\n\t\tf_ax1 = fig.add_subplot(out_gs[0,0])\r\n\t\tf_ax1.set_title('Model Struktur:')\r\n\t\tlayer_name = []\r\n\t\tfor i in range(le):\r\n\t\t\tif i < l:\r\n\t\t\t\tf_ax1.text(0.1,1-(i+1.5)*(1/(le+1)),f'Layer{i}: {self.layers[i].stat}')\r\n\t\t\t\tlayer_name.append(self.layers[i].stat)\r\n\t\t\telse:\r\n\t\t\t\tf_ax1.text(0.1,1-(i+1.5)*(1/(le+1)),f'Layer{i}: {lay[i-l].stat}')\r\n\t\t\t\tlayer_name.append(lay[i-l].stat)\r\n\r\n\t\tf_ax1.set_axis_off()\r\n\t\tf_ax2 = fig.add_subplot(out_gs[0,2:4])\r\n\t\tf_ax2.plot([np.average(loss_list[i:i+sigma]) for i in range(len(loss_list))])\r\n\t\tf_ax2.set_title(\"Loss\")\r\n\t\tf_ax2.set_xlabel(\"Steps\")\r\n\t\tf_ax3 = fig.add_subplot(out_gs[1,0:2])\r\n\t\tf_ax3.plot([np.average(lr_list[i:i+sigma]) for i in range(len(lr_list))])\r\n\t\tf_ax3.set_title(\"Learning_rate\")\r\n\t\tf_ax3.set_xlabel(\"Steps\")\r\n\t\tf_ax4 = fig.add_subplot(out_gs[1,2:4])\r\n\t\tf_ax4.plot([np.average(accuracy_list[i:i+sigma]) for i in range(len(accuracy_list))])\r\n\t\tf_ax4.set_title(\"Accuracy\")\r\n\t\tf_ax4.set_xlabel(\"Steps\")\r\n\r\n\t\tf_ax5 = fig.add_subplot(out_gs[0,5])\r\n\t\tf_ax5.set_title(\"Weights\")\r\n\t\tf_ax5.xaxis.set_visible(False)\r\n\t\tf_ax5.yaxis.set_visible(False)\r\n\t\tf_ax5.spines[\"left\"].set_color(\"white\")\r\n\t\tf_ax5.spines[\"right\"].set_color(\"white\")\r\n\t\tf_ax5.spines[\"bottom\"].set_color(\"white\")\r\n\t\tf_ax5.spines[\"top\"].set_color(\"white\")\r\n\r\n\t\tf_ax5_inner = out_gs[:2,5].subgridspec(1,len(weights))\r\n\t\taxs5 = f_ax5_inner.subplots(sharey=True)\r\n\t\tfor a, ax5 in np.ndenumerate(axs5):\t\t\t\t\r\n\t\t\tax5.pcolormesh(np.arange(0,len(weights[a[0]][0])+1,1),np.arange(0,len(weights[a[0]])+1,1),\r\n\t\t\t\t\t\t weights[a[0]], cmap=plt.get_cmap('seismic'))\r\n\t\t\tax5.set_title(f\"\\n Layer {a[0]+1}\")\r\n\t\t\tax5.set_xlabel(\"Neuron\")\r\n\r\n\t\tf_ax6 = fig.add_subplot(out_gs[0,6])\r\n\t\tf_ax6.set_title(\"Biases\")\r\n\t\tf_ax6.xaxis.set_visible(False)\r\n\t\tf_ax6.yaxis.set_visible(False)\r\n\t\tf_ax6.spines[\"left\"].set_color(\"white\")\r\n\t\tf_ax6.spines[\"right\"].set_color(\"white\")\r\n\t\tf_ax6.spines[\"bottom\"].set_color(\"white\")\r\n\t\tf_ax6.spines[\"top\"].set_color(\"white\")\r\n\r\n\t\tf_ax6_inner = out_gs[:2,6].subgridspec(1,len(biases))\r\n\t\taxs6 = f_ax6_inner.subplots(sharey=True)\r\n\t\tfor b, ax6 in np.ndenumerate(axs6):\r\n\t\t\t#norm = cl.Normalize(vmin=biases[b[0]].min(),vmax=biases[b[0]].max())\t\t\t\r\n\t\t\t# pcm = ax6.pcolormesh(np.arange(0,2,1),np.arange(0,len(biases[b[0]][0])+1,1),\r\n\t\t\t# \t\t\t np.array(biases[b[0]]).T, cmap=plt.get_cmap('seismic'), norm=norm)\r\n\t\t\t# fig.colorbar(pcm, ax=ax6, location=\"right\")\r\n\t\t\tax6.pcolormesh(np.arange(0,2,1),np.arange(0,len(biases[b[0]][0])+1,1),\r\n\t\t\t \t\t\t np.array(biases[b[0]]).T, cmap=plt.get_cmap('seismic'))\r\n\t\t\tax6.set_title(f\"\\n Layer {b[0]+1}\")\r\n\t\t\tif b[0]==0:\r\n\t\t\t\tax6.set_ylabel(\"Neuron\")\r\n\t\t\r\n\t\t# Save in a folder\r\n\t\tos.mkdir(path_name)\r\n\t\tfull_path_name = str(path_name) + '/'\r\n\r\n\t\t# Save the figure\r\n\t\tpath_name_png = str(full_path_name) + 'figure.PNG'\r\n\t\tplt.savefig(path_name_png)\r\n\t\tplt.show()\r\n\t\t\r\n\t\tpath_name_model = str(full_path_name) + 'Network.model'\r\n\t\tself.save(path_name_model)\r\n\r\n\t\tstatistics = np.array([layer_name, loss_list, lr_list, accuracy_list, weights, biases])\r\n\t\twith 
open(str(full_path_name)+'weibia', 'wb') as f:\r\n\t\t\tpickle.dump(statistics, f)\r\n\t\t\r\n\r\n\t# Loads and returns a model\r\n\t@staticmethod\r\n\tdef load(path):\r\n\r\n\t\t# Open file in the binary-read mode, load a model\r\n\t\twith open(path, 'rb') as f:\r\n\t\t\tmodel = pickle.load(f)\r\n\r\n\t\t# Return a model\r\n\t\treturn model\r\n\r\n\r\n# Loads a MNIST dataset\r\ndef load_mnist_dataset(dataset, path):\r\n\r\n\t# Scan all the directories and create a list of labels\r\n\tlabels = os.listdir(os.path.join(path, dataset))\r\n\r\n\t# Create lists for the samples and labels\r\n\tX = []\r\n\ty = []\r\n\r\n\t# For each label folder \r\n\tfor label in labels:\r\n\t\t# And for each image in given folder\r\n\t\tfor file in os.listdir(os.path.join(path, dataset, label)):\r\n\t\t\t# Read the image \r\n\t\t\timage = cv2.imread(os.path.join(path, dataset, label, file), cv2.IMREAD_GRAYSCALE)\r\n\r\n\t\t\tprint(label)\r\n\r\n\t\t\t# And append it and a label to the lists\r\n\t\t\tX.append(image)\r\n\t\t\ty.append(label)\r\n\r\n\t# Convert the data to proper numpy arrays and return \r\n\treturn np.array(X), np.array(y).astype('uint8') # say that y is int and not float\r\n\r\n# MNIST dataset (train + test)\r\ndef create_data_mnist(path):\r\n\r\n\t# Load both sets seperately\r\n\tX, y = load_mnist_dataset('train', path)\r\n\tX_test, y_test = load_mnist_dataset('test', path)\r\n\r\n\t# And return all the data\r\n\treturn X, y, X_test, y_test\r\n"
] | [
[
"numpy.dot",
"numpy.sqrt",
"matplotlib.pyplot.get_cmap",
"numpy.max",
"numpy.mean",
"numpy.random.randn",
"numpy.zeros_like",
"numpy.exp",
"numpy.ones_like",
"numpy.clip",
"numpy.empty_like",
"numpy.eye",
"numpy.arange",
"numpy.std",
"numpy.argmax",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.log",
"numpy.diagflat",
"matplotlib.pyplot.savefig",
"numpy.random.binomial",
"numpy.ndenumerate",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.absolute",
"numpy.maximum",
"numpy.abs",
"numpy.sign",
"numpy.average",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
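
The Optimizer_Adam class in the record above applies the standard Adam update: exponentially decayed first- and second-moment estimates, bias correction by 1 - beta**(t+1), and a step normalized by the square-rooted second moment. A minimal standalone sketch of that per-tensor update, assuming plain NumPy; the names adam_step, step and grad are illustrative and not taken from the record:

import numpy as np

def adam_step(w, grad, m, v, step, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7):
    # Exponentially decayed estimates of the first and second gradient moments
    m = beta_1 * m + (1 - beta_1) * grad
    v = beta_2 * v + (1 - beta_2) * grad ** 2
    # Bias correction; step starts at 0, so use step + 1 as the record does
    m_hat = m / (1 - beta_1 ** (step + 1))
    v_hat = v / (1 - beta_2 ** (step + 1))
    # Parameter update normalized by the square-rooted second moment
    w = w - lr * m_hat / (np.sqrt(v_hat) + epsilon)
    return w, m, v

This mirrors the weight and bias updates in Optimizer_Adam.update_params; the learning-rate decay applied in pre_update_params is omitted here.
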
amitkumarj441/hafnian | [
"3d0b79c77180db7e415b96826707f8049d690208"
] | [
"thewalrus/tests/test_hermite_multidimensional.py"
] | [
"# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the batch hafnian wrapper function\"\"\"\n# pylint: disable=no-self-use,redefined-outer-name\nfrom itertools import product\n\nimport numpy as np\n\nfrom scipy.special import eval_hermitenorm, eval_hermite\n\nfrom thewalrus import hermite_multidimensional, hafnian_batched, hafnian_repeated\n\n\ndef test_hermite_multidimensional_renorm():\n \"\"\" This tests the renormalized batchhafnian wrapper function to compute photon number statistics for a fixed gaussian state.\n\t\"\"\"\n B = np.sqrt(0.5) * np.array([[0, 1], [1, 0]]) + 0 * 1j\n res = 10\n expected = np.diag(0.5 ** (np.arange(0, res) / 2))\n array = hermite_multidimensional(-B, res, renorm=True)\n\n assert np.allclose(array, expected)\n\n\ndef test_reduction_to_physicists_polys():\n \"\"\"Tests that the multidimensional hermite polynomials reduce to the regular physicists' hermite polynomials in the appropriate limit\"\"\"\n x = np.arange(-1, 1, 0.1)\n init = 1\n n_max = 5\n A = np.ones([init, init], dtype=complex)\n vals = np.array(\n [hermite_multidimensional(2 * A, n_max, y=np.array([x0], dtype=complex)) for x0 in x]\n ).T\n expected = np.array([eval_hermite(i, x) for i in range(len(vals))])\n assert np.allclose(vals, expected)\n\n\ndef test_reduction_to_probabilist_polys():\n \"\"\"Tests that the multidimensional hermite polynomials reduce to the regular probabilist' hermite polynomials in the appropriate limit\"\"\"\n x = np.arange(-1, 1, 0.1)\n init = 1\n n_max = 5\n A = np.ones([init, init], dtype=complex)\n vals = np.array(\n [hermite_multidimensional(A, n_max, y=np.array([x0], dtype=complex)) for x0 in x]\n ).T\n expected = np.array([eval_hermitenorm(i, x) for i in range(len(vals))])\n assert np.allclose(vals, expected)\n\n\ndef test_hafnian_batched():\n \"\"\"Test hafnian_batched against hafnian_repeated for a random symmetric matrix\"\"\"\n n_modes = 4\n A = np.random.rand(n_modes, n_modes) + 1j * np.random.rand(n_modes, n_modes)\n A += A.T\n n_photon = 5\n v1 = np.array([hafnian_repeated(A, q) for q in product(np.arange(n_photon), repeat=n_modes)])\n assert np.allclose(hafnian_batched(A, n_photon, make_tensor=False), v1)\n\n\ndef test_hafnian_batched_loops():\n \"\"\"Test hafnian_batched with loops against hafnian_repeated with loops for a random symmetric matrix\n and a random vector of loops\n \"\"\"\n n_modes = 4\n A = np.random.rand(n_modes, n_modes) + 1j * np.random.rand(n_modes, n_modes)\n A += A.T\n mu = np.random.rand(n_modes) + 1j * np.random.rand(n_modes)\n n_photon = 5\n v1 = np.array(\n [\n hafnian_repeated(A, q, mu=mu, loop=True)\n for q in product(np.arange(n_photon), repeat=n_modes)\n ]\n )\n expected = hafnian_batched(A, n_photon, mu=mu, make_tensor=False)\n\n assert np.allclose(expected, v1)\n\n\ndef test_hafnian_batched_loops_no_edges():\n \"\"\"Test hafnian_batched with loops against hafnian_repeated with loops for a random symmetric matrix\n and a random vector of loops\n \"\"\"\n n_modes = 
4\n A = np.zeros([n_modes, n_modes], dtype=complex)\n mu = np.random.rand(n_modes) + 1j * np.random.rand(n_modes)\n n_photon = 5\n v1 = np.array(\n [\n hafnian_repeated(A, q, mu=mu, loop=True)\n for q in product(np.arange(n_photon), repeat=n_modes)\n ]\n )\n expected = hafnian_batched(A, n_photon, mu=mu, make_tensor=False)\n\n assert np.allclose(expected, v1)\n\n\ndef test_hafnian_batched_zero_loops_no_edges():\n \"\"\"Test hafnian_batched with loops against hafnian_repeated with loops for a the zero matrix\n and a loops\n \"\"\"\n n_modes = 4\n A = np.zeros([n_modes, n_modes], dtype=complex)\n n_photon = 5\n v1 = np.array(\n [hafnian_repeated(A, q, loop=True) for q in product(np.arange(n_photon), repeat=n_modes)]\n )\n expected = hafnian_batched(A, n_photon, make_tensor=False)\n\n assert np.allclose(expected, v1)\n"
] | [
[
"numpy.allclose",
"numpy.sqrt",
"numpy.arange",
"scipy.special.eval_hermitenorm",
"numpy.ones",
"scipy.special.eval_hermite",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AdityaNG/cone-detector-tf | [
"f2eede83caf64753c7b70b3ce017a26d8903469c",
"f2eede83caf64753c7b70b3ce017a26d8903469c"
] | [
"cone_detector_lib.py",
"cone_detector.py"
] | [
"from __future__ import division\n\nimport argparse\nimport logging.config\nimport os\nimport time\n\nimport cv2\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nfrom utils import cv_utils\nfrom utils import operations as ops\nfrom utils import tf_utils\n\nlogging.config.fileConfig('logging.ini')\n\nFROZEN_GRAPH_PATH = 'models/ssd_mobilenet_v1/frozen_inference_graph.pb'\n\nSCORE_THRESHOLD = 0.5\nNON_MAX_SUPPRESSION_THRESHOLD = 0.5\n\n\nclass ConeDetector:\n\n def __init__(self) -> None:\n #detection_graph = tf_utils.load_model(FROZEN_GRAPH_PATH)\n\n #self.sess = tf.Session(graph=detection_graph)\n pass\n\n def ispath(path):\n if not os.path.exists(path):\n raise argparse.ArgumentTypeError('No such file or directory: ' + path)\n else:\n return path\n\n def find_cones(self, img, crop_size=None):\n # Read TensorFlow graph\n detection_graph = tf_utils.load_model(FROZEN_GRAPH_PATH)\n\n with tf.Session(graph=detection_graph) as sess:\n tic = time.time()\n\n boxes = []\n\n if crop_size:\n crop_height = crop_width = crop_size\n crop_step_vertical = crop_step_horizontal = crop_size - 20\n crops, crops_coordinates = ops.extract_crops(\n img, crop_height, crop_width,\n crop_step_vertical, crop_step_horizontal)\n\n detection_dict = tf_utils.run_inference_for_batch(crops, sess)\n\n for box_absolute, boxes_relative in zip(\n crops_coordinates, detection_dict['detection_boxes']):\n boxes.extend(ops.get_absolute_boxes(\n box_absolute,\n boxes_relative[np.any(boxes_relative, axis=1)]))\n\n boxes = np.vstack(boxes)\n boxes = ops.non_max_suppression_fast(\n boxes, NON_MAX_SUPPRESSION_THRESHOLD)\n else:\n detection_dict = tf_utils.run_inference_for_batch(\n np.expand_dims(img, axis=0), sess)\n boxes = detection_dict['detection_boxes']\n boxes = boxes[np.any(boxes, axis=2)]\n\n boxes_scores = detection_dict['detection_scores']\n boxes_scores = boxes_scores[np.nonzero(boxes_scores)]\n\n for box, score in zip(boxes, boxes_scores):\n if score > SCORE_THRESHOLD:\n ymin, xmin, ymax, xmax = box\n color_detected_rgb = cv_utils.predominant_rgb_color(\n img, ymin, xmin, ymax, xmax)\n text = '{:.2f}'.format(score)\n cv_utils.add_rectangle_with_text(\n img, ymin, xmin, ymax, xmax,\n color_detected_rgb, text)\n\n toc = time.time()\n processing_time_ms = (toc - tic) * 1000\n logging.debug('Detected {} objects in {:.2f} ms'.format(\n len(boxes), processing_time_ms))\n\n return img\n",
"from __future__ import division\n\nimport logging\nimport logging.config\nimport time\n\nimport cv2\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nfrom utils import cv_utils\nfrom utils import operations as ops\nfrom utils import tf_utils\n\nlogging.config.fileConfig('logging.ini')\n\nVIDEO_PATH = '../raw_videos/VID_20210721_103948.mp4'\nFROZEN_GRAPH_PATH = 'models/ssd_mobilenet_v1/frozen_inference_graph.pb'\n\nOUTPUT_WINDOW_WIDTH = 640 # Use None to use the original size of the image\nDETECT_EVERY_N_SECONDS = None # Use None to perform detection for each frame\n\n# TUNE ME\nCROP_WIDTH = CROP_HEIGHT = 600\nCROP_STEP_HORIZONTAL = CROP_STEP_VERTICAL = 600 - 20 # no cone bigger than 20px\nSCORE_THRESHOLD = 0.5\nNON_MAX_SUPPRESSION_THRESHOLD = 0.5\n\n\ndef main():\n # Read TensorFlow graph\n detection_graph = tf_utils.load_model(FROZEN_GRAPH_PATH)\n\n # Read video from disk and count frames\n cap = cv2.VideoCapture(VIDEO_PATH)\n\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n # CROP_WIDTH = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n # CROP_HEIGHT = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n with tf.Session(graph=detection_graph) as sess:\n\n processed_images = 0\n while cap.isOpened():\n\n if DETECT_EVERY_N_SECONDS:\n cap.set(cv2.CAP_PROP_POS_FRAMES,\n processed_images * fps * DETECT_EVERY_N_SECONDS)\n\n ret, frame = cap.read()\n if ret:\n tic = time.time()\n\n # crops are images as ndarrays of shape\n # (number_crops, CROP_HEIGHT, CROP_WIDTH, 3)\n # crop coordinates are the ymin, xmin, ymax, xmax coordinates in\n # the original image\n crops, crops_coordinates = ops.extract_crops(\n frame, CROP_HEIGHT, CROP_WIDTH,\n CROP_STEP_VERTICAL, CROP_STEP_VERTICAL)\n\n # Uncomment this if you also uncommented the two lines before\n # creating the TF session.\n # crops = np.array([crops[0]])\n # crops_coordinates = [crops_coordinates[0]]\n\n detection_dict = tf_utils.run_inference_for_batch(crops, sess)\n\n # The detection boxes obtained are relative to each crop. Get\n # boxes relative to the original image\n # IMPORTANT! The boxes coordinates are in the following order:\n # (ymin, xmin, ymax, xmax)\n boxes = []\n for box_absolute, boxes_relative in zip(\n crops_coordinates, detection_dict['detection_boxes']):\n boxes.extend(ops.get_absolute_boxes(\n box_absolute,\n boxes_relative[np.any(boxes_relative, axis=1)]))\n if boxes:\n boxes = np.vstack(boxes)\n\n # Remove overlapping boxes\n boxes = ops.non_max_suppression_fast(\n boxes, NON_MAX_SUPPRESSION_THRESHOLD)\n\n # Get scores to display them on top of each detection\n boxes_scores = detection_dict['detection_scores']\n boxes_scores = boxes_scores[np.nonzero(boxes_scores)]\n\n for box, score in zip(boxes, boxes_scores):\n if score > SCORE_THRESHOLD:\n ymin, xmin, ymax, xmax = box\n color_detected_rgb = cv_utils.predominant_rgb_color(\n frame, ymin, xmin, ymax, xmax)\n text = '{:.2f}'.format(score)\n cv_utils.add_rectangle_with_text(\n frame, ymin, xmin, ymax, xmax,\n color_detected_rgb, text)\n\n if OUTPUT_WINDOW_WIDTH:\n frame = cv_utils.resize_width_keeping_aspect_ratio(\n frame, OUTPUT_WINDOW_WIDTH)\n\n cv2.imshow('Detection result', frame)\n cv2.waitKey(1)\n processed_images += 1\n\n toc = time.time()\n processing_time_ms = (toc - tic) * 100\n logging.debug(\n 'Detected {} objects in {} images in {:.2f} ms'.format(\n len(boxes), len(crops), processing_time_ms))\n\n else:\n # No more frames. Break the loop\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.expand_dims",
"numpy.nonzero",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.Session",
"numpy.any",
"numpy.vstack"
],
[
"numpy.nonzero",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.Session",
"numpy.any",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
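
Both scripts in the cone-detector-tf record stitch per-crop detections back into image coordinates and then call ops.non_max_suppression_fast with a 0.5 threshold to drop overlapping boxes. The body of that helper is not included in the record; the sketch below is a generic IoU-based non-max suppression in NumPy, assuming boxes are (ymin, xmin, ymax, xmax) rows already sorted by score, and is meant only to illustrate the idea:

import numpy as np

def non_max_suppression(boxes, iou_threshold=0.5):
    # boxes: (N, 4) array of (ymin, xmin, ymax, xmax), sorted by descending score
    keep = []
    suppressed = np.zeros(len(boxes), dtype=bool)
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    for i in range(len(boxes)):
        if suppressed[i]:
            continue
        keep.append(i)
        # Intersection of box i with every lower-scored box
        ymin = np.maximum(boxes[i, 0], boxes[i + 1:, 0])
        xmin = np.maximum(boxes[i, 1], boxes[i + 1:, 1])
        ymax = np.minimum(boxes[i, 2], boxes[i + 1:, 2])
        xmax = np.minimum(boxes[i, 3], boxes[i + 1:, 3])
        inter = np.clip(ymax - ymin, 0, None) * np.clip(xmax - xmin, 0, None)
        iou = inter / (areas[i] + areas[i + 1:] - inter + 1e-9)
        # Suppress lower-scored boxes that overlap box i too much
        suppressed[i + 1:] |= iou > iou_threshold
    return boxes[keep]

Only the thresholding behaviour (NON_MAX_SUPPRESSION_THRESHOLD = 0.5) is taken from the scripts; the record's implementation may differ in ordering and tie-breaking.
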
MosyMosy/VDT | [
"e07f28d0cd6367ed30740c147ed2f270ead8fb63"
] | [
"models/resnet10_BITrans.py"
] | [
"import torch\n# from torch.autograd import Variable\nimport torch.nn as nn\nimport math\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.nn.utils.weight_norm import WeightNorm\nfrom Batchtransfer_EMA import BatchInstanceTransNorm as BIT2d\n\ndef init_layer(L):\n # Initialization using fan-in\n if isinstance(L, nn.Conv2d):\n n = L.kernel_size[0]*L.kernel_size[1]*L.out_channels\n L.weight.data.normal_(0,math.sqrt(2.0/float(n)))\n elif isinstance(L, BIT2d):\n L.weight.data.fill_(1)\n L.bias.data.fill_(0)\n\nclass Flatten(nn.Module):\n def __init__(self):\n super(Flatten, self).__init__()\n \n def forward(self, x): \n return x.view(x.size(0), -1)\n\n# Simple ResNet Block\nclass SimpleBlock(nn.Module):\n maml = False #Default\n def __init__(self, indim, outdim, half_res):\n super(SimpleBlock, self).__init__()\n self.indim = indim\n self.outdim = outdim\n\n self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)\n self.BN1 = BIT2d(outdim)\n \n self.C2 = nn.Conv2d(outdim, outdim,kernel_size=3, padding=1,bias=False)\n self.BN2 = BIT2d(outdim)\n\n self.relu1 = nn.ReLU(inplace=True)\n self.relu2 = nn.ReLU(inplace=True)\n\n self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]\n\n self.half_res = half_res\n\n # if the input number of channels is not equal to the output, then need a 1x1 convolution\n if indim!=outdim:\n\n self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)\n self.BNshortcut = BIT2d(outdim)\n\n self.parametrized_layers.append(self.shortcut)\n self.parametrized_layers.append(self.BNshortcut)\n self.shortcut_type = '1x1'\n else:\n self.shortcut_type = 'identity'\n\n for layer in self.parametrized_layers:\n init_layer(layer)\n\n def forward(self, x):\n out = self.C1(x)\n out = self.BN1(out)\n out = self.relu1(out)\n\n out = self.C2(out)\n out = self.BN2(out)\n short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))\n out = out + short_out\n out = self.relu2(out)\n return out\n\n# Bottleneck block\nclass BottleneckBlock(nn.Module):\n def __init__(self, indim, outdim, half_res):\n super(BottleneckBlock, self).__init__()\n bottleneckdim = int(outdim/4)\n self.indim = indim\n self.outdim = outdim\n\n self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)\n self.BN1 = BIT2d(bottleneckdim)\n self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)\n self.BN2 = BIT2d(bottleneckdim)\n self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)\n self.BN3 = BIT2d(outdim)\n\n self.relu = nn.ReLU()\n self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]\n self.half_res = half_res\n\n\n # if the input number of channels is not equal to the output, then need a 1x1 convolution\n if indim!=outdim:\n\n self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if half_res else 1, bias=False)\n\n self.parametrized_layers.append(self.shortcut)\n self.shortcut_type = '1x1'\n else:\n self.shortcut_type = 'identity'\n\n for layer in self.parametrized_layers:\n init_layer(layer)\n\n\n def forward(self, x):\n\n short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)\n out = self.C1(x)\n out = self.BN1(out)\n out = self.relu(out)\n out = self.C2(out)\n out = self.BN2(out)\n out = self.relu(out)\n out = self.C3(out)\n out = self.BN3(out)\n out = out + short_out\n\n out = self.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def 
__init__(self,block,list_of_num_layers, list_of_out_dims, flatten = False):\n # list_of_num_layers specifies number of layers in each stage\n # list_of_out_dims specifies number of output channel for each stage\n super(ResNet,self).__init__()\n assert len(list_of_num_layers)==4, 'Can have only four stages'\n\n conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n bn1 = BIT2d(64)\n\n relu = nn.ReLU()\n pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n init_layer(conv1)\n init_layer(bn1)\n\n trunk = [conv1, bn1, relu, pool1]\n\n indim = 64\n for i in range(4):\n\n for j in range(list_of_num_layers[i]):\n half_res = (i>=1) and (j==0)\n B = block(indim, list_of_out_dims[i], half_res)\n trunk.append(B)\n indim = list_of_out_dims[i]\n\n if flatten:\n # avgpool = nn.AvgPool2d(7)\n avgpool = nn.AdaptiveAvgPool2d((1, 1))\n trunk.append(avgpool)\n trunk.append(Flatten())\n self.final_feat_dim = indim\n else:\n self.final_feat_dim = [ indim, 7, 7]\n\n self.trunk = nn.Sequential(*trunk)\n\n def forward(self,x):\n out = self.trunk(x)\n return out\n\ndef ResNet10_BITrans( flatten = True):\n return ResNet(SimpleBlock, [1,1,1,1],[64,128,256,512], flatten)\n\n\n\n\n\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
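
init_layer in the resnet10_BITrans record draws convolution weights from a zero-mean normal with standard deviation sqrt(2/n), where n = k_h * k_w * out_channels; that count is the kernel fan-out, even though the docstring calls it fan-in (the fan-in variant would use in_channels). A short sketch of both variants for a plain nn.Conv2d, assuming PyTorch; the record's BIT2d norm layer is custom and is not reproduced here:

import math
import torch.nn as nn

def he_normal_(conv: nn.Conv2d, mode: str = "fan_out"):
    # He-style normal initialization: std = sqrt(2 / n)
    k_h, k_w = conv.kernel_size
    if mode == "fan_out":
        n = k_h * k_w * conv.out_channels   # what the record computes
    else:
        n = k_h * k_w * conv.in_channels    # textbook fan-in variant
    conv.weight.data.normal_(0, math.sqrt(2.0 / float(n)))
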
AlkalineDevelopment/56openpilot | [
"fb9a557d77bc8409ff14261e4a05fcd2da709836"
] | [
"selfdrive/controls/lib/lateral_planner.py"
] | [
"import math\nimport numpy as np\nfrom common.realtime import sec_since_boot, DT_MDL\nfrom common.numpy_fast import interp\nfrom selfdrive.swaglog import cloudlog\nfrom selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc\nfrom selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS\nfrom selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE\nfrom selfdrive.config import Conversions as CV\nimport cereal.messaging as messaging\nfrom cereal import log\n\nLaneChangeState = log.LateralPlan.LaneChangeState\nLaneChangeDirection = log.LateralPlan.LaneChangeDirection\n\nLANE_CHANGE_SPEED_MIN = 25 * CV.MPH_TO_MS\nLANE_CHANGE_TIME_MAX = 10.\n\nDESIRES = {\n LaneChangeDirection.none: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,\n },\n LaneChangeDirection.left: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,\n },\n LaneChangeDirection.right: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,\n },\n}\n\n\nclass LateralPlanner:\n def __init__(self, CP, use_lanelines=True, wide_camera=False):\n self.use_lanelines = use_lanelines\n self.LP = LanePlanner(wide_camera)\n\n self.last_cloudlog_t = 0\n self.steer_rate_cost = CP.steerRateCost\n\n self.solution_invalid_cnt = 0\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n self.lane_change_timer = 0.0\n self.lane_change_ll_prob = 1.0\n self.keep_pulse_timer = 0.0\n self.prev_one_blinker = False\n self.desire = log.LateralPlan.Desire.none\n\n self.path_xyz = np.zeros((TRAJECTORY_SIZE, 3))\n self.path_xyz_stds = np.ones((TRAJECTORY_SIZE, 3))\n self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))\n self.t_idxs = np.arange(TRAJECTORY_SIZE)\n self.y_pts = np.zeros(TRAJECTORY_SIZE)\n\n self.lat_mpc = LateralMpc()\n self.reset_mpc(np.zeros(6))\n\n def reset_mpc(self, x0=np.zeros(6)):\n self.x0 = x0\n self.lat_mpc.reset(x0=self.x0)\n\n def update(self, sm):\n v_ego = sm['carState'].vEgo\n active = sm['controlsState'].active\n measured_curvature = sm['controlsState'].curvature\n\n md = sm['modelV2']\n self.LP.parse_model(sm['modelV2'])\n if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:\n self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])\n self.t_idxs = np.array(md.position.t)\n self.plan_yaw = list(md.orientation.z)\n if len(md.position.xStd) == TRAJECTORY_SIZE:\n self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])\n\n # Lane change logic\n one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker\n below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN\n\n if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n else:\n # LaneChangeState.off\n if self.lane_change_state == 
LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:\n self.lane_change_state = LaneChangeState.preLaneChange\n self.lane_change_ll_prob = 1.0\n\n # LaneChangeState.preLaneChange\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n # Set lane change direction\n if sm['carState'].leftBlinker:\n self.lane_change_direction = LaneChangeDirection.left\n elif sm['carState'].rightBlinker:\n self.lane_change_direction = LaneChangeDirection.right\n else: # If there are no blinkers we will go back to LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n\n torque_applied = sm['carState'].steeringPressed and \\\n ((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))\n\n blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))\n\n if not one_blinker or below_lane_change_speed:\n self.lane_change_state = LaneChangeState.off\n elif torque_applied and not blindspot_detected:\n self.lane_change_state = LaneChangeState.laneChangeStarting\n\n # LaneChangeState.laneChangeStarting\n elif self.lane_change_state == LaneChangeState.laneChangeStarting:\n # fade out over .5s\n self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2 * DT_MDL, 0.0)\n\n # 98% certainty\n lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob\n if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:\n self.lane_change_state = LaneChangeState.laneChangeFinishing\n\n # LaneChangeState.laneChangeFinishing\n elif self.lane_change_state == LaneChangeState.laneChangeFinishing:\n # fade in laneline over 1s\n self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)\n if self.lane_change_ll_prob > 0.99:\n self.lane_change_direction = LaneChangeDirection.none\n if one_blinker:\n self.lane_change_state = LaneChangeState.preLaneChange\n else:\n self.lane_change_state = LaneChangeState.off\n\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:\n self.lane_change_timer = 0.0\n else:\n self.lane_change_timer += DT_MDL\n\n self.prev_one_blinker = one_blinker\n\n self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]\n\n # Send keep pulse once per second during LaneChangeStart.preLaneChange\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:\n self.keep_pulse_timer = 0.0\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n self.keep_pulse_timer += DT_MDL\n if self.keep_pulse_timer > 1.0:\n self.keep_pulse_timer = 0.0\n elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:\n self.desire = log.LateralPlan.Desire.none\n\n # Turn off lanes during lane change\n if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:\n self.LP.lll_prob *= self.lane_change_ll_prob\n self.LP.rll_prob *= self.lane_change_ll_prob\n if self.use_lanelines:\n d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)\n self.lat_mpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, self.steer_rate_cost)\n else:\n d_path_xyz = self.path_xyz\n path_cost = np.clip(abs(self.path_xyz[0, 1] / self.path_xyz_stds[0, 1]), 0.5, 1.5) * MPC_COST_LAT.PATH\n # Heading cost is 
useful at low speed, otherwise end of plan can be off-heading\n heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])\n self.lat_mpc.set_weights(path_cost, heading_cost, self.steer_rate_cost)\n y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:, 1])\n heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)\n self.y_pts = y_pts\n\n assert len(y_pts) == LAT_MPC_N + 1\n assert len(heading_pts) == LAT_MPC_N + 1\n self.x0[4] = v_ego\n self.lat_mpc.run(self.x0,\n v_ego,\n CAR_ROTATION_RADIUS,\n y_pts,\n heading_pts)\n # init state for next\n self.x0[3] = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.lat_mpc.x_sol[:, 3])\n\n # Check for infeasible MPC solution\n mpc_nans = any(math.isnan(x) for x in self.lat_mpc.x_sol[:, 3])\n t = sec_since_boot()\n if mpc_nans or self.lat_mpc.solution_status != 0:\n self.reset_mpc()\n self.x0[3] = measured_curvature\n if t > self.last_cloudlog_t + 5.0:\n self.last_cloudlog_t = t\n cloudlog.warning(\"Lateral mpc - nan: True\")\n\n if self.lat_mpc.cost > 20000. or mpc_nans:\n self.solution_invalid_cnt += 1\n else:\n self.solution_invalid_cnt = 0\n\n def publish(self, sm, pm):\n plan_solution_valid = self.solution_invalid_cnt < 2\n plan_send = messaging.new_message('lateralPlan')\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])\n plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)\n plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]\n plan_send.lateralPlan.psis = [float(x) for x in self.lat_mpc.x_sol[0:CONTROL_N, 2]]\n plan_send.lateralPlan.curvatures = [float(x) for x in self.lat_mpc.x_sol[0:CONTROL_N, 3]]\n plan_send.lateralPlan.curvatureRates = [float(x) for x in self.lat_mpc.u_sol[0:CONTROL_N - 1]] + [0.0]\n plan_send.lateralPlan.lProb = float(self.LP.lll_prob)\n plan_send.lateralPlan.rProb = float(self.LP.rll_prob)\n plan_send.lateralPlan.dProb = float(self.LP.d_prob)\n\n plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)\n\n plan_send.lateralPlan.desire = self.desire\n plan_send.lateralPlan.useLaneLines = self.use_lanelines\n plan_send.lateralPlan.laneChangeState = self.lane_change_state\n plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction\n\n pm.send('lateralPlan', plan_send)\n"
] | [
[
"numpy.arange",
"numpy.linalg.norm",
"numpy.ones",
"numpy.column_stack",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
n778509775/JDLBER | [
"20f209348f3aa10b85c61efd7253c94cd64a6a8a"
] | [
"network.py"
] | [
"#!/usr/bin/env python\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef init_weights(m):\n \"\"\" initialize weights of fully connected layer\n \"\"\"\n if type(m) == nn.Linear:\n nn.init.orthogonal_(m.weight, gain=1)\n m.bias.data.zero_()\n elif type(m) == nn.BatchNorm1d:\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\nclass Encoder(nn.Module):\n def __init__(self, num_inputs):\n super(Encoder, self).__init__()\n self.encoder = nn.Sequential(\n nn.BatchNorm1d(num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(num_inputs, num_inputs),\n\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(num_inputs, num_inputs))\n self.encoder.apply(init_weights)\n def forward(self, x):\n x = self.encoder(x)\n return x\n\n\n# Decoder_a\nclass Decoder_a(nn.Module):\n def __init__(self, num_inputs):\n super(Decoder_a, self).__init__()\n self.decoder = nn.Sequential(\n nn.Linear(num_inputs, num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Linear(num_inputs, num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(num_inputs, num_inputs))\n self.decoder.apply(init_weights)\n def forward(self, x):\n x = self.decoder(x)\n return x\n\n# Decoder_b\nclass Decoder_b(nn.Module):\n def __init__(self, num_inputs):\n super(Decoder_b, self).__init__()\n self.decoder = nn.Sequential(\n nn.Linear(num_inputs, num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Linear(num_inputs, num_inputs),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(num_inputs, num_inputs))\n self.decoder.apply(init_weights)\n def forward(self, x):\n x = self.decoder(x)\n return x\n\n#classifier combine with autoencoder\nclass Discriminator(nn.Module):\n def __init__(self, num_inputs):\n super(Discriminator, self).__init__()\n\n self.model = nn.Sequential(\n nn.Linear(num_inputs, 64),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(64, 16),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(16, 1),\n nn.Sigmoid(),\n )\n\n def forward(self, z):\n validity = self.model(z)\n return validity\n\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.init.constant_",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.init.orthogonal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sowmyamanojna/BT3051-Data-Structures-and-Algorithms | [
"09c17e42c2e173a6ab10339f08fbc1505db8ea56",
"09c17e42c2e173a6ab10339f08fbc1505db8ea56"
] | [
"lab_session/calculate_pi.py",
"lab_session/integral.py"
] | [
"import random\nimport matplotlib.pyplot as plt\n\npi_vals = []\n\npi = 0\nn = 100\nm = 10**6\nfor i in range(m):\n\tfor j in range(n):\n\t\t[x, y] = [random.random(), random.random()]\n\t\tif x**2 + y**2 <= 1.0:\n\t\t\tpi += 1\n\tpi = (pi/n)*4\n\n\tpi_vals.append(pi)\n\nitern = [i for i in range(m)]\nplt.plot(itern, pi_vals, '.')\nplt.show()",
"import matplotlib.pyplot as plt\nimport random\n\n\ndef func(x):\n\tval = 4*x**3\n\treturn val\n\n\ns = 0\nfor i in range(ini, end):\n\tx = random.random()\n\ty = random.random()\n\n\tif y <= func(x):\n\t\ts += 1\n\t\tplt.plot(x, y)\n\nprint (\"val =\", s/10**5)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bmu/pandas | [
"549b72f07ffdeb6d54b2865c90d95a256e4231ad",
"549b72f07ffdeb6d54b2865c90d95a256e4231ad"
] | [
"pandas/core/panel.py",
"pandas/tseries/tests/test_tslib.py"
] | [
"\"\"\"\nContains data structures designed for manipulating panel (3-dimensional) data\n\"\"\"\n# pylint: disable=E1103,W0231,W0212,W0621\nfrom __future__ import division\nfrom pandas.compat import (map, zip, range, lrange, lmap, u, OrderedDict,\n OrderedDefaultdict)\nfrom pandas import compat\nimport sys\nimport warnings\nimport numpy as np\nfrom pandas.core.common import (PandasError, _try_sort, _default_index,\n _infer_dtype_from_scalar, notnull, is_list_like)\nfrom pandas.core.categorical import Categorical\nfrom pandas.core.index import (Index, MultiIndex, _ensure_index,\n _get_combined_index)\nfrom pandas.core.indexing import maybe_droplevels\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays,\n create_block_manager_from_blocks)\nfrom pandas.core.series import Series\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.tools.util import cartesian_product\nfrom pandas import compat\nfrom pandas.util.decorators import (deprecate, Appender, Substitution,\n deprecate_kwarg)\nimport pandas.core.common as com\nimport pandas.core.ops as ops\nimport pandas.core.nanops as nanops\nimport pandas.computation.expressions as expressions\nfrom pandas import lib\n\n_shared_doc_kwargs = dict(\n axes='items, major_axis, minor_axis',\n klass=\"Panel\",\n axes_single_arg=\"{0,1,2,'items','major_axis','minor_axis'}\")\n_shared_doc_kwargs['args_transpose'] = (\"three positional arguments: each one\"\n \"of\\n %s\" %\n _shared_doc_kwargs['axes_single_arg'])\n\n\ndef _ensure_like_indices(time, panels):\n \"\"\"\n Makes sure that time and panels are conformable\n \"\"\"\n n_time = len(time)\n n_panel = len(panels)\n u_panels = np.unique(panels) # this sorts!\n u_time = np.unique(time)\n if len(u_time) == n_time:\n time = np.tile(u_time, len(u_panels))\n if len(u_panels) == n_panel:\n panels = np.repeat(u_panels, len(u_time))\n return time, panels\n\n\ndef panel_index(time, panels, names=['time', 'panel']):\n \"\"\"\n Returns a multi-index suitable for a panel-like DataFrame\n\n Parameters\n ----------\n time : array-like\n Time index, does not have to repeat\n panels : array-like\n Panel index, does not have to repeat\n names : list, optional\n List containing the names of the indices\n\n Returns\n -------\n multi_index : MultiIndex\n Time index is the first level, the panels are the second level.\n\n Examples\n --------\n >>> years = range(1960,1963)\n >>> panels = ['A', 'B', 'C']\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),\n (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),\n (1962, 'C')], dtype=object)\n\n or\n\n >>> import numpy as np\n >>> years = np.repeat(range(1960,1963), 3)\n >>> panels = np.tile(['A', 'B', 'C'], 3)\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),\n (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),\n (1962, 'C')], dtype=object)\n \"\"\"\n time, panels = _ensure_like_indices(time, panels)\n time_factor = Categorical.from_array(time)\n panel_factor = Categorical.from_array(panels)\n\n labels = [time_factor.codes, panel_factor.codes]\n levels = [time_factor.categories, panel_factor.categories]\n return MultiIndex(levels, labels, sortorder=None, names=names,\n verify_integrity=False)\n\n\nclass Panel(NDFrame):\n\n \"\"\"\n Represents wide format panel data, stored as 3-dimensional array\n\n Parameters\n ----------\n data : ndarray 
(items x major x minor), or dict of DataFrames\n items : Index or array-like\n axis=0\n major_axis : Index or array-like\n axis=1\n minor_axis : Index or array-like\n axis=2\n dtype : dtype, default None\n Data type to force, otherwise infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n \"\"\"\n\n @property\n def _constructor(self):\n return type(self)\n\n _constructor_sliced = DataFrame\n\n def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,\n copy=False, dtype=None):\n self._init_data(data=data, items=items, major_axis=major_axis,\n minor_axis=minor_axis, copy=copy, dtype=dtype)\n\n def _init_data(self, data, copy, dtype, **kwargs):\n \"\"\"\n Generate ND initialization; axes are passed\n as required objects to __init__\n \"\"\"\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]\n\n if kwargs:\n raise TypeError('_init_data() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n axes = None\n if isinstance(data, BlockManager):\n if any(x is not None for x in passed_axes):\n axes = [x if x is not None else y\n for x, y in zip(passed_axes, data.axes)]\n mgr = data\n elif isinstance(data, dict):\n mgr = self._init_dict(data, passed_axes, dtype=dtype)\n copy = False\n dtype = None\n elif isinstance(data, (np.ndarray, list)):\n mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)\n copy = False\n dtype = None\n else: # pragma: no cover\n raise PandasError('Panel constructor not properly called!')\n\n NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)\n\n def _init_dict(self, data, axes, dtype=None):\n haxis = axes.pop(self._info_axis_number)\n\n # prefilter if haxis passed\n if haxis is not None:\n haxis = _ensure_index(haxis)\n data = OrderedDict((k, v) for k, v\n in compat.iteritems(data) if k in haxis)\n else:\n ks = list(data.keys())\n if not isinstance(data, OrderedDict):\n ks = _try_sort(ks)\n haxis = Index(ks)\n\n for k, v in compat.iteritems(data):\n if isinstance(v, dict):\n data[k] = self._constructor_sliced(v)\n\n # extract axis for remaining axes & create the slicemap\n raxes = [self._extract_axis(self, data, axis=i)\n if a is None else a for i, a in enumerate(axes)]\n raxes_sm = self._extract_axes_for_slice(self, raxes)\n\n # shallow copy\n arrays = []\n haxis_shape = [len(a) for a in raxes]\n for h in haxis:\n v = values = data.get(h)\n if v is None:\n values = np.empty(haxis_shape, dtype=dtype)\n values.fill(np.nan)\n elif isinstance(v, self._constructor_sliced):\n d = raxes_sm.copy()\n d['copy'] = False\n v = v.reindex(**d)\n if dtype is not None:\n v = v.astype(dtype)\n values = v.values\n arrays.append(values)\n\n return self._init_arrays(arrays, haxis, [haxis] + raxes)\n\n def _init_arrays(self, arrays, arr_names, axes):\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n @classmethod\n def from_dict(cls, data, intersect=False, orient='items', dtype=None):\n \"\"\"\n Construct Panel from dict of DataFrame objects\n\n Parameters\n ----------\n data : dict\n {field : DataFrame}\n intersect : boolean\n Intersect indexes of input DataFrames\n orient : {'items', 'minor'}, default 'items'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the items of the result panel, pass 'items'\n (default). 
Otherwise if the columns of the values of the passed\n DataFrame objects should be the items (which in the case of\n mixed-dtype data you should do), instead pass 'minor'\n\n\n Returns\n -------\n Panel\n \"\"\"\n orient = orient.lower()\n if orient == 'minor':\n new_data = OrderedDefaultdict(dict)\n for col, df in compat.iteritems(data):\n for item, s in compat.iteritems(df):\n new_data[item][col] = s\n data = new_data\n elif orient != 'items': # pragma: no cover\n raise ValueError('Orientation must be one of {items, minor}.')\n\n d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)\n ks = list(d['data'].keys())\n if not isinstance(d['data'], OrderedDict):\n ks = list(sorted(ks))\n d[cls._info_axis_name] = Index(ks)\n return cls(**d)\n\n def __getitem__(self, key):\n if isinstance(self._info_axis, MultiIndex):\n return self._getitem_multilevel(key)\n if not (is_list_like(key) or isinstance(key, slice)):\n return super(Panel, self).__getitem__(key)\n return self.ix[key]\n\n def _getitem_multilevel(self, key):\n info = self._info_axis\n loc = info.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_index = info[loc]\n result_index = maybe_droplevels(new_index, key)\n slices = [loc] + [slice(None) for x in range(\n self._AXIS_LEN - 1)]\n new_values = self.values[slices]\n\n d = self._construct_axes_dict(self._AXIS_ORDERS[1:])\n d[self._info_axis_name] = result_index\n result = self._constructor(new_values, **d)\n return result\n else:\n return self._get_item_cache(key)\n\n def _init_matrix(self, data, axes, dtype=None, copy=False):\n values = self._prep_ndarray(self, data, copy=copy)\n\n if dtype is not None:\n try:\n values = values.astype(dtype)\n except Exception:\n raise ValueError('failed to cast to %s' % dtype)\n\n shape = values.shape\n fixed_axes = []\n for i, ax in enumerate(axes):\n if ax is None:\n ax = _default_index(shape[i])\n else:\n ax = _ensure_index(ax)\n fixed_axes.append(ax)\n\n return create_block_manager_from_blocks([values], fixed_axes)\n\n #----------------------------------------------------------------------\n # Comparison methods\n\n def _compare_constructor(self, other, func):\n if not self._indexed_same(other):\n raise Exception('Can only compare identically-labeled '\n 'same type objects')\n\n new_data = {}\n for col in self._info_axis:\n new_data[col] = func(self[col], other[col])\n\n d = self._construct_axes_dict(copy=False)\n return self._constructor(data=new_data, **d)\n\n #----------------------------------------------------------------------\n # Magic methods\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular Panel\n\n Invoked by unicode(df) in py2 only.\n Yields a Unicode String in both py2/py3.\n \"\"\"\n\n class_name = str(self.__class__)\n\n shape = self.shape\n dims = u('Dimensions: %s') % ' x '.join(\n [\"%d (%s)\" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])\n\n def axis_pretty(a):\n v = getattr(self, a)\n if len(v) > 0:\n return u('%s axis: %s to %s') % (a.capitalize(),\n com.pprint_thing(v[0]),\n com.pprint_thing(v[-1]))\n else:\n return u('%s axis: None') % a.capitalize()\n\n output = '\\n'.join(\n [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])\n return output\n\n def _get_plane_axes_index(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes indexes\n \"\"\"\n axis_name = self._get_axis_name(axis)\n\n if axis_name == 'major_axis':\n index = 'minor_axis'\n columns 
= 'items'\n if axis_name == 'minor_axis':\n index = 'major_axis'\n columns = 'items'\n elif axis_name == 'items':\n index = 'major_axis'\n columns = 'minor_axis'\n\n return index, columns\n\n def _get_plane_axes(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes\n \"\"\"\n return [ self._get_axis(axi) for axi in self._get_plane_axes_index(axis) ]\n\n fromDict = from_dict\n\n def to_sparse(self, fill_value=None, kind='block'):\n \"\"\"\n Convert to SparsePanel\n\n Parameters\n ----------\n fill_value : float, default NaN\n kind : {'block', 'integer'}\n\n Returns\n -------\n y : SparseDataFrame\n \"\"\"\n from pandas.core.sparse import SparsePanel\n frames = dict(compat.iteritems(self))\n return SparsePanel(frames, items=self.items,\n major_axis=self.major_axis,\n minor_axis=self.minor_axis,\n default_kind=kind,\n default_fill_value=fill_value)\n\n def to_excel(self, path, na_rep='', engine=None, **kwargs):\n \"\"\"\n Write each DataFrame in Panel to a separate excel sheet\n\n Parameters\n ----------\n path : string or ExcelWriter object\n File path or existing ExcelWriter\n na_rep : string, default ''\n Missing data representation\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n\n Other Parameters\n ----------------\n float_format : string, default None\n Format string for floating point numbers\n cols : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. 
A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : upper left cell row to dump data frame\n startcol : upper left cell column to dump data frame\n\n Notes\n -----\n Keyword arguments (and na_rep) are passed to the ``to_excel`` method\n for each DataFrame written.\n \"\"\"\n from pandas.io.excel import ExcelWriter\n\n if isinstance(path, compat.string_types):\n writer = ExcelWriter(path, engine=engine)\n else:\n writer = path\n kwargs['na_rep'] = na_rep\n\n for item, df in compat.iteritems(self):\n name = str(item)\n df.to_excel(writer, name, **kwargs)\n writer.save()\n\n def as_matrix(self):\n self._consolidate_inplace()\n return self._data.as_matrix()\n\n #----------------------------------------------------------------------\n # Getting and setting elements\n\n def get_value(self, *args, **kwargs):\n \"\"\"\n Quickly retrieve single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n nargs = len(args)\n nreq = self._AXIS_LEN\n\n # require an arg for each axis\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis, you gave'\n ' {0} args, but {1} are required'.format(nargs,\n nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('get_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n return lower.get_value(*args[1:], takeable=takeable)\n\n def set_value(self, *args, **kwargs):\n \"\"\"\n Quickly set single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n value : scalar\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n panel : Panel\n If label combo is contained, will be reference to calling Panel,\n otherwise a new object\n \"\"\"\n # require an arg for each axis and the value\n nargs = len(args)\n nreq = self._AXIS_LEN + 1\n\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis plus the '\n 'value provided, you gave {0} args, but {1} are '\n 'required'.format(nargs, nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('set_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n try:\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n lower.set_value(*args[1:], takeable=takeable)\n return self\n except KeyError:\n axes = self._expand_axes(args)\n d = self._construct_axes_dict_from(self, axes, copy=False)\n result = self.reindex(**d)\n args = list(args)\n likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])\n made_bigger = not np.array_equal(\n axes[0], self._info_axis)\n # how to make this logic simpler?\n if made_bigger:\n com._possibly_cast_item(result, args[0], likely_dtype)\n\n return result.set_value(*args)\n\n def _box_item_values(self, key, values):\n if self.ndim == values.ndim:\n result = self._constructor(values)\n\n # a dup selection will yield a full ndim\n if result._get_axis(0).is_unique:\n result = result[key]\n\n return result\n\n d = 
self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])\n return self._constructor_sliced(values, **d)\n\n def __setitem__(self, key, value):\n shape = tuple(self.shape)\n if isinstance(value, self._constructor_sliced):\n value = value.reindex(\n **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))\n mat = value.values\n elif isinstance(value, np.ndarray):\n if value.shape != shape[1:]:\n raise ValueError(\n 'shape of value must be {0}, shape of given object was '\n '{1}'.format(shape[1:], tuple(map(int, value.shape))))\n mat = np.asarray(value)\n elif np.isscalar(value):\n dtype, value = _infer_dtype_from_scalar(value)\n mat = np.empty(shape[1:], dtype=dtype)\n mat.fill(value)\n else:\n raise TypeError('Cannot set item of type: %s' % str(type(value)))\n\n mat = mat.reshape(tuple([1]) + shape[1:])\n NDFrame._set_item(self, key, mat)\n\n def _unpickle_panel_compat(self, state): # pragma: no cover\n \"Unpickle the panel\"\n _unpickle = com._unpickle_array\n vals, items, major, minor = state\n\n items = _unpickle(items)\n major = _unpickle(major)\n minor = _unpickle(minor)\n values = _unpickle(vals)\n wp = Panel(values, items, major, minor)\n self._data = wp._data\n\n def conform(self, frame, axis='items'):\n \"\"\"\n Conform input DataFrame to align with chosen axis pair.\n\n Parameters\n ----------\n frame : DataFrame\n axis : {'items', 'major', 'minor'}\n\n Axis the input corresponds to. E.g., if axis='major', then\n the frame's columns would be items, and the index would be\n values of the minor axis\n\n Returns\n -------\n DataFrame\n \"\"\"\n axes = self._get_plane_axes(axis)\n return frame.reindex(**self._extract_axes_for_slice(self, axes))\n\n def head(self, n=5):\n raise NotImplementedError\n\n def tail(self, n=5):\n raise NotImplementedError\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\" don't allow a multi reindex on Panel or above ndim \"\"\"\n return False\n\n def dropna(self, axis=0, how='any', inplace=False):\n \"\"\"\n Drop 2D from panel, holding passed axis constant\n\n Parameters\n ----------\n axis : int, default 0\n Axis to hold constant. E.g. axis=1 will drop major_axis entries\n having a certain amount of NA data\n how : {'all', 'any'}, default 'any'\n 'any': one or more values are NA in the DataFrame along the\n axis. 
For 'all' they all must be.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : Panel\n \"\"\"\n axis = self._get_axis_number(axis)\n\n values = self.values\n mask = com.notnull(values)\n\n for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):\n mask = mask.sum(ax)\n\n per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])\n\n if how == 'all':\n cond = mask > 0\n else:\n cond = mask == per_slice\n\n new_ax = self._get_axis(axis)[cond]\n result = self.reindex_axis(new_ax, axis=axis)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _combine(self, other, func, axis=0):\n if isinstance(other, Panel):\n return self._combine_panel(other, func)\n elif isinstance(other, DataFrame):\n return self._combine_frame(other, func, axis=axis)\n elif np.isscalar(other):\n return self._combine_const(other, func)\n\n def _combine_const(self, other, func):\n new_values = func(self.values, other)\n d = self._construct_axes_dict()\n return self._constructor(new_values, **d)\n\n def _combine_frame(self, other, func, axis=0):\n index, columns = self._get_plane_axes(axis)\n axis = self._get_axis_number(axis)\n\n other = other.reindex(index=index, columns=columns)\n\n if axis == 0:\n new_values = func(self.values, other.values)\n elif axis == 1:\n new_values = func(self.values.swapaxes(0, 1), other.values.T)\n new_values = new_values.swapaxes(0, 1)\n elif axis == 2:\n new_values = func(self.values.swapaxes(0, 2), other.values)\n new_values = new_values.swapaxes(0, 2)\n\n return self._constructor(new_values, self.items, self.major_axis,\n self.minor_axis)\n\n def _combine_panel(self, other, func):\n items = self.items.union(other.items)\n major = self.major_axis.union(other.major_axis)\n minor = self.minor_axis.union(other.minor_axis)\n\n # could check that everything's the same size, but forget it\n this = self.reindex(items=items, major=major, minor=minor)\n other = other.reindex(items=items, major=major, minor=minor)\n\n result_values = func(this.values, other.values)\n\n return self._constructor(result_values, items, major, minor)\n\n def major_xs(self, key, copy=None):\n \"\"\"\n Return slice of panel along major axis\n\n Parameters\n ----------\n key : object\n Major axis label\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : DataFrame\n index -> minor axis, columns -> items\n\n Notes\n -----\n major_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of major_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a copy or a view if possible\")\n\n return self.xs(key, axis=self._AXIS_LEN - 2)\n\n def minor_xs(self, key, copy=None):\n \"\"\"\n Return slice of panel along minor axis\n\n Parameters\n ----------\n key : object\n Minor axis label\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : DataFrame\n index -> major axis, columns -> items\n\n Notes\n -----\n minor_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of minor_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a 
copy or a view if possible\")\n\n return self.xs(key, axis=self._AXIS_LEN - 1)\n\n def xs(self, key, axis=1, copy=None):\n \"\"\"\n Return slice of panel along selected axis\n\n Parameters\n ----------\n key : object\n Label\n axis : {'items', 'major', 'minor}, default 1/'major'\n copy : boolean [deprecated]\n Whether to make a copy of the data\n\n Returns\n -------\n y : ndim(self)-1\n\n Notes\n -----\n xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or levels\n it is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n if copy is not None:\n warnings.warn(\"copy keyword is deprecated, \"\n \"default is to return a copy or a view if possible\")\n\n axis = self._get_axis_number(axis)\n if axis == 0:\n return self[key]\n\n self._consolidate_inplace()\n axis_number = self._get_axis_number(axis)\n new_data = self._data.xs(key, axis=axis_number, copy=False)\n result = self._construct_return_type(new_data)\n copy = new_data.is_mixed_type\n result._set_is_copy(self, copy=copy)\n return result\n\n _xs = xs\n\n def _ixs(self, i, axis=0):\n \"\"\"\n i : int, slice, or sequence of integers\n axis : int\n \"\"\"\n\n ax = self._get_axis(axis)\n key = ax[i]\n\n # xs cannot handle a non-scalar key, so just reindex here\n # if we have a multi-index and a single tuple, then its a reduction (GH 7516)\n if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):\n if is_list_like(key):\n indexer = {self._get_axis_name(axis): key}\n return self.reindex(**indexer)\n\n # a reduction\n if axis == 0:\n values = self._data.iget(i)\n return self._box_item_values(key, values)\n\n # xs by position\n self._consolidate_inplace()\n new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)\n return self._construct_return_type(new_data)\n\n def groupby(self, function, axis='major'):\n \"\"\"\n Group data on given axis, returning GroupBy object\n\n Parameters\n ----------\n function : callable\n Mapping function for chosen access\n axis : {'major', 'minor', 'items'}, default 'major'\n\n Returns\n -------\n grouped : PanelGroupBy\n \"\"\"\n from pandas.core.groupby import PanelGroupBy\n axis = self._get_axis_number(axis)\n return PanelGroupBy(self, function, axis=axis)\n\n def to_frame(self, filter_observations=True):\n \"\"\"\n Transform wide format into long (stacked) format as DataFrame whose\n columns are the Panel's items and whose index is a MultiIndex formed\n of the Panel's major and minor axes.\n\n Parameters\n ----------\n filter_observations : boolean, default True\n Drop (major, minor) pairs without a complete set of observations\n across all the items\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n _, N, K = self.shape\n\n if filter_observations:\n # shaped like the return DataFrame\n mask = com.notnull(self.values).all(axis=0)\n # size = mask.sum()\n selector = mask.ravel()\n else:\n # size = N * K\n selector = slice(None, None)\n\n data = {}\n for item in self.items:\n data[item] = self[item].values.ravel()[selector]\n\n def construct_multi_parts(idx, n_repeat, n_shuffle=1):\n axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)\n labels = [x[selector] for x in axis_idx.labels]\n levels = axis_idx.levels\n names = axis_idx.names\n return labels, levels, names\n\n def construct_index_parts(idx, major=True):\n levels = [idx]\n if major:\n labels = [np.arange(N).repeat(K)[selector]]\n names = idx.name or 'major'\n else:\n labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]\n labels = 
[labels.ravel()[selector]]\n names = idx.name or 'minor'\n names = [names]\n return labels, levels, names\n\n if isinstance(self.major_axis, MultiIndex):\n major_labels, major_levels, major_names = construct_multi_parts(\n self.major_axis, n_repeat=K)\n else:\n major_labels, major_levels, major_names = construct_index_parts(\n self.major_axis)\n\n if isinstance(self.minor_axis, MultiIndex):\n minor_labels, minor_levels, minor_names = construct_multi_parts(\n self.minor_axis, n_repeat=N, n_shuffle=K)\n else:\n minor_labels, minor_levels, minor_names = construct_index_parts(\n self.minor_axis, major=False)\n\n levels = major_levels + minor_levels\n labels = major_labels + minor_labels\n names = major_names + minor_names\n\n index = MultiIndex(levels=levels, labels=labels,\n names=names, verify_integrity=False)\n\n return DataFrame(data, index=index, columns=self.items)\n\n to_long = deprecate('to_long', to_frame)\n toLong = deprecate('toLong', to_frame)\n\n def apply(self, func, axis='major', **kwargs):\n \"\"\"\n Applies function along input axis of the Panel\n\n Parameters\n ----------\n func : function\n Function to apply to each combination of 'other' axes\n e.g. if axis = 'items', then the combination of major_axis/minor_axis\n will be passed a Series\n axis : {'major', 'minor', 'items'}\n Additional keyword arguments will be passed as keywords to the function\n\n Examples\n --------\n >>> p.apply(numpy.sqrt) # returns a Panel\n >>> p.apply(lambda x: x.sum(), axis=0) # equiv to p.sum(0)\n >>> p.apply(lambda x: x.sum(), axis=1) # equiv to p.sum(1)\n >>> p.apply(lambda x: x.sum(), axis=2) # equiv to p.sum(2)\n\n Returns\n -------\n result : Pandas Object\n \"\"\"\n\n if kwargs and not isinstance(func, np.ufunc):\n f = lambda x: func(x, **kwargs)\n else:\n f = func\n\n # 2d-slabs\n if isinstance(axis, (tuple,list)) and len(axis) == 2:\n return self._apply_2d(f, axis=axis)\n\n axis = self._get_axis_number(axis)\n\n # try ufunc like\n if isinstance(f, np.ufunc):\n try:\n result = np.apply_along_axis(func, axis, self.values)\n return self._wrap_result(result, axis=axis)\n except (AttributeError):\n pass\n\n # 1d\n return self._apply_1d(f, axis=axis)\n\n def _apply_1d(self, func, axis):\n\n axis_name = self._get_axis_name(axis)\n ax = self._get_axis(axis)\n ndim = self.ndim\n values = self.values\n\n # iter thru the axes\n slice_axis = self._get_axis(axis)\n slice_indexer = [0]*(ndim-1)\n indexer = np.zeros(ndim, 'O')\n indlist = list(range(ndim))\n indlist.remove(axis)\n indexer[axis] = slice(None, None)\n indexer.put(indlist, slice_indexer)\n planes = [ self._get_axis(axi) for axi in indlist ]\n shape = np.array(self.shape).take(indlist)\n\n # all the iteration points\n points = cartesian_product(planes)\n\n results = []\n for i in range(np.prod(shape)):\n\n # construct the object\n pts = tuple([ p[i] for p in points ])\n indexer.put(indlist, slice_indexer)\n\n obj = Series(values[tuple(indexer)],index=slice_axis,name=pts)\n result = func(obj)\n\n results.append(result)\n\n # increment the indexer\n slice_indexer[-1] += 1\n n = -1\n while (slice_indexer[n] >= shape[n]) and (n > (1-ndim)):\n slice_indexer[n-1] += 1\n slice_indexer[n] = 0\n n -= 1\n\n # empty object\n if not len(results):\n return self._constructor(**self._construct_axes_dict())\n\n # same ndim as current\n if isinstance(results[0],Series):\n arr = np.vstack([ r.values for r in results ])\n arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))\n tranp = np.array([axis]+indlist).argsort()\n arr = 
arr.transpose(tuple(list(tranp)))\n return self._constructor(arr,**self._construct_axes_dict())\n\n # ndim-1 shape\n results = np.array(results).reshape(shape)\n if results.ndim == 2 and axis_name != self._info_axis_name:\n results = results.T\n planes = planes[::-1]\n return self._construct_return_type(results,planes)\n\n def _apply_2d(self, func, axis):\n \"\"\" handle 2-d slices, equiv to iterating over the other axis \"\"\"\n\n ndim = self.ndim\n axis = [ self._get_axis_number(a) for a in axis ]\n\n # construct slabs, in 2-d this is a DataFrame result\n indexer_axis = list(range(ndim))\n for a in axis:\n indexer_axis.remove(a)\n indexer_axis = indexer_axis[0]\n\n slicer = [ slice(None,None) ] * ndim\n ax = self._get_axis(indexer_axis)\n\n results = []\n for i, e in enumerate(ax):\n\n slicer[indexer_axis] = i\n sliced = self.iloc[tuple(slicer)]\n\n obj = func(sliced)\n results.append((e,obj))\n\n return self._construct_return_type(dict(results))\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n if numeric_only:\n raise NotImplementedError(\n 'Panel.{0} does not implement numeric_only.'.format(name))\n\n axis_name = self._get_axis_name(axis)\n axis_number = self._get_axis_number(axis_name)\n f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)\n\n result = f(self.values)\n\n axes = self._get_plane_axes(axis_name)\n if result.ndim == 2 and axis_name != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n def _construct_return_type(self, result, axes=None):\n \"\"\" return the type for the ndim of the result \"\"\"\n ndim = getattr(result,'ndim',None)\n\n # need to assume they are the same\n if ndim is None:\n if isinstance(result,dict):\n ndim = getattr(list(compat.itervalues(result))[0],'ndim',None)\n\n # a saclar result\n if ndim is None:\n ndim = 0\n\n # have a dict, so top-level is +1 dim\n else:\n ndim += 1\n\n # scalar\n if ndim == 0:\n return Series(result)\n\n # same as self\n elif self.ndim == ndim:\n \"\"\" return the construction dictionary for these axes \"\"\"\n if axes is None:\n return self._constructor(result)\n return self._constructor(result, **self._construct_axes_dict())\n\n # sliced\n elif self.ndim == ndim + 1:\n if axes is None:\n return self._constructor_sliced(result)\n return self._constructor_sliced(\n result, **self._extract_axes_for_slice(self, axes))\n\n raise PandasError('invalid _construct_return_type [self->%s] '\n '[result->%s]' % (self, result))\n\n def _wrap_result(self, result, axis):\n axis = self._get_axis_name(axis)\n axes = self._get_plane_axes(axis)\n if result.ndim == 2 and axis != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None\n else kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None\n else kwargs.pop('minor', None))\n return super(Panel, self).reindex(items=items, major_axis=major_axis,\n minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None\n else kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None\n else kwargs.pop('minor', None))\n return super(Panel, self).rename(items=items, 
major_axis=major_axis,\n minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=np.nan):\n return super(Panel, self).reindex_axis(labels=labels, axis=axis,\n method=method, level=level,\n copy=copy, limit=limit,\n fill_value=fill_value)\n\n @Appender(_shared_docs['transpose'] % _shared_doc_kwargs)\n def transpose(self, *args, **kwargs):\n return super(Panel, self).transpose(*args, **kwargs)\n\n def count(self, axis='major'):\n \"\"\"\n Return number of observations over requested axis.\n\n Parameters\n ----------\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n count : DataFrame\n \"\"\"\n i = self._get_axis_number(axis)\n\n values = self.values\n mask = np.isfinite(values)\n result = mask.sum(axis=i,dtype='int64')\n\n return self._wrap_result(result, axis)\n\n @deprecate_kwarg(old_arg_name='lags', new_arg_name='periods')\n def shift(self, periods=1, freq=None, axis='major'):\n \"\"\"\n Shift major or minor axis by specified number of leads/lags. Drops\n periods right now compared with DataFrame.shift\n\n Parameters\n ----------\n lags : int\n axis : {'major', 'minor'}\n\n Returns\n -------\n shifted : Panel\n \"\"\"\n if freq:\n return self.tshift(periods, freq, axis=axis)\n\n if axis == 'items':\n raise ValueError('Invalid axis')\n\n return super(Panel, self).slice_shift(periods, axis=axis)\n\n def tshift(self, periods=1, freq=None, axis='major', **kwds):\n return super(Panel, self).tshift(periods, freq, axis, **kwds)\n\n def join(self, other, how='left', lsuffix='', rsuffix=''):\n \"\"\"\n Join items with other Panel either on major and minor axes column\n\n Parameters\n ----------\n other : Panel or list of Panels\n Index should be similar to one of the columns in this one\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. Default: 'left'\n for joining on index, None otherwise\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n\n Returns\n -------\n joined : Panel\n \"\"\"\n from pandas.tools.merge import concat\n\n if isinstance(other, Panel):\n join_major, join_minor = self._get_join_index(other, how)\n this = self.reindex(major=join_major, minor=join_minor)\n other = other.reindex(major=join_major, minor=join_minor)\n merged_data = this._data.merge(other._data, lsuffix, rsuffix)\n return self._constructor(merged_data)\n else:\n if lsuffix or rsuffix:\n raise ValueError('Suffixes not supported when passing '\n 'multiple panels')\n\n if how == 'left':\n how = 'outer'\n join_axes = [self.major_axis, self.minor_axis]\n elif how == 'right':\n raise ValueError('Right join not supported with multiple '\n 'panels')\n else:\n join_axes = None\n\n return concat([self] + list(other), axis=0, join=how,\n join_axes=join_axes, verify_integrity=True)\n\n def update(self, other, join='left', overwrite=True, filter_func=None,\n raise_conflict=False):\n \"\"\"\n Modify Panel in place using non-NA values from passed\n Panel, or object coercible to Panel. 
Aligns on items\n\n Parameters\n ----------\n other : Panel, or object coercible to Panel\n join : How to join individual DataFrames\n {'left', 'right', 'outer', 'inner'}, default 'left'\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling panel\n filter_func : callable(1d-array) -> 1d-array<boolean>, default None\n Can choose to replace values other than NA. Return True for values\n that should be updated\n raise_conflict : bool\n If True, will raise an error if a DataFrame and other both\n contain data in the same place.\n \"\"\"\n\n if not isinstance(other, self._constructor):\n other = self._constructor(other)\n\n axis_name = self._info_axis_name\n axis_values = self._info_axis\n other = other.reindex(**{axis_name: axis_values})\n\n for frame in axis_values:\n self[frame].update(other[frame], join, overwrite, filter_func,\n raise_conflict)\n\n def _get_join_index(self, other, how):\n if how == 'left':\n join_major, join_minor = self.major_axis, self.minor_axis\n elif how == 'right':\n join_major, join_minor = other.major_axis, other.minor_axis\n elif how == 'inner':\n join_major = self.major_axis.intersection(other.major_axis)\n join_minor = self.minor_axis.intersection(other.minor_axis)\n elif how == 'outer':\n join_major = self.major_axis.union(other.major_axis)\n join_minor = self.minor_axis.union(other.minor_axis)\n return join_major, join_minor\n\n # miscellaneous data creation\n @staticmethod\n def _extract_axes(self, data, axes, **kwargs):\n \"\"\" return a list of the axis indicies \"\"\"\n return [self._extract_axis(self, data, axis=i, **kwargs) for i, a\n in enumerate(axes)]\n\n @staticmethod\n def _extract_axes_for_slice(self, axes):\n \"\"\" return the slice dictionary for these axes \"\"\"\n return dict([(self._AXIS_SLICEMAP[i], a)\n for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN -\n len(axes):], axes)])\n\n @staticmethod\n def _prep_ndarray(self, values, copy=True):\n if not isinstance(values, np.ndarray):\n values = np.asarray(values)\n # NumPy strings are a pain, convert to object\n if issubclass(values.dtype.type, compat.string_types):\n values = np.array(values, dtype=object, copy=True)\n else:\n if copy:\n values = values.copy()\n if values.ndim != self._AXIS_LEN:\n raise ValueError(\"The number of dimensions required is {0}, \"\n \"but the number of dimensions of the \"\n \"ndarray given was {1}\".format(self._AXIS_LEN,\n values.ndim))\n return values\n\n @staticmethod\n def _homogenize_dict(self, frames, intersect=True, dtype=None):\n \"\"\"\n Conform set of _constructor_sliced-like objects to either\n an intersection of indices / columns or a union.\n\n Parameters\n ----------\n frames : dict\n intersect : boolean, default True\n\n Returns\n -------\n dict of aligned results & indicies\n \"\"\"\n\n result = dict()\n # caller differs dict/ODict, presered type\n if isinstance(frames, OrderedDict):\n result = OrderedDict()\n\n adj_frames = OrderedDict()\n for k, v in compat.iteritems(frames):\n if isinstance(v, dict):\n adj_frames[k] = self._constructor_sliced(v)\n else:\n adj_frames[k] = v\n\n axes = self._AXIS_ORDERS[1:]\n axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(\n self, adj_frames, axes, intersect=intersect))])\n\n reindex_dict = dict(\n [(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])\n reindex_dict['copy'] = False\n for key, frame in compat.iteritems(adj_frames):\n if frame is not None:\n result[key] = frame.reindex(**reindex_dict)\n else:\n result[key] = None\n\n 
axes_dict['data'] = result\n return axes_dict\n\n @staticmethod\n def _extract_axis(self, data, axis=0, intersect=False):\n\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_frames = False\n\n for v in data.values():\n if isinstance(v, self._constructor_sliced):\n have_frames = True\n indexes.append(v._get_axis(axis))\n elif v is not None:\n have_raw_arrays = True\n raw_lengths.append(v.shape[axis])\n\n if have_frames:\n index = _get_combined_index(indexes, intersect=intersect)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError('ndarrays must match shape on axis %d' % axis)\n\n if have_frames:\n if lengths[0] != len(index):\n raise AssertionError('Length of data and index must match')\n else:\n index = Index(np.arange(lengths[0]))\n\n if index is None:\n index = Index([])\n\n return _ensure_index(index)\n\n @classmethod\n def _add_aggregate_operations(cls, use_numexpr=True):\n \"\"\" add the operations to the cls; evaluate the doc strings again \"\"\"\n\n # doc strings substitors\n _agg_doc = \"\"\"\nWrapper method for %%s\n\nParameters\n----------\nother : %s or %s\"\"\" % (cls._constructor_sliced.__name__, cls.__name__) + \"\"\"\naxis : {\"\"\" + ', '.join(cls._AXIS_ORDERS) + \"}\" + \"\"\"\nAxis to broadcast over\n\nReturns\n-------\n\"\"\" + cls.__name__ + \"\\n\"\n\n def _panel_arith_method(op, name, str_rep=None, default_axis=None,\n fill_zeros=None, **eval_kwargs):\n def na_op(x, y):\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n raise_on_error=True,\n **eval_kwargs)\n except TypeError:\n result = op(x, y)\n\n # handles discrepancy between numpy and numexpr on division/mod\n # by 0 though, given that these are generally (always?)\n # non-scalars, I'm not sure whether it's worth it at the moment\n result = com._fill_zeros(result, x, y, name, fill_zeros)\n return result\n\n @Substitution(name)\n @Appender(_agg_doc)\n def f(self, other, axis=0):\n return self._combine(other, na_op, axis=axis)\n f.__name__ = name\n return f\n\n # add `div`, `mul`, `pow`, etc..\n ops.add_flex_arithmetic_methods(\n cls, _panel_arith_method, use_numexpr=use_numexpr,\n flex_comp_method=ops._comp_method_PANEL)\n\nPanel._setup_axes(axes=['items', 'major_axis', 'minor_axis'],\n info_axis=0,\n stat_axis=1,\n aliases={'major': 'major_axis',\n 'minor': 'minor_axis'},\n slicers={'major_axis': 'index',\n 'minor_axis': 'columns'})\n\nops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)\nPanel._add_aggregate_operations()\nPanel._add_numeric_operations()\n\nWidePanel = Panel\nLongPanel = DataFrame\n",
"import nose\nfrom distutils.version import LooseVersion\nimport numpy as np\n\nfrom pandas import tslib\nimport pandas._period as period\nimport datetime\n\nfrom pandas.core.api import Timestamp, Series, Timedelta, Period\nfrom pandas.tslib import get_timezone\nfrom pandas._period import period_asfreq, period_ordinal\nfrom pandas.tseries.index import date_range\nfrom pandas.tseries.frequencies import get_freq\nimport pandas.tseries.offsets as offsets\nimport pandas.util.testing as tm\nfrom pandas.util.testing import assert_series_equal\n\nclass TestTimestamp(tm.TestCase):\n\n def test_constructor(self):\n base_str = '2014-07-01 09:00'\n base_dt = datetime.datetime(2014, 7, 1, 9)\n base_expected = 1404205200000000000\n\n # confirm base representation is correct\n import calendar\n self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000, base_expected)\n\n tests = [(base_str, base_dt, base_expected),\n ('2014-07-01 10:00', datetime.datetime(2014, 7, 1, 10),\n base_expected + 3600 * 1000000000),\n ('2014-07-01 09:00:00.000008000',\n datetime.datetime(2014, 7, 1, 9, 0, 0, 8), base_expected + 8000),\n ('2014-07-01 09:00:00.000000005',\n Timestamp('2014-07-01 09:00:00.000000005'), base_expected + 5)]\n\n tm._skip_if_no_pytz()\n tm._skip_if_no_dateutil()\n import pytz\n import dateutil\n timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0),\n ('Asia/Tokyo', 9), ('US/Eastern', -4), ('dateutil/US/Pacific', -7),\n (pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)]\n\n for date_str, date, expected in tests:\n for result in [Timestamp(date_str), Timestamp(date)]:\n # only with timestring\n self.assertEqual(result.value, expected)\n self.assertEqual(tslib.pydt_to_i8(result), expected)\n\n # re-creation shouldn't affect to internal value\n result = Timestamp(result)\n self.assertEqual(result.value, expected)\n self.assertEqual(tslib.pydt_to_i8(result), expected)\n\n # with timezone\n for tz, offset in timezones:\n for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:\n expected_tz = expected - offset * 3600 * 1000000000\n self.assertEqual(result.value, expected_tz)\n self.assertEqual(tslib.pydt_to_i8(result), expected_tz)\n\n # should preserve tz\n result = Timestamp(result)\n self.assertEqual(result.value, expected_tz)\n self.assertEqual(tslib.pydt_to_i8(result), expected_tz)\n\n # should convert to UTC\n result = Timestamp(result, tz='UTC')\n expected_utc = expected - offset * 3600 * 1000000000\n self.assertEqual(result.value, expected_utc)\n self.assertEqual(tslib.pydt_to_i8(result), expected_utc)\n\n def test_constructor_with_stringoffset(self):\n # GH 7833\n base_str = '2014-07-01 11:00:00+02:00'\n base_dt = datetime.datetime(2014, 7, 1, 9)\n base_expected = 1404205200000000000\n\n # confirm base representation is correct\n import calendar\n self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000, base_expected)\n\n tests = [(base_str, base_expected),\n ('2014-07-01 12:00:00+02:00', base_expected + 3600 * 1000000000),\n ('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),\n ('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]\n\n tm._skip_if_no_pytz()\n tm._skip_if_no_dateutil()\n import pytz\n import dateutil\n timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0),\n ('Asia/Tokyo', 9), ('US/Eastern', -4),\n ('dateutil/US/Pacific', -7),\n (pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)]\n\n for date_str, expected in tests:\n for result in [Timestamp(date_str)]:\n # only with timestring\n 
self.assertEqual(result.value, expected)\n self.assertEqual(tslib.pydt_to_i8(result), expected)\n\n # re-creation shouldn't affect to internal value\n result = Timestamp(result)\n self.assertEqual(result.value, expected)\n self.assertEqual(tslib.pydt_to_i8(result), expected)\n\n # with timezone\n for tz, offset in timezones:\n result = Timestamp(date_str, tz=tz)\n expected_tz = expected\n self.assertEqual(result.value, expected_tz)\n self.assertEqual(tslib.pydt_to_i8(result), expected_tz)\n\n # should preserve tz\n result = Timestamp(result)\n self.assertEqual(result.value, expected_tz)\n self.assertEqual(tslib.pydt_to_i8(result), expected_tz)\n\n # should convert to UTC\n result = Timestamp(result, tz='UTC')\n expected_utc = expected\n self.assertEqual(result.value, expected_utc)\n self.assertEqual(tslib.pydt_to_i8(result), expected_utc)\n\n # This should be 2013-11-01 05:00 in UTC -> converted to Chicago tz\n result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')\n self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)\n expected_repr = \"Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')\"\n self.assertEqual(repr(result), expected_repr)\n self.assertEqual(result, eval(repr(result)))\n\n # This should be 2013-11-01 05:00 in UTC -> converted to Tokyo tz (+09:00)\n result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')\n self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)\n expected_repr = \"Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')\"\n self.assertEqual(repr(result), expected_repr)\n self.assertEqual(result, eval(repr(result)))\n\n def test_constructor_invalid(self):\n with tm.assertRaisesRegexp(TypeError, 'Cannot convert input'):\n Timestamp(slice(2))\n with tm.assertRaisesRegexp(ValueError, 'Cannot convert Period'):\n Timestamp(Period('1000-01-01'))\n\n def test_conversion(self):\n # GH 9255\n ts = Timestamp('2000-01-01')\n\n result = ts.to_pydatetime()\n expected = datetime.datetime(2000, 1, 1)\n self.assertEqual(result, expected)\n self.assertEqual(type(result), type(expected))\n\n result = ts.to_datetime64()\n expected = np.datetime64(ts.value, 'ns')\n self.assertEqual(result, expected)\n self.assertEqual(type(result), type(expected))\n self.assertEqual(result.dtype, expected.dtype)\n\n def test_repr(self):\n tm._skip_if_no_pytz()\n tm._skip_if_no_dateutil()\n\n dates = ['2014-03-07', '2014-01-01 09:00', '2014-01-01 00:00:00.000000001']\n\n # dateutil zone change (only matters for repr)\n import dateutil\n if dateutil.__version__ >= LooseVersion('2.3') and dateutil.__version__ <= LooseVersion('2.4'):\n timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']\n else:\n timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/America/Los_Angeles']\n\n freqs = ['D', 'M', 'S', 'N']\n\n for date in dates:\n for tz in timezones:\n for freq in freqs:\n\n # avoid to match with timezone name\n freq_repr = \"'{0}'\".format(freq)\n if tz.startswith('dateutil'):\n tz_repr = tz.replace('dateutil', '')\n else:\n tz_repr = tz\n\n date_only = Timestamp(date)\n self.assertIn(date, repr(date_only))\n self.assertNotIn(tz_repr, repr(date_only))\n self.assertNotIn(freq_repr, repr(date_only))\n self.assertEqual(date_only, eval(repr(date_only)))\n\n date_tz = Timestamp(date, tz=tz)\n self.assertIn(date, repr(date_tz))\n self.assertIn(tz_repr, repr(date_tz))\n self.assertNotIn(freq_repr, repr(date_tz))\n self.assertEqual(date_tz, eval(repr(date_tz)))\n\n date_freq = Timestamp(date, offset=freq)\n self.assertIn(date, 
repr(date_freq))\n self.assertNotIn(tz_repr, repr(date_freq))\n self.assertIn(freq_repr, repr(date_freq))\n self.assertEqual(date_freq, eval(repr(date_freq)))\n\n date_tz_freq = Timestamp(date, tz=tz, offset=freq)\n self.assertIn(date, repr(date_tz_freq))\n self.assertIn(tz_repr, repr(date_tz_freq))\n self.assertIn(freq_repr, repr(date_tz_freq))\n self.assertEqual(date_tz_freq, eval(repr(date_tz_freq)))\n\n # this can cause the tz field to be populated, but it's redundant to information in the datestring\n tm._skip_if_no_pytz()\n import pytz\n date_with_utc_offset = Timestamp('2014-03-13 00:00:00-0400', tz=None)\n self.assertIn('2014-03-13 00:00:00-0400', repr(date_with_utc_offset))\n self.assertNotIn('tzoffset', repr(date_with_utc_offset))\n self.assertIn('pytz.FixedOffset(-240)', repr(date_with_utc_offset))\n expr = repr(date_with_utc_offset).replace(\"'pytz.FixedOffset(-240)'\",\n 'pytz.FixedOffset(-240)')\n self.assertEqual(date_with_utc_offset, eval(expr))\n\n def test_bounds_with_different_units(self):\n out_of_bounds_dates = (\n '1677-09-21',\n '2262-04-12',\n )\n\n time_units = ('D', 'h', 'm', 's', 'ms', 'us')\n\n for date_string in out_of_bounds_dates:\n for unit in time_units:\n self.assertRaises(\n ValueError,\n Timestamp,\n np.datetime64(date_string, dtype='M8[%s]' % unit)\n )\n\n in_bounds_dates = (\n '1677-09-23',\n '2262-04-11',\n )\n\n for date_string in in_bounds_dates:\n for unit in time_units:\n Timestamp(\n np.datetime64(date_string, dtype='M8[%s]' % unit)\n )\n\n def test_tz(self):\n t = '2014-02-01 09:00'\n ts = Timestamp(t)\n local = ts.tz_localize('Asia/Tokyo')\n self.assertEqual(local.hour, 9)\n self.assertEqual(local, Timestamp(t, tz='Asia/Tokyo'))\n conv = local.tz_convert('US/Eastern')\n self.assertEqual(conv,\n Timestamp('2014-01-31 19:00', tz='US/Eastern'))\n self.assertEqual(conv.hour, 19)\n\n # preserves nanosecond\n ts = Timestamp(t) + offsets.Nano(5)\n local = ts.tz_localize('Asia/Tokyo')\n self.assertEqual(local.hour, 9)\n self.assertEqual(local.nanosecond, 5)\n conv = local.tz_convert('US/Eastern')\n self.assertEqual(conv.nanosecond, 5)\n self.assertEqual(conv.hour, 19)\n\n def test_tz_localize_ambiguous(self):\n\n ts = Timestamp('2014-11-02 01:00')\n ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)\n ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)\n\n rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern')\n self.assertEqual(rng[1], ts_dst)\n self.assertEqual(rng[2], ts_no_dst)\n self.assertRaises(ValueError, ts.tz_localize, 'US/Eastern', ambiguous='infer')\n\n # GH 8025\n with tm.assertRaisesRegexp(TypeError, 'Cannot localize tz-aware Timestamp, use '\n 'tz_convert for conversions'):\n Timestamp('2011-01-01' ,tz='US/Eastern').tz_localize('Asia/Tokyo')\n\n with tm.assertRaisesRegexp(TypeError, 'Cannot convert tz-naive Timestamp, use '\n 'tz_localize to localize'):\n Timestamp('2011-01-01').tz_convert('Asia/Tokyo')\n\n def test_tz_localize_roundtrip(self):\n for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:\n for t in ['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00',\n '2014-11-05 00:00']:\n ts = Timestamp(t)\n localized = ts.tz_localize(tz)\n self.assertEqual(localized, Timestamp(t, tz=tz))\n\n with tm.assertRaises(TypeError):\n localized.tz_localize(tz)\n\n reset = localized.tz_localize(None)\n self.assertEqual(reset, ts)\n self.assertTrue(reset.tzinfo is None)\n\n def test_tz_convert_roundtrip(self):\n for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:\n for t in 
['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00',\n '2014-11-05 00:00']:\n ts = Timestamp(t, tz='UTC')\n converted = ts.tz_convert(tz)\n\n reset = converted.tz_convert(None)\n self.assertEqual(reset, Timestamp(t))\n self.assertTrue(reset.tzinfo is None)\n self.assertEqual(reset, converted.tz_convert('UTC').tz_localize(None))\n\n def test_barely_oob_dts(self):\n one_us = np.timedelta64(1).astype('timedelta64[us]')\n\n # By definition we can't go out of bounds in [ns], so we\n # convert the datetime64s to [us] so we can go out of bounds\n min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')\n max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')\n\n # No error for the min/max datetimes\n Timestamp(min_ts_us)\n Timestamp(max_ts_us)\n\n # One us less than the minimum is an error\n self.assertRaises(ValueError, Timestamp, min_ts_us - one_us)\n\n # One us more than the maximum is an error\n self.assertRaises(ValueError, Timestamp, max_ts_us + one_us)\n\n def test_utc_z_designator(self):\n self.assertEqual(get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo), 'UTC')\n\n def test_now(self):\n # #9000\n ts_from_string = Timestamp('now')\n ts_from_method = Timestamp.now()\n ts_datetime = datetime.datetime.now()\n\n ts_from_string_tz = Timestamp('now', tz='US/Eastern')\n ts_from_method_tz = Timestamp.now(tz='US/Eastern')\n\n # Check that the delta between the times is less than 1s (arbitrarily small)\n delta = Timedelta(seconds=1)\n self.assertTrue(abs(ts_from_method - ts_from_string) < delta)\n self.assertTrue(abs(ts_datetime - ts_from_method) < delta)\n self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)\n self.assertTrue(abs(ts_from_string_tz.tz_localize(None)\n - ts_from_method_tz.tz_localize(None)) < delta)\n\n def test_today(self):\n\n ts_from_string = Timestamp('today')\n ts_from_method = Timestamp.today()\n ts_datetime = datetime.datetime.today()\n\n ts_from_string_tz = Timestamp('today', tz='US/Eastern')\n ts_from_method_tz = Timestamp.today(tz='US/Eastern')\n\n # Check that the delta between the times is less than 1s (arbitrarily small)\n delta = Timedelta(seconds=1)\n self.assertTrue(abs(ts_from_method - ts_from_string) < delta)\n self.assertTrue(abs(ts_datetime - ts_from_method) < delta)\n self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)\n self.assertTrue(abs(ts_from_string_tz.tz_localize(None)\n - ts_from_method_tz.tz_localize(None)) < delta)\n\nclass TestDatetimeParsingWrappers(tm.TestCase):\n def test_does_not_convert_mixed_integer(self):\n bad_date_strings = (\n '-50000',\n '999',\n '123.1234',\n 'm',\n 'T'\n )\n\n for bad_date_string in bad_date_strings:\n self.assertFalse(\n tslib._does_string_look_like_datetime(bad_date_string)\n )\n\n good_date_strings = (\n '2012-01-01',\n '01/01/2012',\n 'Mon Sep 16, 2013',\n '01012012',\n '0101',\n '1-1',\n )\n\n for good_date_string in good_date_strings:\n self.assertTrue(\n tslib._does_string_look_like_datetime(good_date_string)\n )\n\n\nclass TestArrayToDatetime(tm.TestCase):\n def test_parsing_valid_dates(self):\n arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)\n self.assert_numpy_array_equal(\n tslib.array_to_datetime(arr),\n np.array(\n [\n '2013-01-01T00:00:00.000000000-0000',\n '2013-01-02T00:00:00.000000000-0000'\n ],\n dtype='M8[ns]'\n )\n )\n\n arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)\n self.assert_numpy_array_equal(\n tslib.array_to_datetime(arr),\n np.array(\n [\n '2013-09-16T00:00:00.000000000-0000',\n 
'2013-09-17T00:00:00.000000000-0000'\n ],\n dtype='M8[ns]'\n )\n )\n\n def test_number_looking_strings_not_into_datetime(self):\n # #4601\n # These strings don't look like datetimes so they shouldn't be\n # attempted to be converted\n arr = np.array(['-352.737091', '183.575577'], dtype=object)\n self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)\n\n arr = np.array(['1', '2', '3', '4', '5'], dtype=object)\n self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)\n\n def test_coercing_dates_outside_of_datetime64_ns_bounds(self):\n invalid_dates = [\n datetime.date(1000, 1, 1),\n datetime.datetime(1000, 1, 1),\n '1000-01-01',\n 'Jan 1, 1000',\n np.datetime64('1000-01-01'),\n ]\n\n for invalid_date in invalid_dates:\n self.assertRaises(\n ValueError,\n tslib.array_to_datetime,\n np.array([invalid_date], dtype='object'),\n coerce=False,\n raise_=True,\n )\n self.assertTrue(\n np.array_equal(\n tslib.array_to_datetime(\n np.array([invalid_date], dtype='object'), coerce=True\n ),\n np.array([tslib.iNaT], dtype='M8[ns]')\n )\n )\n\n arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)\n self.assert_numpy_array_equal(\n tslib.array_to_datetime(arr, coerce=True),\n np.array(\n [\n tslib.iNaT,\n '2000-01-01T00:00:00.000000000-0000'\n ],\n dtype='M8[ns]'\n )\n )\n\n def test_coerce_of_invalid_datetimes(self):\n arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)\n\n # Without coercing, the presence of any invalid dates prevents\n # any values from being converted\n self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)\n\n # With coercing, the invalid dates becomes iNaT\n self.assert_numpy_array_equal(\n tslib.array_to_datetime(arr, coerce=True),\n np.array(\n [\n '2013-01-01T00:00:00.000000000-0000',\n tslib.iNaT,\n tslib.iNaT\n ],\n dtype='M8[ns]'\n )\n )\n\n def test_parsing_timezone_offsets(self):\n # All of these datetime strings with offsets are equivalent\n # to the same datetime after the timezone offset is added\n dt_strings = [\n '01-01-2013 08:00:00+08:00',\n '2013-01-01T08:00:00.000000000+0800',\n '2012-12-31T16:00:00.000000000-0800',\n '12-31-2012 23:00:00-01:00',\n ]\n\n expected_output = tslib.array_to_datetime(\n np.array(['01-01-2013 00:00:00'], dtype=object)\n )\n\n for dt_string in dt_strings:\n self.assert_numpy_array_equal(\n tslib.array_to_datetime(\n np.array([dt_string], dtype=object)\n ),\n expected_output\n )\n\nclass TestTimestampNsOperations(tm.TestCase):\n def setUp(self):\n self.timestamp = Timestamp(datetime.datetime.utcnow())\n\n def assert_ns_timedelta(self, modified_timestamp, expected_value):\n value = self.timestamp.value\n modified_value = modified_timestamp.value\n\n self.assertEqual(modified_value - value, expected_value)\n\n def test_timedelta_ns_arithmetic(self):\n self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'), -123)\n\n def test_timedelta_ns_based_arithmetic(self):\n self.assert_ns_timedelta(self.timestamp + np.timedelta64(1234567898, 'ns'), 1234567898)\n\n def test_timedelta_us_arithmetic(self):\n self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'), -123000)\n\n def test_timedelta_ms_arithmetic(self):\n time = self.timestamp + np.timedelta64(-123, 'ms')\n self.assert_ns_timedelta(time, -123000000)\n\n def test_nanosecond_string_parsing(self):\n ts = Timestamp('2013-05-01 07:15:45.123456789')\n # GH 7878\n expected_repr = '2013-05-01 07:15:45.123456789'\n expected_value = 1367392545123456789\n self.assertEqual(ts.value, expected_value)\n self.assertIn(expected_repr, 
repr(ts))\n\n ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')\n self.assertEqual(ts.value, expected_value - 9 * 3600 * 1000000000)\n self.assertIn(expected_repr, repr(ts))\n\n ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')\n self.assertEqual(ts.value, expected_value)\n self.assertIn(expected_repr, repr(ts))\n\n ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')\n self.assertEqual(ts.value, expected_value + 4 * 3600 * 1000000000)\n self.assertIn(expected_repr, repr(ts))\n\n def test_nanosecond_timestamp(self):\n # GH 7610\n expected = 1293840000000000005\n t = Timestamp('2011-01-01') + offsets.Nano(5)\n self.assertEqual(repr(t), \"Timestamp('2011-01-01 00:00:00.000000005')\")\n self.assertEqual(t.value, expected)\n self.assertEqual(t.nanosecond, 5)\n\n t = Timestamp(t)\n self.assertEqual(repr(t), \"Timestamp('2011-01-01 00:00:00.000000005')\")\n self.assertEqual(t.value, expected)\n self.assertEqual(t.nanosecond, 5)\n\n t = Timestamp(np.datetime64('2011-01-01 00:00:00.000000005Z'))\n self.assertEqual(repr(t), \"Timestamp('2011-01-01 00:00:00.000000005')\")\n self.assertEqual(t.value, expected)\n self.assertEqual(t.nanosecond, 5)\n\n expected = 1293840000000000010\n t = t + offsets.Nano(5)\n self.assertEqual(repr(t), \"Timestamp('2011-01-01 00:00:00.000000010')\")\n self.assertEqual(t.value, expected)\n self.assertEqual(t.nanosecond, 10)\n\n t = Timestamp(t)\n self.assertEqual(repr(t), \"Timestamp('2011-01-01 00:00:00.000000010')\")\n self.assertEqual(t.value, expected)\n self.assertEqual(t.nanosecond, 10)\n\n t = Timestamp(np.datetime64('2011-01-01 00:00:00.000000010Z'))\n self.assertEqual(repr(t), \"Timestamp('2011-01-01 00:00:00.000000010')\")\n self.assertEqual(t.value, expected)\n self.assertEqual(t.nanosecond, 10)\n\n def test_nat_arithmetic(self):\n # GH 6873\n nat = tslib.NaT\n t = Timestamp('2014-01-01')\n dt = datetime.datetime(2014, 1, 1)\n delta = datetime.timedelta(3600)\n\n # Timestamp / datetime\n for (left, right) in [(nat, nat), (nat, t), (dt, nat)]:\n # NaT + Timestamp-like should raise TypeError\n with tm.assertRaises(TypeError):\n left + right\n with tm.assertRaises(TypeError):\n right + left\n\n # NaT - Timestamp-like (or inverse) returns NaT\n self.assertTrue((left - right) is tslib.NaT)\n self.assertTrue((right - left) is tslib.NaT)\n\n # timedelta-like\n # offsets are tested in test_offsets.py\n for (left, right) in [(nat, delta)]:\n # NaT + timedelta-like returns NaT\n self.assertTrue((left + right) is tslib.NaT)\n # timedelta-like + NaT should raise TypeError\n with tm.assertRaises(TypeError):\n right + left\n\n self.assertTrue((left - right) is tslib.NaT)\n with tm.assertRaises(TypeError):\n right - left\n\n\nclass TestTslib(tm.TestCase):\n\n def test_intraday_conversion_factors(self):\n self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('H'), False), 24)\n self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('T'), False), 1440)\n self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('S'), False), 86400)\n self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('L'), False), 86400000)\n self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('U'), False), 86400000000)\n self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('N'), False), 86400000000000)\n\n self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('T'), False), 60)\n self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('S'), False), 3600)\n self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('L'), False), 3600000)\n 
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('U'), False), 3600000000)\n self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('N'), False), 3600000000000)\n\n self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('S'), False), 60)\n self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('L'), False), 60000)\n self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('U'), False), 60000000)\n self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('N'), False), 60000000000)\n\n self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('L'), False), 1000)\n self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('U'), False), 1000000)\n self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('N'), False), 1000000000)\n\n self.assertEqual(period_asfreq(1, get_freq('L'), get_freq('U'), False), 1000)\n self.assertEqual(period_asfreq(1, get_freq('L'), get_freq('N'), False), 1000000)\n\n self.assertEqual(period_asfreq(1, get_freq('U'), get_freq('N'), False), 1000)\n\n def test_period_ordinal_start_values(self):\n # information for 1.1.1970\n self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('Y')))\n self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('M')))\n self.assertEqual(1, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('W')))\n self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('D')))\n self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('B')))\n\n def test_period_ordinal_week(self):\n self.assertEqual(1, period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0, get_freq('W')))\n self.assertEqual(2, period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0, get_freq('W')))\n\n self.assertEqual(2284, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0, get_freq('W')))\n self.assertEqual(2285, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0, get_freq('W')))\n\n def test_period_ordinal_business_day(self):\n # Thursday\n self.assertEqual(11415, period_ordinal(2013, 10, 3, 0, 0, 0, 0, 0, get_freq('B')))\n # Friday\n self.assertEqual(11416, period_ordinal(2013, 10, 4, 0, 0, 0, 0, 0, get_freq('B')))\n # Saturday\n self.assertEqual(11417, period_ordinal(2013, 10, 5, 0, 0, 0, 0, 0, get_freq('B')))\n # Sunday\n self.assertEqual(11417, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0, get_freq('B')))\n # Monday\n self.assertEqual(11417, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0, get_freq('B')))\n # Tuesday\n self.assertEqual(11418, period_ordinal(2013, 10, 8, 0, 0, 0, 0, 0, get_freq('B')))\n\n def test_tslib_tz_convert(self):\n def compare_utc_to_local(tz_didx, utc_didx):\n f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)\n result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)\n result_single = np.vectorize(f)(tz_didx.asi8)\n self.assert_numpy_array_equal(result, result_single)\n\n def compare_local_to_utc(tz_didx, utc_didx):\n f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')\n result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')\n result_single = np.vectorize(f)(utc_didx.asi8)\n self.assert_numpy_array_equal(result, result_single)\n\n for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']:\n # US: 2014-03-09 - 2014-11-11\n # MOSCOW: 2014-10-26 / 2014-12-31\n tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)\n utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')\n compare_utc_to_local(tz_didx, utc_didx)\n # local tz to UTC can be differ in hourly (or higher) freqs because of DST\n compare_local_to_utc(tz_didx, utc_didx)\n\n tz_didx = date_range('2000-01-01', '2020-01-01', 
freq='D', tz=tz)\n utc_didx = date_range('2000-01-01', '2020-01-01', freq='D')\n compare_utc_to_local(tz_didx, utc_didx)\n compare_local_to_utc(tz_didx, utc_didx)\n\n tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz)\n utc_didx = date_range('2000-01-01', '2100-01-01', freq='A')\n compare_utc_to_local(tz_didx, utc_didx)\n compare_local_to_utc(tz_didx, utc_didx)\n\n # Check empty array\n result = tslib.tz_convert(np.array([], dtype=np.int64),\n tslib.maybe_get_tz('US/Eastern'),\n tslib.maybe_get_tz('Asia/Tokyo'))\n self.assert_numpy_array_equal(result, np.array([], dtype=np.int64))\n\nclass TestTimestampOps(tm.TestCase):\n def test_timestamp_and_datetime(self):\n self.assertEqual((Timestamp(datetime.datetime(2013, 10, 13)) - datetime.datetime(2013, 10, 12)).days, 1)\n self.assertEqual((datetime.datetime(2013, 10, 12) - Timestamp(datetime.datetime(2013, 10, 13))).days, -1)\n\n def test_timestamp_and_series(self):\n timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', tz='US/Eastern'))\n first_timestamp = timestamp_series[0]\n\n delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')])\n assert_series_equal(timestamp_series - first_timestamp, delta_series)\n assert_series_equal(first_timestamp - timestamp_series, -delta_series)\n\n def test_addition_subtraction_types(self):\n # Assert on the types resulting from Timestamp +/- various date/time objects\n datetime_instance = datetime.datetime(2014, 3, 4)\n timedelta_instance = datetime.timedelta(seconds=1)\n # build a timestamp with a frequency, since then it supports addition/subtraction of integers\n timestamp_instance = date_range(datetime_instance, periods=1, freq='D')[0]\n\n self.assertEqual(type(timestamp_instance + 1), Timestamp)\n self.assertEqual(type(timestamp_instance - 1), Timestamp)\n\n # Timestamp + datetime not supported, though subtraction is supported and yields timedelta\n # more tests in tseries/base/tests/test_base.py\n self.assertEqual(type(timestamp_instance - datetime_instance), Timedelta)\n self.assertEqual(type(timestamp_instance + timedelta_instance), Timestamp)\n self.assertEqual(type(timestamp_instance - timedelta_instance), Timestamp)\n\n # Timestamp +/- datetime64 not supported, so not tested (could possibly assert error raised?)\n timedelta64_instance = np.timedelta64(1, 'D')\n self.assertEqual(type(timestamp_instance + timedelta64_instance), Timestamp)\n self.assertEqual(type(timestamp_instance - timedelta64_instance), Timestamp)\n\n def test_addition_subtraction_preserve_frequency(self):\n timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0]\n timedelta_instance = datetime.timedelta(days=1)\n original_freq = timestamp_instance.freq\n self.assertEqual((timestamp_instance + 1).freq, original_freq)\n self.assertEqual((timestamp_instance - 1).freq, original_freq)\n self.assertEqual((timestamp_instance + timedelta_instance).freq, original_freq)\n self.assertEqual((timestamp_instance - timedelta_instance).freq, original_freq)\n\n timedelta64_instance = np.timedelta64(1, 'D')\n self.assertEqual((timestamp_instance + timedelta64_instance).freq, original_freq)\n self.assertEqual((timestamp_instance - timedelta64_instance).freq, original_freq)\n\n def test_resolution(self):\n\n for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],\n [period.D_RESO, period.D_RESO, period.D_RESO, period.D_RESO,\n period.H_RESO, period.T_RESO, period.S_RESO, period.MS_RESO, period.US_RESO]):\n for tz in [None, 'Asia/Tokyo', 'US/Eastern', 
'dateutil/US/Eastern']:\n idx = date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)\n result = period.resolution(idx.asi8, idx.tz)\n self.assertEqual(result, expected)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] | [
[
"pandas.core.generic.NDFrame.__init__",
"pandas.core.common.is_list_like",
"pandas.util.decorators.deprecate_kwarg",
"pandas.core.categorical.Categorical.from_array",
"numpy.asarray",
"pandas.compat.range",
"pandas.compat.map",
"pandas.core.internals.create_block_manager_from_blocks",
"pandas.core.generic.NDFrame._set_item",
"pandas.compat.iteritems",
"pandas.core.frame.DataFrame",
"pandas.core.common._default_index",
"pandas.core.groupby.PanelGroupBy",
"pandas.core.common.notnull",
"pandas.compat.OrderedDefaultdict",
"pandas.compat.OrderedDict",
"pandas.core.series.Series",
"numpy.unique",
"pandas.core.ops.add_special_arithmetic_methods",
"numpy.arange",
"pandas.core.common._infer_dtype_from_scalar",
"pandas.core.index._ensure_index",
"pandas.util.decorators.deprecate",
"numpy.apply_along_axis",
"numpy.zeros",
"pandas.core.common._possibly_cast_item",
"pandas.util.decorators.Appender",
"pandas.core.index.MultiIndex",
"pandas.core.common.pprint_thing",
"pandas.core.index._get_combined_index",
"pandas.core.common._try_sort",
"pandas.core.common.PandasError",
"pandas.compat.u",
"pandas.core.common._fill_zeros",
"pandas.tools.util.cartesian_product",
"pandas.compat.itervalues",
"numpy.array",
"pandas.computation.expressions.evaluate",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.io.excel.ExcelWriter",
"pandas.util.decorators.Substitution",
"numpy.isfinite",
"pandas.core.internals.create_block_manager_from_arrays",
"numpy.array_equal",
"pandas.core.sparse.SparsePanel",
"numpy.empty",
"pandas.compat.zip",
"numpy.prod",
"numpy.isscalar",
"pandas.core.indexing.maybe_droplevels",
"numpy.vstack",
"pandas.core.index.Index"
],
[
"pandas.core.api.Timedelta",
"pandas.tslib.array_to_datetime",
"pandas.util.testing._skip_if_no_pytz",
"pandas.tseries.frequencies.get_freq",
"pandas.core.api.Timestamp.now",
"pandas.tslib.tz_convert",
"pandas.tslib._does_string_look_like_datetime",
"pandas.tslib.maybe_get_tz",
"pandas.util.testing.assert_series_equal",
"pandas.tslib.tz_convert_single",
"pandas.tslib.pydt_to_i8",
"numpy.timedelta64",
"pandas.core.api.Timestamp.today",
"pandas.core.api.Timestamp",
"numpy.array",
"pandas.util.testing._skip_if_no_dateutil",
"pandas.util.testing.assertRaisesRegexp",
"pandas.tseries.index.date_range",
"numpy.datetime64",
"pandas.tseries.offsets.Nano",
"pandas.util.testing.assertRaises",
"numpy.vectorize",
"pandas._period.resolution",
"pandas.core.api.Period"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DengSonic/PyFR | [
"dde524ed56f4a4feca376b51db4b21eb6fa4b113"
] | [
"pyfr/backends/openmp/base.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom pyfr.backends.base import BaseBackend\n\n\nclass OpenMPBackend(BaseBackend):\n name = 'openmp'\n\n def __init__(self, cfg):\n super().__init__(cfg)\n\n # Take the default alignment requirement to be 32-bytes\n self.alignb = cfg.getint('backend-openmp', 'alignb', 32)\n\n if self.alignb < 32 or (self.alignb & (self.alignb - 1)):\n raise ValueError('Alignment must be a power of 2 and >= 32')\n\n # Compute the SoA size\n self.soasz = self.alignb // np.dtype(self.fpdtype).itemsize\n\n from pyfr.backends.openmp import (blasext, cblas, gimmik, packing,\n provider, types, xsmm)\n\n # Register our data types\n self.base_matrix_cls = types.OpenMPMatrixBase\n self.const_matrix_cls = types.OpenMPConstMatrix\n self.matrix_cls = types.OpenMPMatrix\n self.matrix_bank_cls = types.OpenMPMatrixBank\n self.matrix_slice_cls = types.OpenMPMatrixSlice\n self.queue_cls = types.OpenMPQueue\n self.view_cls = types.OpenMPView\n self.xchg_matrix_cls = types.OpenMPXchgMatrix\n self.xchg_view_cls = types.OpenMPXchgView\n\n # Instantiate mandatory kernel provider classes\n kprovcls = [provider.OpenMPPointwiseKernelProvider,\n blasext.OpenMPBlasExtKernels,\n packing.OpenMPPackingKernels,\n gimmik.OpenMPGiMMiKKernels]\n self._providers = [k(self) for k in kprovcls]\n\n # Instantiate optional kernel provider classes\n for k in [xsmm.OpenMPXSMMKernels, cblas.OpenMPCBLASKernels]:\n try:\n self._providers.append(k(self))\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n pass\n\n # Pointwise kernels\n self.pointwise = self._providers[0]\n\n def _malloc_impl(self, nbytes):\n data = np.zeros(nbytes + self.alignb, dtype=np.uint8)\n offset = -data.ctypes.data % self.alignb\n\n return data[offset:nbytes + offset]\n"
] | [
[
"numpy.zeros",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lartpang/ZoomNet | [
"1f329e80db5469eaf6a513ec384cd19bafdaece2"
] | [
"utils/pipeline/dataloader.py"
] | [
"# -*- coding: utf-8 -*-\n# @Time : 2021/5/29\n# @Author : Lart Pang\n# @GitHub : https://github.com/lartpang\n\nfrom functools import partial\n\nfrom torch.utils import data\n\nfrom utils import builder, misc\n\n\ndef get_tr_loader(cfg, shuffle=True, drop_last=True, pin_memory=True):\n dataset = builder.build_obj_from_registry(\n registry_name=\"DATASETS\",\n obj_name=cfg.datasets.train.dataset_type,\n obj_cfg=dict(\n root=[(name, path) for name, path in cfg.datasets.train.path.items()],\n shape=cfg.datasets.train.shape,\n extra_scales=cfg.train.ms.extra_scales if cfg.train.ms.enable else None,\n interp_cfg=cfg.datasets.train.get(\"interp_cfg\", None),\n ),\n )\n if cfg.use_ddp:\n train_sampler = data.distributed.DistributedSampler(dataset, shuffle=shuffle)\n shuffle = False\n else:\n train_sampler = None\n shuffle = shuffle\n\n if cfg.train.ms.enable:\n collate_fn = getattr(dataset, \"collate_fn\", None)\n assert collate_fn is not None\n else:\n collate_fn = None\n\n loader = data.DataLoader(\n dataset=dataset,\n batch_size=cfg.train.batch_size,\n sampler=train_sampler,\n shuffle=shuffle,\n num_workers=cfg.train.num_workers,\n drop_last=drop_last,\n pin_memory=pin_memory,\n collate_fn=collate_fn,\n worker_init_fn=partial(misc.customized_worker_init_fn, base_seed=cfg.base_seed)\n if cfg.use_custom_worker_init\n else None,\n )\n print(f\"Length of Trainset: {len(dataset)}\")\n return loader\n\n\ndef get_te_loader(cfg, shuffle=False, drop_last=False, pin_memory=True) -> list:\n for i, (te_data_name, te_data_path) in enumerate(cfg.datasets.test.path.items()):\n dataset = builder.build_obj_from_registry(\n registry_name=\"DATASETS\",\n obj_name=cfg.datasets.test.dataset_type,\n obj_cfg=dict(\n root=(te_data_name, te_data_path),\n shape=cfg.datasets.test.shape,\n interp_cfg=cfg.datasets.test.get(\"interp_cfg\", None),\n ),\n )\n\n loader = data.DataLoader(\n dataset=dataset,\n batch_size=cfg.test.batch_size,\n num_workers=cfg.test.num_workers,\n shuffle=shuffle,\n drop_last=drop_last,\n pin_memory=pin_memory,\n collate_fn=getattr(dataset, \"collate_fn\", None),\n worker_init_fn=partial(misc.customized_worker_init_fn, base_seed=cfg.base_seed)\n if cfg.use_custom_worker_init\n else None,\n )\n print(f\"Testing with testset: {te_data_name}: {len(dataset)}\")\n yield te_data_name, te_data_path, loader\n"
] | [
[
"torch.utils.data.distributed.DistributedSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
luca-morreale/semantic-segmentation-pytorch | [
"d823fb4115a7ef5c8d47b3e5995a498bbcd9a9b6"
] | [
"visualization/kitti_visualizer.py"
] | [
"import os\nimport numpy as np\nfrom lib.utils.utils import unique\nfrom visualization.utils_name_generation import generate_image_name\nimport cv2\n\ncolormap = {\n 0: (128, 128, 128), # Sky\n 1: (128, 0, 0), # Building\n 2: (128, 64, 128), # Road\n 3: (0, 0, 192), # Sidewalk\n 4: (64, 64, 128), # Fence\n 5: (128, 128, 0), # Vegetation\n 6: (192, 192, 128), # Pole\n 7: (64, 0, 128), # Car\n 8: (192, 128, 128), # Sign\n 9: (64, 64, 0), # Pedestrian\n 10: (0, 128, 192), # Cyclist\n 11: (0, 0, 0) # Void\n}\n\nconversion_list = {\n 1: 1, #wall\n 2: 1, #building;edifice\n 3: 0, #sky\n 4: 2, #floor;flooring\n 5: 5, #tree\n 6: 1, #ceiling\n 7: 2, #road;route\n 8: 11, #bed\n 9: 1, #windowpane;window\n 10: 5, #grass\n 11: 11, #cabinet\n 12: 3, #sidewalk;pavement\n 13: 9, #person;individual;someone;somebody;mortal;soul\n 14: 2, #earth;ground\n 15: 1, #door;double;door\n 16: 11, #table\n 17: 11, #mountain;mount\n 18: 5, #plant;flora;plant;life\n 19: 11, #curtain;drape;drapery;mantle;pall\n 20: 11, #chair\n 21: 7, #car;auto;automobile;machine;motorcar\n 22: 11, #water\n 23: 11, #painting;picture\n 24: 11, #sofa;couch;lounge\n 25: 11, #shelf\n 26: 1, #house\n 27: 11, #sea\n 28: 11, #mirror\n 29: 11, #rug;carpet;carpeting\n 30: 2, #field\n 31: 11, #armchair\n 32: 11, #seat\n 33: 4, #fence;fencing\n 34: 11, #desk\n 35: 11, #rock;stone\n 36: 11, #wardrobe;closet;press\n 37: 6, #lamp\n 38: 11, #bathtub;bathing;tub;bath;tub\n 39: 4, #railing;rail\n 40: 11, #,cushion\n 41: 11, #base;pedestal;stand\n 42: 11, #box\n 43: 6, #column;pillar\n 44: 8, #signboard;sign\n 45: 11, #chest;of;drawers;chest;bureau;dresser\n 46: 11, #counter\n 47: 2, #sand\n 48: 11, #sink\n 49: 1, #skyscraper\n 50: 11, #fireplace;hearth;open;fireplace\n 51: 11, #refrigerator;icebox\n 52: 11, #grandstand;covered;stand\n 53: 2, #,path\n 54: 4, #stairs;steps\n 55: 2, #runway\n 56: 1, #case;display;case;showcase;vitrine\n 57: 11, #pool;table;billiard;table;snooker;table\n 58: 11, #pillow\n 59: 11, #screen;door;screen\n 60: 4, #stairway;staircase\n 61: 11, #river\n 62: 11, #,bridge;span\n 63: 11, #bookcase\n 64: 11, #blind;screen\n 65: 11, #coffee;table;cocktail;table\n 66: 11, #toilet;can;commode;crapper;pot;potty;stool;throne\n 67: 11, #flower\n 68: 11, #book\n 69: 11, #hill\n 70: 11, #bench\n 71: 11, #countertop\n 72: 11, #stove;kitchen;stove;range;kitchen;range;cooking;stove\n 73: 11, #palm;palm;tree\n 74: 11, #kitchen;island\n 75: 11, #computer;computing;machine;computing;device;data;processor;electronic;computer;information;processing;system\n 76: 11, #swivel;chair\n 77: 11, #boat\n 78: 11, #bar\n 79: 11, #arcade;machine\n 80: 11, #hovel;hut;hutch;shack;shanty\n 81: 7, #bus;autobus;coach;charabanc;double-decker;jitney;motorbus;motorcoach;omnibus;passenger;vehicle\n 82: 11, #towel\n 83: 6, #light;light;source\n 84: 7, #truck;motortruck\n 85: 1, #tower\n 86: 11, #chandelier;pendant;pendent\n 87: 11, #awning;sunshade;sunblind\n 88: 6, #streetlight;street;lamp\n 89: 11, #booth;cubicle;stall;kiosk\n 90: 11, #television;television;receiver;television;set;tv;tv;set;idiot;box;boob;tube;telly;goggle;box\n 91: 11, #airplane;aeroplane;plane\n 92: 11, #dirt;track\n 93: 11, #apparel;wearing;apparel;dress;clothes\n 94: 6, #pole\n 95: 3, #land;ground;soil\n 96: 11, #bannister;banister;balustrade;balusters;handrail\n 97: 11, #escalator;moving;staircase;moving;stairway\n 98: 11, #ottoman;pouf;pouffe;puff;hassock\n 99: 11, #bottle\n 100: 11, #buffet;counter;sideboard\n 101: 11, #poster;posting;placard;notice;bill;card\n 102: 11, #stage\n 103: 7, #van\n 
104: 11, #ship\n 105: 11, #fountain\n 106: 11, #conveyer;belt;conveyor;belt;conveyer;conveyor;transporter\n 107: 11, #canopy\n 108: 11, #washer;automatic;washer;washing;machine\n 109: 11, #plaything;toy\n 110: 11, #swimming;pool;swimming;bath;natatorium\n 111: 11, #0,stool\n 112: 11, #barrel;cask\n 113: 11, #basket;handbasket\n 114: 11, #waterfall;falls\n 115: 11, #tent;collapsible;shelter\n 116: 11, #bag\n 117: 10, #minibike;motorbike\n 118: 11, #cradle\n 119: 11, #oven\n 120: 11, #ball\n 121: 11, #food;solid;food\n 122: 11, #step;stair\n 123: 7, #tank;storage;tank\n 124: 11, #trade;name;brand;name;brand;marque\n 125: 11, #microwave;microwave;oven\n 126: 11, #pot;flowerpot\n 127: 11, #animal;animate;being;beast;brute;creature;fauna\n 128: 10, #bicycle;bike;wheel;cycle\n 129: 11, #lake\n 130: 11, #dishwasher;dish;washer;dishwashing;machine\n 131: 11, #screen;silver;screen;projection;screen\n 132: 11, #blanket;cover\n 133: 11, #sculpture\n 134: 11, #hood;exhaust;hood\n 135: 11, #sconce\n 136: 11, #vase\n 137: 8, #traffic;light;traffic;signal;stoplight\n 138: 11, #tray\n 139: 11, #ashcan;trash;can;garbage;can;wastebin;ash;bin;ash-bin;ashbin;dustbin;trash;barrel;trash;bin\n 140: 11, #fan\n 141: 11, #pier;wharf;wharfage;dock\n 142: 11, #crt;screen\n 143: 11, #plate\n 144: 11, #monitor;monitoring;device\n 145: 11, #bulletin;board;notice;board\n 146: 11, #shower\n 147: 11, #radiator\n 148: 11, #glass;drinking;glass\n 149: 11, #clock\n 150: 11, #flag\n}\n\n\ndef convert_labels_to_kitti(predictions, mode='BGR'):\n predictions = predictions.astype('int')\n labelmap_kitti = np.zeros(predictions.shape, dtype=np.uint8)\n labelmap_rgb = np.zeros((predictions.shape[0], predictions.shape[1], 3),\n dtype=np.uint8)\n for label in unique(predictions):\n if label < 0:\n continue\n\n label_kitti = conversion_list[label + 1]\n\n labelmap_rgb += (predictions == label)[:, :, np.newaxis] * \\\n np.tile(np.uint8(colormap[label_kitti]),\n (predictions.shape[0], predictions.shape[1], 1))\n labelmap_kitti[predictions == label] = label_kitti\n\n if mode == 'BGR':\n return labelmap_kitti, labelmap_rgb[:, :, ::-1]\n else:\n return labelmap_kitti, labelmap_rgb\n\n\ndef visualize_result(data, preds, args):\n (img, info) = data\n\n kitti_pred, pred_color = convert_labels_to_kitti(preds)\n\n # aggregate images and save\n im_vis = pred_color.astype(np.uint8)\n\n img_name_rgb, img_name = generate_image_name(info)\n a = os.path.join(args.output_path, img_name_rgb)\n print(a)\n cv2.imwrite(os.path.join(args.output_path, img_name_rgb), im_vis)\n\n # aggregate images and save\n im_vis = kitti_pred.astype(np.uint8)\n cv2.imwrite(os.path.join(args.output_path, img_name), im_vis)\n\n"
] | [
[
"numpy.uint8",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sofieditmer/deep_learning | [
"43f7f97f09aef1057e088356094d3e869cff5cba"
] | [
"utils/utils.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nThis script stores a plotting function.\n\"\"\"\n\n# Dependencies\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Function for plotting loss and accuracy learning curves.\ndef plot_history(H, epochs):\n \"\"\"\n Utility function for plotting model history using matplotlib\n \n H: model history \n epochs: number of epochs for which the model was trained\n \"\"\"\n plt.style.use(\"fivethirtyeight\")\n plt.figure()\n plt.plot(np.arange(0, epochs), H.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"accuracy\"], label=\"train_acc\")\n plt.plot(np.arange(0, epochs), H.history[\"val_accuracy\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.tight_layout()\n plt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
something678/TodKat | [
"b26d9c617684e60cd25ff225a71adb6bfa3b0a6c"
] | [
"sentence_transformers/SentenceTransformer.py"
] | [
"import json\nimport logging\nimport os\nimport shutil\nfrom collections import OrderedDict\nfrom typing import List, Dict, Tuple, Iterable, Type\nfrom zipfile import ZipFile\nimport sys\n\nimport numpy as np\nimport transformers\nimport torch\nfrom numpy import ndarray\nfrom torch import nn, Tensor\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm, trange\n\nfrom . import __DOWNLOAD_SERVER__\nfrom .evaluation import SentenceEvaluator\nfrom .util import import_from_string, batch_to_device, http_get\nfrom . import __version__\n\n\nclass SentenceTransformer(nn.Sequential):\n def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):\n if modules is not None and not isinstance(modules, OrderedDict):\n modules = OrderedDict(\n [(str(idx), module) for idx, module in enumerate(modules)])\n\n if model_name_or_path is not None and model_name_or_path != \"\":\n logging.info(\"Load pretrained DialogTransformer: {}\".format(\n model_name_or_path))\n\n if '/' not in model_name_or_path and '\\\\' not in model_name_or_path and not os.path.isdir(model_name_or_path):\n logging.info(\"Did not find a / or \\\\ in the name. Assume to download model from server\")\n model_name_or_path = __DOWNLOAD_SERVER__ + model_name_or_path + '.zip'\n\n if model_name_or_path.startswith('http://') or model_name_or_path.startswith('https://'):\n model_url = model_name_or_path\n folder_name = model_url.replace(\"https://\", \"\").replace(\"http://\", \"\").replace(\"/\", \"_\")[:250]\n\n # print('===================')\n\n try:\n from torch.hub import _get_torch_home\n torch_cache_home = _get_torch_home()\n if torch_cache_home.startswith(\n 'C:\\\\Users\\\\something678/.cache\\\\torch'):\n torch_cache_home = torch_cache_home.replace(\n 'C:\\\\Users\\\\something678/.cache\\\\torch',\n ('G:\\\\KnowledgeBaseData'\n '\\\\sentenceTransformers_datasets'\n '\\\\downloaded_saved_model'))\n elif torch_cache_home.startswith(\n '/home/something678/.cache/torch'):\n torch_cache_home = torch_cache_home.replace(\n '/home/something678/.cache/torch',\n ('/media/Data1/something678/sentence-transformers-master'\n '/my_downloaded_saved_model'))\n\n # print('=================== didnt enter exception')\n except ImportError:\n torch_cache_home = os.path.expanduser(\n os.getenv('TORCH_HOME', os.path.join(\n os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))\n default_cache_path = os.path.join(torch_cache_home, 'sentence_transformers')\n model_path = os.path.join(default_cache_path, folder_name)\n os.makedirs(model_path, exist_ok=True)\n\n if not os.listdir(model_path):\n if model_url[-1] is \"/\":\n model_url = model_url[:-1]\n logging.info(\"Downloading sentence transformer model from {} and saving it at {}\".format(model_url, model_path))\n try:\n zip_save_path = os.path.join(model_path, 'model.zip')\n http_get(model_url, zip_save_path)\n with ZipFile(zip_save_path, 'r') as zip:\n zip.extractall(model_path)\n except Exception as e:\n shutil.rmtree(model_path)\n raise e\n else:\n model_path = model_name_or_path\n\n #### Load from disk\n if model_path is not None:\n logging.info(\"Load SentenceTransformer from folder: {}\".format(model_path))\n\n if os.path.exists(os.path.join(model_path, 'config.json')):\n with open(os.path.join(model_path, 'config.json')) as fIn:\n config = json.load(fIn)\n if config['__version__'] > __version__:\n logging.warning(\"You try to use a model that was created with version {}, however, your version is {}. 
This might cause unexpected behavior or errors. In that case, try to update to the latest version.\\n\\n\\n\".format(config['__version__'], __version__))\n\n with open(os.path.join(model_path, 'modules.json')) as fIn:\n contained_modules = json.load(fIn)\n\n # the modules are bert, LSTM and so-on\n modules = OrderedDict()\n for module_config in contained_modules:\n module_class = import_from_string(module_config['type'])\n module = module_class.load(os.path.join(model_path, module_config['path']))\n modules[module_config['name']] = module\n\n super().__init__(modules)\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n logging.info(\"Use pytorch device: {}\".format(device))\n self.device = torch.device(device)\n self.to(device)\n\n def encode(self, sentences: List[str], batch_size: int = 8, show_progress_bar: bool = None) -> List[ndarray]:\n \"\"\"\n :param sentences:\n the sentences to embed\n :param batch_size:\n the batch size used for the computation\n :param show_progress_bar:\n Output a progress bar when encode sentences\n :return:\n a list with ndarrays of the embeddings for each sentence\n \"\"\"\n if show_progress_bar is None:\n show_progress_bar = (logging.getLogger().getEffectiveLevel()==logging.INFO or logging.getLogger().getEffectiveLevel()==logging.DEBUG)\n\n all_embeddings = []\n length_sorted_idx = np.argsort([len(sen) for sen in sentences])\n\n iterator = range(0, len(sentences), batch_size)\n if show_progress_bar:\n iterator = tqdm(iterator, desc=\"Batches\")\n\n for batch_idx in iterator:\n batch_tokens = []\n\n batch_start = batch_idx\n batch_end = min(batch_start + batch_size, len(sentences))\n\n longest_seq = 0\n\n for idx in length_sorted_idx[batch_start: batch_end]:\n sentence = sentences[idx]\n tokens = self.tokenize(sentence)\n longest_seq = max(longest_seq, len(tokens))\n batch_tokens.append(tokens)\n\n features = {}\n for text in batch_tokens:\n sentence_features = self.get_sentence_features(text, longest_seq)\n\n for feature_name in sentence_features:\n if feature_name not in features:\n features[feature_name] = []\n features[feature_name].append(sentence_features[feature_name])\n\n for feature_name in features:\n features[feature_name] = torch.tensor(np.asarray(features[feature_name])).to(self.device)\n\n with torch.no_grad():\n embeddings = self.forward(features)\n embeddings = embeddings['sentence_embedding'].to('cpu').numpy()\n all_embeddings.extend(embeddings)\n\n reverting_order = np.argsort(length_sorted_idx)\n all_embeddings = [all_embeddings[idx] for idx in reverting_order]\n\n return all_embeddings\n\n def get_max_seq_length(self):\n if hasattr(self._first_module(), 'max_seq_length'):\n return self._first_module().max_seq_length\n\n return None\n\n def tokenize(self, text):\n return self._first_module().tokenize(text)\n\n def get_sentence_features(self, *features):\n return self._first_module().get_sentence_features(*features)\n\n def get_sentence_embedding_dimension(self):\n return self._last_module().get_sentence_embedding_dimension()\n\n def _first_module(self):\n \"\"\"Returns the first module of this sequential embedder\"\"\"\n return self._modules[next(iter(self._modules))]\n\n def _last_module(self):\n \"\"\"Returns the last module of this sequential embedder\"\"\"\n return self._modules[next(reversed(self._modules))]\n\n def save(self, path):\n \"\"\"\n Saves all elements for this seq. 
sentence embedder into different sub-folders\n \"\"\"\n if path is None:\n return\n\n logging.info(\"Save model to {}\".format(path))\n contained_modules = []\n\n for idx, name in enumerate(self._modules):\n module = self._modules[name]\n model_path = os.path.join(path, str(idx)+\"_\"+type(module).__name__)\n os.makedirs(model_path, exist_ok=True)\n module.save(model_path)\n contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})\n\n with open(os.path.join(path, 'modules.json'), 'w') as fOut:\n json.dump(contained_modules, fOut, indent=2)\n\n with open(os.path.join(path, 'config.json'), 'w') as fOut:\n json.dump({'__version__': __version__}, fOut, indent=2)\n\n def smart_batching_collate(self, batch):\n \"\"\"\n Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model\n batchsizes vary among the batches.\n the list of two-sentnce pairs are batched so that\n they can be fed to bert\n Actually it converts instances to the batches\n The dataloader has default collate_fn, that is, each batch is a list,\n and [0] is feature[0], [1] is feature[1], etc., see collate_fn in\n dataloader.py for detailed usages\n\n :param batch:\n a batch from a SmartBatchingDataset\n :return:\n a batch of tensors for the model\n \"\"\"\n num_texts = len(batch[0][0])\n\n labels = []\n paired_texts = [[] for _ in range(num_texts)]\n max_seq_len = [0] * num_texts\n for tokens, label in batch:\n labels.append(label)\n for i in range(num_texts):\n paired_texts[i].append(tokens[i])\n max_seq_len[i] = max(max_seq_len[i], len(tokens[i]))\n\n features = []\n for idx in range(num_texts):\n max_len = max_seq_len[idx]\n feature_lists = {}\n for text in paired_texts[idx]:\n sentence_features = self.get_sentence_features(text, max_len)\n\n for feature_name in sentence_features:\n if feature_name not in feature_lists:\n feature_lists[feature_name] = []\n feature_lists[feature_name].append(sentence_features[feature_name])\n\n for feature_name in feature_lists:\n feature_lists[feature_name] = torch.tensor(np.asarray(feature_lists[feature_name]))\n\n features.append(feature_lists)\n\n return {'features': features, 'labels': torch.stack(labels)}\n\n def fit(self,\n train_objectives: Iterable[Tuple[DataLoader, nn.Module]],\n evaluator: SentenceEvaluator,\n epochs: int = 1,\n steps_per_epoch = None,\n scheduler: str = 'WarmupLinear',\n warmup_steps: int = 10000,\n optimizer_class: Type[Optimizer] = transformers.AdamW,\n optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},\n weight_decay: float = 0.01,\n evaluation_steps: int = 0,\n output_path: str = None,\n save_best_model: bool = True,\n max_grad_norm: float = 1,\n fp16: bool = False,\n fp16_opt_level: str = 'O1',\n local_rank: int = -1\n ):\n \"\"\"\n :param weight_decay:\n :param scheduler:\n :param warmup_steps:\n :param optimizer:\n :param evaluation_steps:\n :param output_path:\n :param save_best_model:\n :param max_grad_norm:\n :param fp16:\n :param fp16_opt_level:\n :param local_rank:\n :param train_objectives:\n Tuples of DataLoader and LossConfig\n :param evaluator:\n :param epochs:\n :param steps_per_epoch: Train for x steps in each epoch. 
If set to None, the length of the dataset will be used\n \"\"\"\n if output_path is not None:\n os.makedirs(output_path, exist_ok=True)\n if os.listdir(output_path):\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(\n output_path))\n\n dataloaders = [dataloader for dataloader, _ in train_objectives]\n '''\n Each dataloader corresponds to a model, denoted as the train_objectives here\n '''\n\n # Use smart batching\n for dataloader in dataloaders:\n dataloader.collate_fn = self.smart_batching_collate\n\n '''\n '''\n loss_models = [loss for _, loss in train_objectives]\n # retrieve the loss_models\n device = self.device\n for loss_model in loss_models:\n loss_model.to(device)\n\n self.best_score = -9999999\n\n if steps_per_epoch is None or steps_per_epoch == 0:\n steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])\n # the smallerest dataset determines the steps_per_epoch, that is\n # the num_of_batches per epoch\n\n num_train_steps = int(steps_per_epoch * epochs)\n\n # Prepare optimizers\n optimizers = []\n schedulers = []\n # for each epoch\n # >>> lambda1 = lambda epoch: epoch // 30\n # >>> lambda2 = lambda epoch: 0.95 ** epoch\n # >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])\n for loss_model in loss_models:\n param_optimizer = list(loss_model.named_parameters())\n '''\n Choose parameters to optimize\n '''\n\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n t_total = num_train_steps\n if local_rank != -1:\n t_total = t_total // torch.distributed.get_world_size()\n\n optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)\n\n scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=t_total)\n\n optimizers.append(optimizer)\n schedulers.append(scheduler_obj)\n\n if fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n for train_idx in range(len(loss_models)):\n model, optimizer = amp.initialize(loss_models[train_idx], optimizers[train_idx], opt_level=fp16_opt_level)\n loss_models[train_idx] = model\n optimizers[train_idx] = optimizer\n\n global_step = 0\n # steps_per_epoch * number_of_loss_models\n data_iterators = [iter(dataloader) for dataloader in dataloaders]\n\n num_train_objectives = len(train_objectives)\n\n for epoch in trange(epochs, desc=\"Epoch\"):\n training_steps = 0\n\n for loss_model in loss_models:\n loss_model.zero_grad()\n loss_model.train()\n\n for _ in trange(steps_per_epoch, desc=\"Iteration\", smoothing=0.05):\n for train_idx in range(num_train_objectives):\n loss_model = loss_models[train_idx]\n optimizer = optimizers[train_idx]\n scheduler = schedulers[train_idx]\n data_iterator = data_iterators[train_idx]\n\n try:\n data = next(data_iterator)\n except StopIteration:\n # logging.info(\"Restart data_iterator\")\n data_iterator = iter(dataloaders[train_idx])\n data_iterators[train_idx] = data_iterator\n data = next(data_iterator)\n\n features, labels = batch_to_device(data, self.device)\n loss_value = loss_model(features, labels)\n\n if fp16:\n with amp.scale_loss(loss_value, optimizer) as scaled_loss:\n # scale the loss_value by the amplifier\n 
scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)\n else:\n loss_value.backward()\n torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)\n\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n\n training_steps += 1\n global_step += 1\n\n if evaluation_steps > 0 and training_steps % evaluation_steps == 0:\n self._eval_during_training(evaluator, output_path, save_best_model, epoch, training_steps)\n for loss_model in loss_models:\n loss_model.zero_grad()\n loss_model.train()\n self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1)\n\n def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):\n \"\"\"\n :param evaluator:\n the evaluator\n :param output_path:\n the evaluator can write the results to this path\n \"\"\"\n if output_path is not None:\n os.makedirs(output_path, exist_ok=True)\n return evaluator(self, output_path)\n\n def _eval_during_training(\n self, evaluator, output_path, save_best_model, epoch, steps):\n \"\"\"Runs evaluation during the training\"\"\"\n if evaluator is not None:\n score = evaluator(\n self, output_path=output_path, epoch=epoch, steps=steps)\n if score > self.best_score and save_best_model:\n self.save(output_path)\n self.best_score = score\n\n def _get_scheduler(\n self, optimizer, scheduler: str, warmup_steps: int, t_total: int):\n \"\"\"\n Returns the correct learning rate scheduler\n \"\"\"\n scheduler = scheduler.lower()\n if scheduler == 'constantlr':\n return transformers.get_constant_schedule(optimizer)\n elif scheduler == 'warmupconstant':\n # this uses warmup\n return transformers.get_constant_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps)\n elif scheduler == 'warmuplinear':\n return transformers.get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n elif scheduler == 'warmupcosine':\n return transformers.get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n elif scheduler == 'warmupcosinewithhardrestarts':\n return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n else:\n raise ValueError(\"Unknown scheduler {}\".format(scheduler))\n"
] | [
[
"numpy.asarray",
"torch.hub._get_torch_home",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"numpy.argsort",
"torch.distributed.get_world_size",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cnakhl/quimb | [
"482a21ebdaa0e8236924dbbdc435e8de68d86719"
] | [
"quimb/tensor/drawing.py"
] | [
"\"\"\"Functionailty for drawing tensor networks.\n\"\"\"\nimport textwrap\nimport importlib\nimport collections\n\nimport numpy as np\n\nfrom ..utils import valmap\n\n\nHAS_FA2 = importlib.util.find_spec('fa2') is not None\n\n\ndef parse_dict_to_tids_or_inds(spec, tn, default='__NONE__'):\n \"\"\"Parse a dictionary possibly containing a mix of tags, tids and inds, to\n a dictionary with only sinlge tids and inds as keys. If a tag or set of\n tags are given as a key, all matching tensor tids will receive the value.\n \"\"\"\n #\n if (spec is not None) and (not isinstance(spec, dict)):\n # assume new default value for everything\n return collections.defaultdict(lambda: spec)\n\n # allow not specifying a default value\n if default != '__NONE__':\n new = collections.defaultdict(lambda: default)\n else:\n new = {}\n\n if spec is None:\n return new\n\n # parse the special values\n for k, v in spec.items():\n if (\n # given as tid\n (isinstance(k, int) and k in tn.tensor_map) or\n # given as ind\n (isinstance(k, str) and k in tn.ind_map)\n ):\n # already a tid\n new[k] = v\n continue\n\n for tid in tn._get_tids_from_tags(k):\n new[tid] = v\n\n return new\n\n\ndef _add_or_merge_edge(G, u, v, attrs):\n if not G.has_edge(u, v):\n G.add_edge(u, v, **attrs)\n else:\n # multibond - update attrs\n attrs0 = G.edges[u, v]\n # average colors\n attrs0['color'] = tuple(\n (x + y) / 2 for x, y in zip(attrs0['color'], attrs['color']))\n attrs0['ind'] += ' ' + attrs['ind']\n # hide original edge and instead track multiple bond sizes\n attrs0['multiedge_inds'].append(attrs['ind'])\n attrs0['multiedge_sizes'].append(attrs['edge_size'])\n attrs0['spring_weight'] /= (attrs['edge_size'] + 1)\n attrs0['edge_size'] = 0\n\n\ndef draw_tn(\n tn,\n color=None,\n *,\n output_inds=None,\n highlight_inds=(),\n highlight_tids=(),\n highlight_inds_color=(1.0, 0.2, 0.2, 1.0),\n highlight_tids_color=(1.0, 0.2, 0.2, 1.0),\n show_inds=None,\n show_tags=None,\n show_scalars=True,\n custom_colors=None,\n title=None,\n legend=True,\n fix=None,\n k=None,\n iterations=200,\n initial_layout='spectral',\n use_forceatlas2=1000,\n use_spring_weight=False,\n node_color=None,\n node_scale=1.0,\n node_size=None,\n node_shape='o',\n node_outline_size=None,\n node_outline_darkness=0.8,\n node_hatch='',\n edge_color=None,\n edge_scale=1.0,\n edge_alpha=1 / 2,\n multiedge_spread=0.1,\n show_left_inds=True,\n arrow_closeness=1.1,\n arrow_length=1.0,\n arrow_overhang=1.0,\n arrow_linewidth=1.0,\n label_color=None,\n font_size=10,\n font_size_inner=7,\n figsize=(6, 6),\n margin=None,\n xlims=None,\n ylims=None,\n get=None,\n return_fig=False,\n ax=None,\n):\n \"\"\"Plot this tensor network as a networkx graph using matplotlib,\n with edge width corresponding to bond dimension.\n\n Parameters\n ----------\n color : sequence of tags, optional\n If given, uniquely color any tensors which have each of the tags.\n If some tensors have more than of the tags, only one color will show.\n output_inds : sequence of str, optional\n For hyper tensor networks explicitly specify which indices should be\n drawn as outer indices. 
If not set, the outer indices are assumed to be\n those that only appear on a single tensor.\n highlight_inds : iterable, optional\n Highlight these edges.\n highlight_tids : iterable, optional\n Highlight these nodes.\n highlight_inds_color\n What color to use for ``highlight_inds`` nodes.\n highlight_tids_color : tuple[float], optional\n What color to use for ``highlight_tids`` nodes.\n show_inds : {None, False, True, 'all', 'bond-size'}, optional\n Explicitly turn on labels for each tensors indices.\n show_tags : {None, False, True}, optional\n Explicitly turn on labels for each tensors tags.\n show_scalars : bool, optional\n Whether to show scalar tensors (floating nodes with no edges).\n custom_colors : sequence of colors, optional\n Supply a custom sequence of colors to match the tags given\n in ``color``.\n title : str, optional\n Set a title for the axis.\n legend : bool, optional\n Whether to draw a legend for the colored tags.\n fix : dict[tags_ind_or_tid], (float, float)], optional\n Used to specify actual relative positions for each tensor node.\n Each key should be a sequence of tags that uniquely identifies a\n tensor, a ``tid``, or a ``ind``, and each value should be a ``(x, y)``\n coordinate tuple.\n k : float, optional\n The optimal distance between nodes.\n iterations : int, optional\n How many iterations to perform when when finding the best layout\n using node repulsion. Ramp this up if the graph is drawing messily.\n initial_layout : {'spectral', 'kamada_kawai', 'circular', 'planar', \\\\\n 'random', 'shell', 'bipartite', ...}, optional\n The name of a networkx layout to use before iterating with the\n spring layout. Set ``iterations=0`` if you just want to use this\n layout only.\n use_forceatlas2 : bool or int, optional\n Whether to try and use ``forceatlas2`` (``fa2``) for the spring layout\n relaxation instead of ``networkx``. If an integer, only try and use\n beyond that many nodes (it can give messier results on smaller graphs).\n use_spring_weight : bool, optional\n Whether to use inverse bond sizes as spring weights to the force\n repulsion layout algorithms.\n node_color : tuple[float], optional\n Default color of nodes.\n node_size : None, float or dict, optional\n How big to draw the tensors. Can be a global single value, or a dict\n containing values for specific tags or tids. This is in absolute\n figure units. See ``node_scale`` simply scale the node sizes up or\n down.\n node_scale : float, optional\n Scale the node sizes by this factor, in addition to the automatica\n scaling based on the number of tensors.\n node_shape : None, str or dict, optional\n What shape to draw the tensors. Should correspond to a matplotlib\n scatter marker. Can be a global single value, or a dict containing\n values for specific tags or tids.\n node_outline_size : None, float or dict, optional\n The width of the border of each node. 
Can be a global single value, or\n a dict containing values for specific tags or tids.\n node_outline_darkness : float, optional\n Darkening of nodes outlines.\n edge_color : tuple[float], optional\n Default color of edges.\n edge_scale : float, optional\n How much to scale the width of the edges.\n edge_alpha : float, optional\n Set the alpha (opacity) of the drawn edges.\n multiedge_spread : float, optional\n How much to spread the lines of multi-edges.\n show_left_inds : bool, optional\n Whether to show ``tensor.left_inds`` as incoming arrows.\n arrow_closeness : float, optional\n How close to draw the arrow to its target.\n arrow_length : float, optional\n The size of the arrow with respect to the edge.\n arrow_overhang : float, optional\n Varies the arrowhead between a triangle (0.0) and 'V' (1.0).\n label_color : tuple[float], optional\n Color to draw labels with.\n font_size : int, optional\n Font size for drawing tags and outer indices.\n font_size_inner : int, optional\n Font size for drawing inner indices.\n figsize : tuple of int\n The size of the drawing.\n margin : None or float, optional\n Specify an argument for ``ax.margin``, else the plot limits will try\n and be computed based on the node positions and node sizes.\n xlims : None or tuple, optional\n Explicitly set the x plot range.\n xlims : None or tuple, optional\n Explicitly set the y plot range.\n get : {None, 'pos', 'graph'}, optional\n If ``None`` then plot as normal, else if:\n\n - ``'pos'``, return the plotting positions of each ``tid`` and\n ``ind`` drawn as a node, this can supplied to subsequent calls as\n ``fix=pos`` to maintain positions, even as the graph structure\n changes.\n - ``'graph'``, return the ``networkx.Graph`` object. Note that this\n will potentially have extra nodes representing output and hyper\n indices.\n\n return_fig : bool, optional\n If True and ``ax is None`` then return the figure created rather than\n executing ``pyplot.show()``.\n ax : matplotlib.Axis, optional\n Draw the graph on this axis rather than creating a new figure.\n \"\"\"\n import networkx as nx\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n import matplotlib.patches as patches\n from matplotlib.colors import to_rgb\n import math\n\n if output_inds is None:\n output_inds = set(tn.outer_inds())\n elif isinstance(output_inds, str):\n output_inds = {output_inds}\n else:\n output_inds = set(output_inds)\n\n # automatically decide whether to show tags and inds\n if show_inds is None:\n show_inds = (tn.num_tensors <= 20)\n if show_tags is None:\n show_tags = (tn.num_tensors <= 20)\n\n isdark = sum(to_rgb(mpl.rcParams['figure.facecolor'])) / 3 < 0.5\n if isdark:\n draw_color = (0.75, 0.77, 0.80, 1.0)\n else:\n draw_color = (0.45, 0.47, 0.50, 1.0)\n\n if edge_color is None:\n edge_color = draw_color\n else:\n edge_color = mpl.colors.to_rgb(edge_color)\n\n if node_color is None:\n node_color = draw_color\n else:\n node_color = mpl.colors.to_rgb(node_color)\n\n # set the size of the nodes and their border\n node_size = parse_dict_to_tids_or_inds(\n node_size, tn,\n default=node_scale * 1000 / tn.num_tensors**0.7)\n node_outline_size = parse_dict_to_tids_or_inds(\n node_outline_size, tn,\n default=min(3, node_size.default_factory()**0.5 / 5))\n node_shape = parse_dict_to_tids_or_inds(\n node_shape, tn, default='o')\n node_hatch = parse_dict_to_tids_or_inds(\n node_hatch, tn, default='')\n\n if label_color is None:\n label_color = mpl.rcParams['axes.labelcolor']\n\n # build the graph\n G = nx.Graph()\n hyperedges = 
[]\n node_labels = dict()\n edge_labels = dict()\n\n for ix, tids in tn.ind_map.items():\n # general information for this index\n edge_attrs = {\n 'color': (highlight_inds_color if ix in highlight_inds else\n edge_color),\n 'ind': ix,\n 'edge_size': edge_scale * math.log2(tn.ind_size(ix)),\n }\n edge_attrs['multiedge_inds'] = [edge_attrs['ind']]\n edge_attrs['multiedge_sizes'] = [edge_attrs['edge_size']]\n edge_attrs['spring_weight'] = 1 / sum(t.ndim for t in tn._inds_get(ix))\n\n if (ix in output_inds) or (len(tids) != 2):\n # hyper or outer edge - needs dummy 'node' shown with zero size\n hyperedges.append(ix)\n for tid in tids:\n _add_or_merge_edge(G, tid, ix, edge_attrs)\n else:\n # standard edge\n _add_or_merge_edge(G, *tids, edge_attrs)\n if show_inds == 'all':\n edge_labels[tuple(tids)] = ix\n elif show_inds == 'bond-size':\n edge_labels[tuple(tids)] = tn.ind_size(ix)\n\n # color the nodes\n colors = get_colors(color, custom_colors)\n\n # set parameters for all the nodes\n for tid, t in tn.tensor_map.items():\n\n if tid not in G.nodes:\n # e.g. tensor is a scalar\n if show_scalars:\n G.add_node(tid)\n else:\n continue\n\n G.nodes[tid]['size'] = node_size[tid]\n G.nodes[tid]['outline_size'] = node_outline_size[tid]\n color = node_color\n for tag in colors:\n if tag in t.tags:\n color = colors[tag]\n if tid in highlight_tids:\n color = highlight_tids_color\n G.nodes[tid]['color'] = color\n G.nodes[tid]['outline_color'] = tuple(\n (1.0 if i == 3 else node_outline_darkness) * c\n for i, c in enumerate(color)\n )\n G.nodes[tid]['marker'] = node_shape[tid]\n G.nodes[tid]['hatch'] = node_hatch[tid]\n if show_tags:\n # make the tags appear with auto vertical extent\n node_label = '{' + str(list(t.tags))[1:-1] + '}'\n node_labels[tid] = \"\\n\".join(textwrap.wrap(\n node_label, max(2 * len(node_label) ** 0.5, 16)\n ))\n\n for hix in hyperedges:\n G.nodes[hix]['ind'] = hix\n G.nodes[hix]['color'] = (1.0, 1.0, 1.0, 1.0)\n G.nodes[hix]['size'] = 0.0\n G.nodes[hix]['outline_size'] = 0.0\n G.nodes[hix]['outline_color'] = (1.0, 1.0, 1.0, 1.0)\n G.nodes[hix]['marker'] = ''\n G.nodes[hix]['hatch'] = ''\n if show_inds == 'all':\n node_labels[hix] = hix\n elif show_inds == 'bond-size':\n node_labels[hix] = tn.ind_size(hix)\n\n if get == 'graph':\n return G\n\n if show_inds == 'bond-size':\n font_size = font_size_inner\n for oix in output_inds:\n node_labels[oix] = tn.ind_size(oix)\n elif show_inds:\n for oix in output_inds:\n node_labels[oix] = oix\n\n pos = get_positions(tn, G, fix, initial_layout, k, iterations,\n use_forceatlas2, use_spring_weight)\n\n if get == 'pos':\n return pos\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n ax.axis('off')\n ax.set_aspect('equal')\n if title is not None:\n ax.set_title(str(title))\n\n xmin = ymin = +float('inf')\n xmax = ymax = -float('inf')\n for xy in pos.values():\n xmin = min(xmin, xy[0])\n xmax = max(xmax, xy[0])\n ymin = min(ymin, xy[1])\n ymax = max(ymax, xy[1])\n\n if margin is None:\n # XXX: pad the plot range so that node circles are not clipped,\n # using the networkx node_size parameter, *which is in absolute\n # units* and so must be inverse transformed using matplotlib!\n inv = ax.transData.inverted()\n real_node_size = (abs(\n inv.transform((0, node_size.default_factory()))[1] -\n inv.transform((0, 0))[1]\n ) ** 0.5) / 4\n ax.set_xlim(xmin - real_node_size, xmax + real_node_size)\n ax.set_ylim(ymin - real_node_size, ymax + real_node_size)\n else:\n ax.margins(margin)\n\n created_fig = True\n else:\n 
created_fig = False\n\n nx.draw_networkx_edges(\n G, pos,\n width=tuple(x[2]['edge_size'] for x in G.edges(data=True)),\n edge_color=tuple(x[2]['color'] for x in G.edges(data=True)),\n alpha=edge_alpha,\n ax=ax,\n )\n\n # draw multiedges\n multiedge_centers = {}\n for i, j, attrs in G.edges(data=True):\n sizes = attrs['multiedge_sizes']\n multiplicity = len(sizes)\n if multiplicity > 1:\n rads = np.linspace(\n multiplicity * -multiedge_spread,\n multiplicity * +multiedge_spread,\n multiplicity\n )\n\n xa, ya = pos[i]\n xb, yb = pos[j]\n xab, yab = (xa + xb) / 2., (ya + yb) / 2.\n dx, dy = xb - xa, yb - ya\n\n inds = attrs['multiedge_inds']\n for sz, rad, ix in zip(sizes, rads, inds):\n\n # store the central point of the arc in case its needed by\n # the arrow drawing functionality\n cx, cy = xab + rad * dy * 0.5, yab - rad * dx * 0.5\n multiedge_centers[ix] = (cx, cy)\n\n ax.add_patch(patches.FancyArrowPatch(\n (xa, ya), (xb, yb),\n connectionstyle=patches.ConnectionStyle.Arc3(rad=rad),\n alpha=edge_alpha,\n linewidth=sz,\n color=attrs['color'],\n zorder=1,\n ))\n\n scatters = collections.defaultdict(lambda: collections.defaultdict(list))\n\n for node, attrs in G.nodes(data=True):\n # need to group by marker and hatch as matplotlib doesn't map these\n key = (attrs['marker'], attrs['hatch'])\n scatters[key]['x'].append(pos[node][0])\n scatters[key]['y'].append(pos[node][1])\n scatters[key]['s'].append(attrs['size'])\n scatters[key]['c'].append(attrs['color'])\n scatters[key]['linewidths'].append(attrs['outline_size'])\n scatters[key]['edgecolors'].append(attrs['outline_color'])\n\n # plot the nodes\n for (marker, hatch), data in scatters.items():\n ax.scatter(\n data['x'],\n data['y'],\n s=data['s'],\n c=data['c'],\n marker=marker,\n linewidths=data['linewidths'],\n edgecolors=data['edgecolors'],\n hatch=hatch,\n zorder=2,\n )\n\n # draw incomcing arrows for tensor left_inds\n if show_left_inds:\n for tid, t in tn.tensor_map.items():\n if t.left_inds is not None:\n for ind in t.left_inds:\n if ind in hyperedges:\n tida = ind\n else:\n tida, = (x for x in tn.ind_map[ind] if x != tid)\n tidb = tid\n (xa, ya), (xb, yb) = pos[tida], pos[tidb]\n\n edge_width = G.get_edge_data(tida, tidb)['edge_size']\n edge_length = ((xb - xa)**2 + (yb - ya)**2)**0.5\n arrow_scale = (\n 0.02 * arrow_length * edge_width / edge_length**0.5\n )\n\n # arrow start and change\n if ind in multiedge_centers:\n x, y = multiedge_centers[ind]\n else:\n x = (xa + arrow_closeness * xb) / (1 + arrow_closeness)\n y = (ya + arrow_closeness * yb) / (1 + arrow_closeness)\n\n dx = (xb - xa) * arrow_scale\n dy = (yb - ya) * arrow_scale\n\n ax.add_patch(patches.FancyArrow(\n x, y, dx, dy,\n width=0, # don't draw tail\n length_includes_head=True,\n head_width=(dx**2 + dy**2)**0.5,\n head_length=(dx**2 + dy**2)**0.5,\n linewidth=arrow_linewidth,\n color=(\n highlight_inds_color if ind in highlight_inds else\n edge_color\n ),\n alpha=edge_alpha,\n fill=True,\n shape='full',\n overhang=arrow_overhang,\n ))\n\n if show_inds in {'all', 'bond-size'}:\n nx.draw_networkx_edge_labels(\n G, pos,\n edge_labels=edge_labels,\n font_size=font_size_inner,\n font_color=label_color,\n ax=ax,\n )\n if show_tags or show_inds:\n nx.draw_networkx_labels(\n G, pos,\n labels=node_labels,\n font_size=font_size,\n font_color=label_color,\n ax=ax,\n )\n\n # create legend\n if colors and legend:\n handles = []\n for color in colors.values():\n handles += [plt.Line2D([0], [0], marker='o', color=color,\n linestyle='', markersize=10)]\n\n # needed in case '_' is 
the first character\n lbls = [f\" {lbl}\" for lbl in colors]\n\n plt.legend(handles, lbls, ncol=max(round(len(handles) / 20), 1),\n loc='center left', bbox_to_anchor=(1, 0.5))\n\n if not created_fig:\n # we added to axisting axes\n return\n\n if xlims is not None:\n ax.set_xlim(xlims)\n if ylims is not None:\n ax.set_ylim(ylims)\n\n if return_fig:\n return fig\n else:\n plt.show()\n plt.close(fig)\n\n\n# colorblind palettes by Bang Wong (https://www.nature.com/articles/nmeth.1618)\n\n_COLORS_DEFAULT = (\n '#56B4E9', # light blue\n '#E69F00', # orange\n '#009E73', # green\n '#D55E00', # red\n '#F0E442', # yellow\n '#CC79A7', # purple\n '#0072B2', # dark blue\n)\n\n_COLORS_SORTED = (\n '#0072B2', # dark blue\n '#56B4E9', # light blue\n '#009E73', # green\n '#F0E442', # yellow\n '#E69F00', # orange\n '#D55E00', # red\n '#CC79A7', # purple\n)\n\n\ndef mod_sat(c, mod):\n \"\"\"Modify the luminosity of rgb color ``c``.\n \"\"\"\n from matplotlib.colors import hsv_to_rgb, rgb_to_hsv\n\n h, s, v = rgb_to_hsv(c[:3])\n return (*hsv_to_rgb((h, mod * s, v)), 1.0)\n\n\ndef auto_colors(nc):\n import math\n from matplotlib.colors import LinearSegmentedColormap\n\n cmap = LinearSegmentedColormap.from_list('wong', _COLORS_SORTED)\n\n xs = list(map(cmap, np.linspace(0, 1.0, nc)))\n\n # modulate color saturation with sine to generate local distinguishability\n # ... but only turn on gradually for increasing number of nodes\n sat_mod_period = min(4, nc / 7)\n sat_mod_factor = max(0.0, 2 / 3 * math.tanh((nc - 7) / 4))\n\n return [\n mod_sat(\n c, 1 - sat_mod_factor * math.sin(math.pi * i / sat_mod_period)**2\n )\n for i, c in enumerate(xs)\n ]\n\n\ndef get_colors(color, custom_colors=None):\n \"\"\"Generate a sequence of rgbs for tag(s) ``color``.\n \"\"\"\n from matplotlib.colors import to_rgba\n\n if color is None:\n return dict()\n\n if isinstance(color, str):\n color = (color,)\n\n if custom_colors is not None:\n rgbs = list(map(to_rgba, custom_colors))\n return dict(zip(color, rgbs))\n\n nc = len(color)\n if nc <= 7:\n return dict(zip(color, list(map(to_rgba, _COLORS_DEFAULT))))\n\n rgbs = auto_colors(nc)\n return dict(zip(color, rgbs))\n\n\ndef _rotate(xy, theta):\n \"\"\"Return a rotated set of points.\n \"\"\"\n s = np.sin(theta)\n c = np.cos(theta)\n\n xyr = np.empty_like(xy)\n xyr[:, 0] = c * xy[:, 0] - s * xy[:, 1]\n xyr[:, 1] = s * xy[:, 0] + c * xy[:, 1]\n\n return xyr\n\n\ndef _span(xy):\n \"\"\"Return the vertical span of the points.\n \"\"\"\n return xy[:, 1].max() - xy[:, 1].min()\n\n\ndef _massage_pos(pos, nangles=360, flatten=False):\n \"\"\"Rotate a position dict's points to cover a small vertical span\n \"\"\"\n xy = np.empty((len(pos), 2))\n for i, (x, y) in enumerate(pos.values()):\n xy[i, 0] = x\n xy[i, 1] = y\n\n thetas = np.linspace(0, 2 * np.pi, nangles, endpoint=False)\n rxys = (_rotate(xy, theta) for theta in thetas)\n rxy0 = min(rxys, key=lambda rxy: _span(rxy))\n\n if flatten:\n rxy0[:, 1] /= 2\n\n return dict(zip(pos, rxy0))\n\n\ndef get_positions(\n tn,\n G,\n fix=None,\n initial_layout='spectral',\n k=None,\n iterations=200,\n use_forceatlas2=False,\n use_spring_weight=False,\n):\n import networkx as nx\n\n if fix is None:\n fix = dict()\n else:\n fix = parse_dict_to_tids_or_inds(fix, tn)\n # find range with which to scale spectral points with\n xmin, xmax, ymin, ymax = (\n f(fix.values(), key=lambda xy: xy[i])[i]\n for f, i in [(min, 0), (max, 0), (min, 1), (max, 1)])\n if xmin == xmax:\n xmin, xmax = xmin - 1, xmax + 1\n if ymin == ymax:\n ymin, ymax = ymin - 1, ymax + 1\n 
xymin, xymax = min(xmin, ymin), max(xmax, ymax)\n\n if all(node in fix for node in G.nodes):\n # everything is already fixed\n return fix\n\n # use spectral or other layout as starting point\n pos0 = getattr(nx, initial_layout + '_layout')(G)\n\n # scale points to fit with specified positions\n if fix:\n # but update with fixed positions\n pos0.update(valmap(lambda xy: np.array(\n (2 * (xy[0] - xymin) / (xymax - xymin) - 1,\n 2 * (xy[1] - xymin) / (xymax - xymin) - 1)), fix))\n fixed = fix.keys()\n else:\n fixed = None\n\n # and then relax remaining using spring layout\n if iterations:\n\n if use_forceatlas2 is True:\n use_forceatlas2 = 1\n elif use_forceatlas2 in (0, False):\n use_forceatlas2 = float('inf')\n\n should_use_fa2 = (\n (fixed is None) and HAS_FA2 and (len(G) > use_forceatlas2)\n )\n\n weight = 'spring_weight' if use_spring_weight else None\n\n if should_use_fa2:\n from fa2 import ForceAtlas2\n pos = ForceAtlas2(verbose=False).forceatlas2_networkx_layout(\n G, pos=pos0, iterations=iterations, weight_attr=weight)\n else:\n pos = nx.spring_layout(\n G, pos=pos0, fixed=fixed, k=k, iterations=iterations,\n weight=weight)\n else:\n pos = pos0\n\n if not fix:\n # finally rotate them to cover a small vertical span\n pos = _massage_pos(pos)\n\n return pos\n"
] | [
[
"matplotlib.patches.ConnectionStyle.Arc3",
"matplotlib.colors.to_rgb",
"numpy.linspace",
"matplotlib.patches.FancyArrow",
"matplotlib.colors.hsv_to_rgb",
"numpy.empty_like",
"matplotlib.pyplot.Line2D",
"numpy.cos",
"matplotlib.pyplot.subplots",
"numpy.sin",
"matplotlib.colors.rgb_to_hsv",
"matplotlib.pyplot.close",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
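The row above catalogues a networkx/matplotlib drawing routine whose layout step seeds a spring relaxation with a named networkx layout and then rotates the result to minimise its vertical span. Below is a minimal sketch of that layout flow on a toy graph; the graph, angle count and iteration count are illustrative choices, not values taken from the row.

```python
import networkx as nx
import numpy as np

# Toy graph standing in for a tensor network.
G = nx.cycle_graph(6)

# Seed with a deterministic layout, then relax it with the spring layout,
# mirroring the initial_layout -> spring_layout flow in get_positions above.
pos0 = nx.spectral_layout(G)
pos = nx.spring_layout(G, pos=pos0, iterations=200)

# Rotate the points so they cover the smallest vertical span,
# the same idea as the _massage_pos helper above.
xy = np.array([pos[n] for n in G])

def rotate(points, theta):
    c, s = np.cos(theta), np.sin(theta)
    return points @ np.array([[c, s], [-s, c]])

thetas = np.linspace(0, 2 * np.pi, 360, endpoint=False)
best = min((rotate(xy, t) for t in thetas),
           key=lambda r: r[:, 1].max() - r[:, 1].min())
pos = {n: tuple(p) for n, p in zip(G, best)}
print(pos)
```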
mpsonntag/nixpy | [
"fd6addf137e22dad5fc1b1a95bfc4ca2bd84da5d"
] | [
"nixio/test/test_data_array.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright © 2014, German Neuroinformatics Node (G-Node)\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\nimport os\nimport time\nfrom six import string_types\nimport sys\nimport unittest\nimport numpy as np\nimport nixio as nix\nfrom nixio.exceptions import IncompatibleDimensions\nfrom .tmp import TempDir\n\n\nclass TestDataArray(unittest.TestCase):\n\n def setUp(self):\n self.tmpdir = TempDir(\"dataarraytest\")\n self.testfilename = os.path.join(self.tmpdir.path, \"dataarraytest.nix\")\n self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite)\n self.block = self.file.create_block(\"test block\", \"recordingsession\")\n self.array = self.block.create_data_array(\"test array\", \"signal\",\n nix.DataType.Double, (100, ))\n self.other = self.block.create_data_array(\"other array\", \"signal\",\n nix.DataType.Double, (100, ))\n\n def tearDown(self):\n del self.file.blocks[self.block.id]\n self.file.close()\n self.tmpdir.cleanup()\n\n def test_data_array_eq(self):\n assert self.array == self.array\n assert not self.array == self.other\n assert self.array is not None\n\n def test_data_array_id(self):\n assert self.array.id is not None\n\n def test_data_array_name(self):\n assert self.array.name is not None\n\n def test_data_array_type(self):\n def set_none():\n self.array.type = None\n\n assert self.array.type is not None\n self.assertRaises(Exception, set_none)\n\n self.array.type = \"foo type\"\n assert self.array.type == \"foo type\"\n\n def test_data_array_definition(self):\n assert self.array.definition is None\n\n self.array.definition = \"definition\"\n assert self.array.definition == \"definition\"\n\n self.array.definition = None\n assert self.array.definition is None\n\n def test_data_array_timestamps(self):\n created_at = self.array.created_at\n assert created_at > 0\n\n updated_at = self.array.updated_at\n assert updated_at > 0\n\n self.array.force_created_at(1403530068)\n assert self.array.created_at == 1403530068\n\n def test_data_array_label(self):\n assert self.array.label is None\n\n self.array.label = \"label\"\n assert self.array.label == \"label\"\n\n self.array.label = None\n assert self.array.label is None\n\n def test_data_array_unit(self):\n assert self.array.unit is None\n\n self.array.unit = \"mV\"\n assert self.array.unit == \"mV\"\n\n self.array.unit = \"0.5*ms\"\n assert self.array.unit == \"0.5*ms\"\n\n self.array.unit = None\n assert self.array.unit is None\n\n def test_data_array_exp_origin(self):\n assert self.array.expansion_origin is None\n\n data = [10, 29, 33]\n intarray = self.block.create_data_array(\"intarray\", \"array\", nix.DataType.Int64, data=data)\n\n intarray.expansion_origin = 10.2\n assert intarray.expansion_origin == 10.2\n np.testing.assert_almost_equal(intarray[:], np.array(data) - 10.2)\n\n # single value retrieval\n np.testing.assert_almost_equal(intarray[1], data[1] - 10.2)\n\n intarray.expansion_origin = None\n assert intarray.expansion_origin is None\n np.testing.assert_almost_equal(intarray[:], np.array(data))\n\n def test_data_array_coefficients(self):\n assert self.array.polynom_coefficients == ()\n\n self.array.polynom_coefficients = (1.1, 2.2)\n assert self.array.polynom_coefficients == (1.1, 2.2)\n\n data = [10, 29, 33]\n intarray = self.block.create_data_array(\"intarray\", \"array\", nix.DataType.Int64, data=data)\n 
intarray.polynom_coefficients = (0.0, 0.1)\n np.testing.assert_almost_equal(intarray[:], np.array(data) * 0.1)\n\n # single value retrieval\n np.testing.assert_almost_equal(intarray[1], data[1] * 0.1)\n\n # Coefficient deletion\n intarray.polynom_coefficients = None\n np.testing.assert_almost_equal(intarray[:], np.array(data))\n\n def test_data_array_data(self):\n assert self.array.polynom_coefficients == ()\n\n data = np.array([float(i) for i in range(100)])\n dout = np.empty_like(data)\n self.array.write_direct(data)\n assert self.array.dtype == np.dtype(float)\n self.array.read_direct(dout)\n assert np.array_equal(data, dout)\n dout = np.array(self.array)\n assert np.array_equal(data, dout)\n assert self.array.data_extent == data.shape\n assert self.array.data_extent == self.array.shape\n assert self.array.size == data.size\n\n assert len(self.array) == len(data)\n\n dout = np.array(range(100))\n assert np.array_equal(data, dout)\n\n dout = self.array[...]\n assert np.array_equal(data, dout)\n\n # indexed writing (1-d)\n data = np.array([float(-i) for i in range(100)])\n self.array[()] = data\n assert np.array_equal(self.array[...], data)\n\n self.array[...] = [float(-i) for i in range(100)]\n assert np.array_equal(self.array[()], data)\n assert np.array_equal(self.array[0:-10], data[0:-10])\n assert np.array_equal(self.array[-10], np.array([data[-10]]))\n\n self.array[0] = 42\n assert self.array[0] == 42.0\n\n # changing shape via data_extent property\n self.array.data_extent = (200, )\n assert self.array.data_extent == (200, )\n\n data = np.eye(123)\n da1 = self.block.create_data_array(\"double array\", \"signal\", nix.DataType.Double, (123, 123))\n dset = da1\n dset.write_direct(data)\n dout = np.empty_like(data)\n dset.read_direct(dout)\n assert np.array_equal(data, dout)\n\n # indexing support in 2-d arrays\n with self.assertRaises(IndexError):\n _ = self.array[[], [1, 2]]\n\n dout = dset[12]\n assert dout.shape == data[12].shape\n assert np.array_equal(dout, data[12])\n assert np.array_equal(dset[()], data)\n assert np.array_equal(dset[...], data)\n assert np.array_equal(dset[12, ...], data[12, ...])\n assert np.array_equal(dset[..., 12], data[..., 12])\n assert np.array_equal(dset[1:], data[1:])\n assert np.array_equal(dset[-20:, -20:], data[123-20:, 123-20:])\n assert np.array_equal(dset[:1], data[:1])\n assert np.array_equal(dset[:-1, :-1], data[1:123, 1:123])\n assert np.array_equal(dset[1:10, 1:10], data[1:10, 1:10])\n assert np.array_equal(dset[1:-2, 1:-2], data[1:121, 1:121])\n\n da3 = self.block.create_data_array(\"int identity array\", \"signal\",\n nix.DataType.Int32, (123, 123))\n assert da3.shape == (123, 123)\n assert da3.dtype == np.dtype('i4')\n\n data = np.random.rand(3, 4, 5)\n da4 = self.block.create_data_array(\"3d array\", \"signal\",\n nix.DataType.Double, (3, 4, 5))\n dset = da4\n dset.write_direct(data)\n assert dset.shape == data.shape\n assert len(dset) == len(data)\n assert dset.size == data.size\n assert np.array_equal(dset[2, ...], data[2, ...])\n assert np.array_equal(dset[-1, ...], data[2, ...])\n assert np.array_equal(dset[..., 3], data[..., 3])\n assert np.array_equal(dset[..., -2], data[..., 3])\n assert np.array_equal(dset[2, ..., 3], data[2, ..., 3])\n assert np.array_equal(dset[2, ..., -2], data[2, ..., 3])\n assert np.array_equal(dset[1:2, ..., 3:5], data[1:2, ..., 3:5])\n assert np.array_equal(dset[1:2, ..., 3:-1], data[1:2, ..., 3:4])\n\n # indexed writing (n-d)\n data = np.random.rand(2, 2)\n dset[1, 0:2, 0:2] = data\n assert 
np.array_equal(dset[1, 0:2, 0:2], data)\n\n # test inferring shape & dtype from data, and writing the data\n test_ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n test_data = np.array(test_ten, dtype=int)\n da = self.block.create_data_array('created_from_data', 'b',\n data=test_data)\n assert da.shape == test_data.shape\n assert np.array_equal(test_data, da[:])\n assert test_ten == [x for x in da]\n\n # test for exceptions\n self.assertRaises(ValueError, self.block.create_data_array, 'x', 'y')\n self.assertRaises(ValueError, self.block.create_data_array,\n 'x', 'y', data=test_data, shape=(1, 1, 1))\n\n # test appending\n data = np.zeros((10, 5))\n da = self.block.create_data_array('append', 'double', data=data)\n to_append = np.zeros((2, 5))\n\n da.append(to_append)\n assert da.shape == (12, 5)\n\n to_append = np.zeros((12, 2))\n da.append(to_append, axis=1)\n assert da.shape == (12, 7)\n\n self.assertRaises(ValueError, da.append, np.zeros((3, 3, 3)))\n self.assertRaises(ValueError, da.append, np.zeros((5, 5)))\n\n def test_data_array_dtype(self):\n da = self.block.create_data_array('dtype_f8', 'b', 'f8', (10, 10))\n assert da.dtype == np.dtype('f8')\n\n da = self.block.create_data_array('dtype_i16', 'b', np.int16, (10, 10))\n data = da[:]\n assert da.dtype == np.int16\n assert data.dtype == np.int16\n\n da = self.block.create_data_array('dtype_int', 'b', int, (10, 10))\n assert da.dtype == np.dtype(int)\n\n da = self.block.create_data_array('dtype_ndouble', 'b',\n nix.DataType.Double, (10, 10))\n assert da.dtype == np.dtype('f8')\n\n da = self.block.create_data_array('dtype_auto', 'b', None, (10, 10))\n assert da.dtype == np.dtype('f8')\n\n test_data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], dtype=int)\n da = self.block.create_data_array('dtype_int_from_data', 'b',\n data=test_data)\n assert da.dtype == test_data.dtype\n\n bdata = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\n if sys.version_info[0] == 3:\n bdata = [bytes(x, 'UTF-8') for x in bdata]\n\n void_data = np.array(bdata, dtype='V1')\n da = self.block.create_data_array('dtype_opaque', 'b', data=void_data)\n assert da.dtype == np.dtype('V1')\n assert np.array_equal(void_data, da[:])\n\n def test_array_unicode(self):\n da = self.block.create_data_array(\"unicode\", \"lotsatext\",\n nix.DataType.String, shape=(4,))\n data = [\"Καφές\", \"Café\", \"咖啡\", \"☕\"]\n da.write_direct(data)\n\n assert data == list(da[:])\n\n def test_data_array_dimensions(self):\n assert len(self.array.dimensions) == 0\n\n self.array.append_set_dimension()\n self.array.append_range_dimension(range(10))\n self.array.append_sampled_dimension(0.1)\n\n assert len(self.array.dimensions) == 3\n\n self.assertRaises(KeyError, lambda: self.array.dimensions[\"notexist\"])\n self.assertRaises(IndexError, lambda: self.array.dimensions[-4])\n self.assertRaises(IndexError, lambda: self.array.dimensions[3])\n\n assert isinstance(str(self.array.dimensions), string_types)\n assert isinstance(repr(self.array.dimensions), string_types)\n\n dims = list(self.array.dimensions)\n for i in range(3):\n assert dims[i].index == self.array.dimensions[i].index\n assert(dims[i].dimension_type ==\n self.array.dimensions[i].dimension_type)\n\n assert(self.array.dimensions[i].index ==\n self.array.dimensions[i-3].index)\n\n self.array.delete_dimensions()\n\n def test_data_array_sources(self):\n source1 = self.block.create_source(\"source1\", \"channel\")\n source2 = self.block.create_source(\"source2\", \"electrode\")\n\n assert len(self.array.sources) == 0\n\n 
self.array.sources.append(source1)\n self.array.sources.append(source2)\n\n self.assertRaises(TypeError, self.array.sources.append, 100)\n\n assert len(self.array.sources) == 2\n assert source1 in self.array.sources\n assert source2 in self.array.sources\n\n del self.array.sources[source2]\n assert self.array.sources[0] == source1\n\n del self.array.sources[source1]\n assert len(self.array.sources) == 0\n\n def test_data_array_indexing(self):\n data = np.random.rand(50)\n da = self.block.create_data_array(\"random\", \"DataArray\",\n data=data)\n\n np.testing.assert_almost_equal(data[:], da[:])\n\n def check_idx(idx):\n np.testing.assert_almost_equal(da[idx], data[idx])\n\n check_idx(10)\n check_idx(Ellipsis)\n check_idx(slice(10, 15))\n\n def test_data_array_multi_slicing(self):\n shape = (5, 10, 15, 20)\n da = self.block.create_data_array(\n 'test', 'test',\n data=np.random.randint(65000, size=shape)\n )\n self.assertEqual(da[0, 0, 0, 0].shape, (1,))\n self.assertEqual(da[0, 0, 0, :].shape, (20,))\n self.assertEqual(da[0, 0, :, 0].shape, (15,))\n self.assertEqual(da[0, 0, :, :].shape, (15, 20))\n self.assertEqual(da[0, :, 0, 0].shape, (10,))\n self.assertEqual(da[0, :, 0, :].shape, (10, 20))\n self.assertEqual(da[0, :, :, 0].shape, (10, 15))\n self.assertEqual(da[0, :, :, :].shape, (10, 15, 20))\n self.assertEqual(da[:, 0, 0, 0].shape, (5,))\n self.assertEqual(da[:, 0, 0, :].shape, (5, 20))\n self.assertEqual(da[:, 0, :, 0].shape, (5, 15))\n self.assertEqual(da[:, 0, :, :].shape, (5, 15, 20))\n self.assertEqual(da[:, :, 0, 0].shape, (5, 10))\n self.assertEqual(da[:, :, 0, :].shape, (5, 10, 20))\n self.assertEqual(da[:, :, :, 0].shape, (5, 10, 15))\n self.assertEqual(da[:, :, :, :].shape, shape)\n\n def test_outofbounds_indexing(self):\n # test out of bounds IndexError exception\n oobtestda = self.block.create_data_array(\"oobdatatest\",\n \"data\", data=[1, 2, 10])\n with self.assertRaises(IndexError):\n _ = oobtestda[3]\n with self.assertRaises(IndexError):\n _ = oobtestda[10]\n with self.assertRaises(IndexError):\n _ = oobtestda[-7]\n\n def test_data_array_numpy_indexing(self):\n data = np.random.rand(50)\n da = self.block.create_data_array(\"random\", \"DataArray\",\n data=data)\n\n def check_idx(idx):\n np.testing.assert_almost_equal(da[idx], data[idx])\n\n check_idx(np.int8(10))\n check_idx(np.int16(20))\n check_idx(np.int32(42))\n check_idx(np.int64(9))\n\n def test_get_slice(self):\n data2d = np.random.random_sample((100, 2))\n da2d = self.block.create_data_array(\"get_slice 2d\", \"Data\",\n data=data2d)\n da2d.append_range_dimension(np.linspace(10, 19.8, 50))\n da2d.append_set_dimension()\n data = da2d[10:30, 1:2]\n islice = da2d.get_slice((10, 1), (20, 1),\n mode=nix.DataSliceMode.Index)\n np.testing.assert_almost_equal(data, islice)\n dslice = da2d.get_slice((12.0, 1), (4.0, 1),\n mode=nix.DataSliceMode.Data)\n np.testing.assert_almost_equal(data, dslice)\n dslice2 = da2d.get_slice((0.0, 1), (16.0, 1),\n mode=nix.DataSliceMode.Data)\n np.testing.assert_almost_equal(da2d[0:30, 1:2], dslice2)\n\n data3d = np.random.random_sample((30, 30, 5))\n da3d = self.block.create_data_array(\"get_slice 3d\", \"Data\",\n data=data3d)\n sdim = da3d.append_sampled_dimension(0.1)\n sdim.offset = 0.5\n da3d.append_sampled_dimension(2.0)\n da3d.append_set_dimension()\n\n data = data3d[5:15, 20:25, 3:5]\n islice = da3d.get_slice((5, 20, 3), (10, 5, 2),\n mode=nix.DataSliceMode.Index)\n np.testing.assert_almost_equal(data, islice)\n dslice = da3d.get_slice((1.0, 40.0, 3), (1.0, 10.0, 2),\n 
mode=nix.DataSliceMode.Data)\n np.testing.assert_almost_equal(data, dslice)\n\n with self.assertRaises(IncompatibleDimensions):\n da2d.get_slice((0, 0, 0), (10, 10, 10))\n\n with self.assertRaises(IncompatibleDimensions):\n da2d.get_slice((0, 0), (10,))\n\n with self.assertRaises(IncompatibleDimensions):\n da3d.get_slice((0, 0, 0), (3, 9, 40, 1))\n\n def test_dim_one_based(self):\n self.array.append_set_dimension()\n self.array.append_range_dimension(range(10))\n self.array.append_sampled_dimension(0.1)\n dim_container_one_based = self.array.iter_dimensions()\n for idx, dim in dim_container_one_based:\n assert self.array.dimensions[idx-1].dimension_type ==\\\n dim.dimension_type\n\n def test_timestamp_autoupdate(self):\n array = self.block.create_data_array(\"array.time\", \"signal\",\n nix.DataType.Double, (100, ))\n # Append dimensions and check time\n datime = array.updated_at\n time.sleep(1)\n array.append_set_dimension()\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.append_sampled_dimension(sampling_interval=0.1)\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.append_range_dimension(ticks=[0.1])\n self.assertNotEqual(datime, array.updated_at)\n\n # other properties\n datime = array.updated_at\n time.sleep(1)\n array.polynom_coefficients = [1.1, 2.2]\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.expansion_origin = -1\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.label = \"lbl\"\n self.assertNotEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.unit = \"Ms\"\n self.assertNotEqual(datime, array.updated_at)\n\n def test_timestamp_noautoupdate(self):\n self.file.auto_update_timestamps = False\n array = self.block.create_data_array(\"array.time\", \"signal\",\n nix.DataType.Double, (100, ))\n # Append dimensions and check time\n datime = array.updated_at\n time.sleep(1)\n array.append_set_dimension()\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.append_sampled_dimension(sampling_interval=0.1)\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.append_range_dimension(ticks=[0.1])\n self.assertEqual(datime, array.updated_at)\n\n # other properties\n datime = array.updated_at\n time.sleep(1)\n array.polynom_coefficients = [1.1, 2.2]\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.expansion_origin = -1\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.label = \"lbl\"\n self.assertEqual(datime, array.updated_at)\n\n datime = array.updated_at\n time.sleep(1)\n array.unit = \"Ms\"\n self.assertEqual(datime, array.updated_at)\n\n def test_data_deletion(self):\n data = [42.1337, 720.3, 190.0009]\n array = self.block.create_data_array(\"del.test\", \"test\", data=data)\n np.testing.assert_almost_equal(data, array[:])\n\n array[:] = None\n np.testing.assert_almost_equal([np.nan]*len(data), array[:])\n\n nda = len(self.block.data_arrays)\n del self.block.data_arrays[\"del.test\"]\n assert len(self.block.data_arrays) == nda-1\n assert \"del.test\" not in self.block.data_arrays\n\n def test_single_value_retrieval(self):\n assert self.array[1].shape == (1,)\n self.array.expansion_origin = 0.3\n assert self.array[1].shape == (1,)\n 
self.array.expansion_origin = None\n\n assert self.array[1].shape == (1,)\n self.array.polynom_coefficients = (1.2, 3.4)\n assert self.array[1].shape == (1,)\n self.array.polynom_coefficients = None\n\n assert self.array[1].shape == (1,)\n self.array.expansion_origin = 0.9\n self.array.polynom_coefficients = (1.2, 3.4)\n assert self.array[1].shape == (1,)\n self.array.expansion_origin = None\n self.array.polynom_coefficients = None\n\n assert self.array[1].shape == (1,)\n"
] | [
[
"numpy.array_equal",
"numpy.linspace",
"numpy.empty_like",
"numpy.eye",
"numpy.int32",
"numpy.int8",
"numpy.random.random_sample",
"numpy.dtype",
"numpy.int16",
"numpy.testing.assert_almost_equal",
"numpy.int64",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
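The test file in the row above exercises nixio DataArray creation, units, labels and dimensions. A short usage sketch based on the calls shown in that row follows; the file name, block names and signal values are illustrative.

```python
import numpy as np
import nixio as nix

# File and block names are illustrative.
f = nix.File.open("example.nix", nix.FileMode.Overwrite)
blk = f.create_block("session", "recording")

# Shape and dtype are inferred from the data, as the tests above check.
data = np.sin(np.linspace(0, 2 * np.pi, 100))
da = blk.create_data_array("signal", "analog", data=data)
da.unit = "mV"
da.label = "membrane potential"
da.append_sampled_dimension(0.001)  # sampling interval in seconds

print(da.shape, da.dtype, da.unit)
f.close()
```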
iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer | [
"57904544c6d6b43dcd5937afeb474c0a47456d98"
] | [
"models/model_unet.py"
] | [
"import tensorflow as tf\nfrom tensorflow.python.ops import control_flow_ops\nfrom six.moves import cPickle\nimport unet\nimport simplified_unet\n\narg_scope = tf.contrib.framework.arg_scope\n\n\nclass UnetModel(object):\n\n def __init__(self, number_class=3, is_training=True, is_simplified = False, dropout = True):\n\n \"\"\"Create the model\"\"\"\n self.n_classes = number_class\n self.is_training = is_training\n self.is_simplified = is_simplified\n self.dropout = dropout\n\n def _create_network(self, input_batch, dropout = False, is_training = True):\n\n \"\"\"\n Args:\n input_batch: batch of pre-processed images.\n keep_prob: probability of keeping neurons intact.\n\n Returns:\n A downsampled segmentation mask.\n \"\"\"\n if not self.is_simplified:\n net, _ = unet.unet(input_batch, self.n_classes, is_training = is_training, dropout = dropout, weight_decay=0.0005)\n else:\n net, _ = simplified_unet.unet(input_batch, self.n_classes, is_training = is_training, dropout = dropout, weight_decay=0.0005)\n\n return net\n\n def prepare_label(self, input_batch, new_size):\n \"\"\"Resize masks and perform one-hot encoding.\n Args:\n input_batch: input tensor of shape [batch_size H W 1].\n new_size: a tensor with new height and width.\n Returns:\n Outputs a tensor of shape [batch_size h w 21]\n with last dimension comprised of 0's and 1's only.\n \"\"\"\n with tf.name_scope('label_encode'):\n input_batch = tf.image.resize_nearest_neighbor(input_batch,\n new_size) # As labels are integer numbers, need to use NN interp.\n input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.\n input_batch = tf.one_hot(input_batch, depth=self.n_classes)\n return input_batch\n\n def preds(self, input_batch):\n \"\"\"Create the network and run inference on the input batch.\n\n Args:\n input_batch: batch of pre-processed images.\n\n Returns:\n Argmax over the predictions of the network of the same shape as the input.\n \"\"\"\n raw_output = self._create_network(tf.cast(input_batch, tf.float32), dropout = self.dropout, is_training = self.is_training)\n raw_output = tf.image.resize_bilinear(raw_output, tf.shape(input_batch)[1:3, ])\n raw_output = tf.argmax(raw_output, axis=3)\n raw_output = tf.expand_dims(raw_output, axis=3) # Create 4D-tensor.\n return tf.cast(raw_output, tf.uint8)\n\n def loss(self, img_batch, label_batch, mask_batch):\n \"\"\"Create the network, run inference on the input batch and compute loss.\n\n Args:\n input_batch: batch of pre-processed images.\n\n Returns:\n Pixel-wise softmax loss.\n \"\"\"\n raw_output = self._create_network(tf.cast(img_batch, tf.float32), dropout = self.dropout, is_training = self.is_training)\n\n # Get prediction output\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img_batch)[1:3, ])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n raw_output_up = tf.expand_dims(raw_output_up, axis=3) # Create 4D-tensor.\n pred = tf.cast(raw_output_up, tf.uint8)\n prediction = tf.reshape(raw_output, [-1, self.n_classes])\n\n # Prepare ground truth output\n label_batch = tf.image.resize_nearest_neighbor(label_batch, tf.stack(raw_output.get_shape()[1:3]))\n gt = tf.expand_dims(tf.cast(tf.reshape(label_batch, [-1]), tf.int32), axis=1)\n\n # Prepare mask\n if mask_batch != None:\n resized_mask_batch = tf.image.resize_nearest_neighbor(mask_batch, tf.stack(raw_output.get_shape()[1:3]))\n resized_mask_batch = tf.cast(tf.reshape(resized_mask_batch, [-1]), tf.float32)\n mask = tf.reshape(resized_mask_batch, gt.get_shape())\n\n # Calculate the masked 
loss \n epsilon = 0.00001 * tf.ones(prediction.get_shape(), tf.float32)\n if mask_batch != None:\n loss = tf.losses.sparse_softmax_cross_entropy(logits=prediction+epsilon, labels=gt, weights=mask)\n else:\n loss = tf.losses.sparse_softmax_cross_entropy(logits=prediction+epsilon, labels=gt)\n reduced_loss = tf.reduce_mean(loss)\n print(loss)\n\n\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if update_ops:\n updates = tf.group(*update_ops)\n reduced_loss = control_flow_ops.with_dependencies([updates], reduced_loss)\n\n return pred, reduced_loss"
] | [
[
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.reduce_mean",
"tensorflow.get_collection",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.shape",
"tensorflow.name_scope",
"tensorflow.one_hot",
"tensorflow.argmax",
"tensorflow.group"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
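The model in the row above computes a masked pixel-wise loss with `tf.losses.sparse_softmax_cross_entropy`. A toy sketch of that masking pattern is shown below; it assumes a TensorFlow 1.x runtime (as listed in possible_versions), and the logits, labels and mask values are made up.

```python
import tensorflow as tf  # assumes TensorFlow 1.x

# Two "pixels" with three classes; the second pixel is masked out.
logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.2, 1.5, 0.3]])
labels = tf.constant([0, 2])
mask = tf.constant([1.0, 0.0])

# Same weighted-loss call used in UnetModel.loss above.
loss = tf.losses.sparse_softmax_cross_entropy(
    logits=logits, labels=labels, weights=mask)
reduced_loss = tf.reduce_mean(loss)

with tf.Session() as sess:
    print(sess.run(reduced_loss))
```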
paolorampazzo/mypySOT | [
"c22a0f297576aa6db79c7f0752d97445195dd9b4"
] | [
"pySOT/experimental_design.py"
] | [
"\"\"\"\n.. module:: experimental_design\n :synopsis: Methods for generating an experimental design.\n\n.. moduleauthor:: David Eriksson <[email protected]>,\n Yi Shen <[email protected]>\n\n:Module: experimental_design\n:Author: David Eriksson <[email protected]>\n Yi Shen <[email protected]>\n\"\"\"\n\nimport numpy as np\nimport pyDOE2 as pydoe\nimport abc\nimport six\nimport itertools\nfrom pySOT.utils import from_unit_box, round_vars\nfrom numpy.linalg import matrix_rank as rank\nfrom scipy.spatial.distance import cdist\nimport warnings\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass ExperimentalDesign(object):\n \"\"\"Base class for experimental designs.\n\n :ivar dim: Number of dimensions\n :ivar num_pts: Number of points in the experimental design\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self): # pragma: no cover\n self.dim = None\n self.num_pts = None\n\n @abc.abstractmethod\n def generate_points(self, lb=None,\n ub=None, int_var=None): # pragma: no cover\n pass\n\n\ndef _expdes_dist(gen, iterations, lb, ub, int_var):\n \"\"\"Helper method for picking the best experimental design.\n\n We generate iterations designs and picks the one the maximizes the\n minimum distance between points. This isn't a perfect criterion, but\n it will help avoid rank-defficient designs such as y=x.\n\n :param lb: Lower bounds\n :type lb: numpy.array\n :param ub: Upper bounds\n :type ub: numpy.array\n :param int_var: Indices of integer variables.\n :type int_var: numpy.array\n\n :return: Experimental design of size num_pts x dim\n :rtype: numpy.ndarray\n \"\"\"\n\n X = None\n best_score = 0\n for _ in range(iterations):\n cand = gen() # Generate a new design\n if all([x is not None for x in [lb, ub]]): # Map and round\n cand = round_vars(from_unit_box(cand, lb, ub), int_var, lb, ub)\n\n dists = cdist(cand, cand)\n np.fill_diagonal(dists, np.inf) # Since these are zero\n score = dists.min().min()\n\n if score > best_score and rank(cand) == cand.shape[1]:\n best_score = score\n X = cand.copy()\n\n if X is None:\n raise ValueError(\"No valid design found, increase num_pts?\")\n return X\n\n\n \nclass LatinHypercube(ExperimentalDesign):\n \"\"\"Latin Hypercube experimental design.\n\n :param dim: Number of dimensions\n :type dim: int\n :param num_pts: Number of desired sampling points\n :type num_pts: int\n :param criterion: Previously passed to pyDOE, now deprecated\n :type criterion: string\n :param iterations: Number of designs to choose from\n :type iterations: int\n\n :ivar dim: Number of dimensions\n :ivar num_pts: Number of points in the experimental design\n :ivar iterations: Number of points in the experimental design\n \"\"\"\n def __init__(self, dim, num_pts, criterion=None, iterations=1000):\n if criterion is not None:\n warnings.warn(\"Criterion is deprecated and will be removed.\")\n self.dim = dim\n self.num_pts = num_pts\n self.iterations = iterations\n\n def generate_points(self, lb=None, ub=None, int_var=None):\n \"\"\"Generate a new experimental design.\n\n You can specify lb, ub, int_var to have the design mapped to a\n specific domain. These inputs are ignored if one of lb\n or ub is None. The design is generated in [0, 1]^d in this case.\n\n :param lb: Lower bounds\n :type lb: numpy.array\n :param ub: Upper bounds\n :type ub: numpy.array\n :param int_var: Indices of integer variables. 
If None, [], or\n np.array([]) we assume all variables are continuous.\n :type int_var: numpy.array\n\n :return: Experimental design of size num_pts x dim\n :rtype: numpy.ndarray\n \"\"\"\n if int_var is None or len(int_var) == 0:\n int_var = np.array([])\n\n def wrapper():\n return pydoe.lhs(self.dim, self.num_pts, iterations=1)\n return _expdes_dist(wrapper, self.iterations, lb, ub, int_var)\n\n\nclass SymmetricLatinHypercube(ExperimentalDesign):\n \"\"\"Symmetric Latin hypercube experimental design.\n\n :param dim: Number of dimensions\n :type dim: int\n :param num_pts: Number of desired sampling points\n :type num_pts: int\n :param iterations: Number of designs to generate and pick the best from\n :type iterations: int\n\n :ivar dim: Number of dimensions\n :ivar num_pts: Number of points in the experimental design\n :ivar iterations: Number of points in the experimental design\n \"\"\"\n def __init__(self, dim, num_pts, iterations=1000):\n self.dim = dim\n self.num_pts = num_pts\n self.iterations = iterations\n\n def generate_points(self, lb=None, ub=None, int_var=None):\n \"\"\"Generate a new experimental design.\n\n You can specify lb, ub, int_var to have the design mapped to a\n specific domain. These inputs are ignored if one of lb\n or ub is None. The design is generated in [0, 1]^d in this case.\n\n :param lb: Lower bounds\n :type lb: numpy.array\n :param ub: Upper bounds\n :type ub: numpy.array\n :param int_var: Indices of integer variables. If None, [], or\n np.array([]) we assume all variables are continuous.\n :type int_var: numpy.array\n\n :return: Experimental design of size num_pts x dim\n :rtype: numpy.ndarray\n \"\"\"\n if int_var is None or len(int_var) == 0:\n int_var = np.array([])\n\n def wrapper():\n return self._slhd()\n return _expdes_dist(wrapper, self.iterations, lb, ub, int_var)\n\n def _slhd(self):\n \"\"\"Generate a symmetric Latin hypercube design in the unit hypercube.\n\n :return: Symmetric Latin hypercube design in the unit hypercube\n of size num_pts x dim\n :rtype: numpy.ndarray\n \"\"\"\n # Generate a one-dimensional array based on sample number\n points = np.zeros([self.num_pts, self.dim])\n points[:, 0] = np.arange(1, self.num_pts+1)\n\n # Get the last index of the row in the top half of the hypercube\n middleind = self.num_pts // 2\n\n # special manipulation if odd number of rows\n if self.num_pts % 2 == 1:\n points[middleind, :] = middleind + 1\n\n # Generate the top half of the hypercube matrix\n for j in range(1, self.dim):\n for i in range(middleind):\n if np.random.random() < 0.5:\n points[i, j] = self.num_pts - i\n else:\n points[i, j] = i + 1\n np.random.shuffle(points[:middleind, j])\n\n # Generate the bottom half of the hypercube matrix\n for i in range(middleind, self.num_pts):\n points[i, :] = self.num_pts + 1 - points[self.num_pts - 1 - i, :]\n\n return (points - 1) / (self.num_pts - 1) # Map to [0, 1]^d\n\n\nclass TwoFactorial(ExperimentalDesign):\n \"\"\"Two-factorial experimental design.\n\n The two-factorial experimental design consists of the corners\n of the unit hypercube, and hence :math:`2^{dim}` points.\n\n :param dim: Number of dimensions\n :type dim: int\n\n :ivar dim: Number of dimensions\n :ivar num_pts: Number of points in the experimental design\n\n :raises ValueError: If dim >= 15\n \"\"\"\n def __init__(self, dim):\n if dim >= 15:\n raise ValueError(\"Refusing to use >= 2^15 points.\")\n self.dim = dim\n self.num_pts = 2 ** dim\n\n def generate_points(self, lb=None, ub=None, int_var=None):\n \"\"\"Generate a two 
factorial design in the unit hypercube.\n\n You can specify lb, ub, int_var to have the design mapped to a\n specific domain. These inputs are ignored if one of lb\n or ub is None. The design is generated in [0, 1]^d in this case.\n\n :param lb: Lower bounds\n :type lb: numpy.array\n :param ub: Upper bounds\n :type ub: numpy.array\n :param int_var: Indices of integer variables. If None, [], or\n np.array([]) we assume all variables are continuous.\n :type int_var: numpy.array\n\n :return: Two factorial design in unit hypercube of size num_pts x dim\n :rtype: numpy.array\n \"\"\"\n if int_var is None or len(int_var) == 0:\n int_var = np.array([])\n\n X = np.array(list(itertools.product([0, 1], repeat=self.dim)))\n if all([x is not None for x in [lb, ub]]): # Map and round\n X = round_vars(from_unit_box(X, lb, ub), int_var, lb, ub)\n return X\n"
] | [
[
"numpy.random.random",
"numpy.linalg.matrix_rank",
"numpy.arange",
"scipy.spatial.distance.cdist",
"numpy.random.shuffle",
"numpy.fill_diagonal",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
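The experimental_design module above defines LatinHypercube and SymmetricLatinHypercube generators whose `generate_points` maps a unit-cube design onto given bounds and rounds integer variables. A short usage sketch against the signatures shown in that row; the bounds and sizes are illustrative and the import path is assumed from the file_path field.

```python
import numpy as np
from pySOT.experimental_design import SymmetricLatinHypercube  # path assumed from file_path

lb = np.zeros(3)
ub = 5.0 * np.ones(3)

# 10 points in 3 dimensions; the first variable is treated as an integer.
slhd = SymmetricLatinHypercube(dim=3, num_pts=10)
X = slhd.generate_points(lb=lb, ub=ub, int_var=np.array([0]))

print(X.shape)   # (10, 3)
print(X[:, 0])   # integer-valued column after rounding
```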
aliborji/ShapeDefence | [
"92da19bb195b5161d997f6ee1cc777b07a714f6f"
] | [
"pix2pix-pytorch/networks.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.optim import lr_scheduler\n\n\ndef get_norm_layer(norm_type='instance'):\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n elif norm_type == 'switchable':\n norm_layer = SwitchNorm2d\n elif norm_type == 'none':\n norm_layer = None\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\ndef get_scheduler(optimizer, opt):\n if opt.lr_policy == 'lambda':\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)\n return lr_l\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n elif opt.lr_policy == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)\n elif opt.lr_policy == 'plateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n elif opt.lr_policy == 'cosine':\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n return scheduler\n\n\n# update learning rate (called once every epoch)\ndef update_learning_rate(scheduler, optimizer):\n scheduler.step()\n lr = optimizer.param_groups[0]['lr']\n print('learning rate = %.7f' % lr)\n\n\ndef init_weights(net, init_type='normal', gain=0.02):\n def init_func(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, gain)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=gain)\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, gain)\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network with %s' % init_type)\n net.apply(init_func)\n\n\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_id='cuda:0'):\n net.to(gpu_id)\n init_weights(net, init_type, gain=init_gain)\n return net\n\n\ndef define_G(input_nc, output_nc, ngf, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_id='cuda:0'):\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)\n \n return init_net(net, init_type, init_gain, gpu_id)\n\n\n# Defines the generator that consists of Resnet blocks between a few\n# downsampling/upsampling operations.\nclass ResnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=9, padding_type='reflect'):\n assert(n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n self.input_nc = input_nc\n self.output_nc = output_nc\n self.ngf = ngf\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n 
self.inc = Inconv(input_nc, ngf, norm_layer, use_bias)\n self.down1 = Down(ngf, ngf * 2, norm_layer, use_bias)\n self.down2 = Down(ngf * 2, ngf * 4, norm_layer, use_bias)\n\n model = []\n for i in range(n_blocks):\n model += [ResBlock(ngf * 4, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n self.resblocks = nn.Sequential(*model)\n\n self.up1 = Up(ngf * 4, ngf * 2, norm_layer, use_bias)\n self.up2 = Up(ngf * 2, ngf, norm_layer, use_bias)\n\n self.outc = Outconv(ngf, output_nc)\n\n def forward(self, input):\n out = {}\n out['in'] = self.inc(input)\n out['d1'] = self.down1(out['in'])\n out['d2'] = self.down2(out['d1'])\n out['bottle'] = self.resblocks(out['d2'])\n out['u1'] = self.up1(out['bottle'])\n out['u2'] = self.up2(out['u1'])\n\n return self.outc(out['u2'])\n\n\nclass Inconv(nn.Module):\n def __init__(self, in_ch, out_ch, norm_layer, use_bias):\n super(Inconv, self).__init__()\n self.inconv = nn.Sequential(\n nn.ReflectionPad2d(3),\n nn.Conv2d(in_ch, out_ch, kernel_size=7, padding=0,\n bias=use_bias),\n norm_layer(out_ch),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n x = self.inconv(x)\n return x\n\n\nclass Down(nn.Module):\n def __init__(self, in_ch, out_ch, norm_layer, use_bias):\n super(Down, self).__init__()\n self.down = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, kernel_size=3,\n stride=2, padding=1, bias=use_bias),\n norm_layer(out_ch),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n x = self.down(x)\n return x\n\n\n# Define a Resnet block\nclass ResBlock(nn.Module):\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n super(ResBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n norm_layer(dim),\n nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return nn.ReLU(True)(out)\n\n\nclass Up(nn.Module):\n def __init__(self, in_ch, out_ch, norm_layer, use_bias):\n super(Up, self).__init__()\n self.up = nn.Sequential(\n # nn.Upsample(scale_factor=2, mode='nearest'),\n # nn.Conv2d(in_ch, out_ch,\n # kernel_size=3, stride=1,\n # padding=1, bias=use_bias),\n nn.ConvTranspose2d(in_ch, out_ch,\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(out_ch),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n x = self.up(x)\n return x\n\n\nclass Outconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(Outconv, self).__init__()\n self.outconv = nn.Sequential(\n nn.ReflectionPad2d(3),\n nn.Conv2d(in_ch, out_ch, kernel_size=7, padding=0),\n nn.Tanh()\n )\n\n def 
forward(self, x):\n x = self.outconv(x)\n return x\n\n\ndef define_D(input_nc, ndf, netD,\n n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', init_gain=0.02, gpu_id='cuda:0'):\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n if netD == 'basic':\n net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n elif netD == 'n_layers':\n net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n elif netD == 'pixel':\n net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)\n else:\n raise NotImplementedError('Discriminator model name [%s] is not recognized' % net)\n\n return init_net(net, init_type, init_gain, gpu_id)\n\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2**n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2**n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n\n if use_sigmoid:\n sequence += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n return self.model(input)\n\n\nclass PixelDiscriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):\n super(PixelDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.net = [\n nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * 2),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\n if use_sigmoid:\n self.net.append(nn.Sigmoid())\n\n self.net = nn.Sequential(*self.net)\n\n def forward(self, input):\n return self.net(input)\n\n\nclass GANLoss(nn.Module):\n def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n if use_lsgan:\n self.loss = nn.MSELoss()\n else:\n self.loss = nn.BCELoss()\n\n def get_target_tensor(self, input, target_is_real):\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(input)\n\n def __call__(self, input, target_is_real):\n target_tensor = self.get_target_tensor(input, target_is_real)\n return self.loss(input, 
target_tensor)\n"
] | [
[
"torch.optim.lr_scheduler.LambdaLR",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.nn.ReplicationPad2d",
"torch.nn.Dropout",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.tensor",
"torch.nn.Sigmoid",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.nn.BCELoss",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.ReflectionPad2d",
"torch.nn.Tanh",
"torch.nn.init.orthogonal_",
"torch.nn.ReLU",
"torch.nn.MSELoss",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
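The networks.py file above builds a ResNet generator, a PatchGAN discriminator and a GAN loss. Below is a CPU-only forward-pass sketch using those factory functions; the module import and tensor sizes are assumptions for illustration, not part of the row.

```python
import torch
from networks import define_G, define_D, GANLoss  # module name assumed from file_path

netG = define_G(input_nc=3, output_nc=3, ngf=64, norm='batch', gpu_id='cpu')
netD = define_D(input_nc=6, ndf=64, netD='basic', gpu_id='cpu')

real_a = torch.randn(1, 3, 256, 256)   # input image
fake_b = netG(real_a)                  # translated image

# The PatchGAN sees the (input, output) pair, as in standard pix2pix training.
pred_fake = netD(torch.cat((real_a, fake_b), dim=1))
loss_g = GANLoss()(pred_fake, True)    # LSGAN (MSE) loss by default

print(fake_b.shape, loss_g.item())
```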
MaksSieve/CourseProject_2nd_Year | [
"ecbe77aa33d0e87231784cdc460c24ce99278928"
] | [
"engine_tests/PiImageSearch/ball_tracking69.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# import the necessary packages\nfrom collections import deque\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport time\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport RPi.GPIO as GPIO\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-v\", \"--video\",\n help=\"path to the (optional) video file\")\nap.add_argument(\"-b\", \"--buffer\", type=int, default=64,\n help=\"max buffer size\")\nargs = vars(ap.parse_args())\n\n# define the lower and upper boundaries of the \"green\"\n# ball in the HSV color space, then initialize the\n# list of tracked points\ngreenUpper = (195, 100, 153)\ngreenLower = (101, 56, 27) \npts = deque(maxlen=args[\"buffer\"])\n\n# if a video path was not supplied, grab the reference\n# to the webcam\nif not args.get(\"video\", False):\n camera = cv2.VideoCapture(0)\n\n# otherwise, grab a reference to the video file\nelse:\n camera = cv2.VideoCapture(args[\"video\"])\n\n#Creating a Pandas DataFrame To Store Data Point\nData_Features = ['x', 'y', 'time']\nData_Points = pd.DataFrame(data = None, columns = Data_Features , dtype = float)\n\n\n#Reading the time in the begining of the video.\nstart = time.time()\n\n# keep looping\nwhile True:\n # grab the current frame\n (grabbed, frame) = camera.read()\n \n #Reading The Current Time\n current_time = time.time() - start\n\n # if we are viewing a video and we did not grab a frame,\n # then we have reached the end of the video\n if args.get(\"video\") and not grabbed:\n break\n\n # resize the frame, blur it, and convert it to the HSV\n # color space\n frame = imutils.resize(frame, width=600)\n # blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # construct a mask for the color \"green\", then perform\n # a series of dilations and erosions to remove any small\n # blobs left in the mask\n mask = cv2.inRange(hsv, greenLower, greenUpper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n # find contours in the mask and initialize the current\n # (x, y) center of the ball\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n \n\n # only proceed if the radius meets a minimum size\n if (radius < 300) & (radius > 10 ) : \n # draw the circle and centroid on the frame,\n # then update the list of tracked points\n cv2.circle(frame, (int(x), int(y)), int(radius),\n (0, 255, 255), 2)\n cv2.circle(frame, center, 5, (0, 0, 255), -1)\n \n #Save The Data Points\n Data_Points.loc[Data_Points.size/3] = [x, y, current_time]\n\n # update the points queue\n pts.appendleft(center)\n\n # loop over the set of tracked points\n for i in range(1, len(pts)):\n # if either of the tracked points are None, ignore them\n if pts[i - 1] is None or pts[i] is None:\n continue\n dY = pts[i][1] - pts[i-1][1]\n GPIO.setmode(GPIO.BCM)\n GPIO.output(4, GPIO.LOW)\n GPIO.output(17, GPIO.LOW)\n GPIO.output(22, GPIO.LOW)\n GPIO.output(27, GPIO.LOW)\n if np.sign(dY) == 1:\n GPIO.output(4, GPIO.HIGH)\n GPIO.output(27, GPIO.HIGH)\n 
else:\n GPIO.output(17, GPIO.HIGH)\n GPIO.output(22, GPIO.HIGH)\n GPIO.cleanup()\n # otherwise, compute the thickness of the line and\n # draw the connecting lines\n thickness = int(np.sqrt(args[\"buffer\"] / float(i + 1)) * 2.5)\n cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)\n\n # show the frame to our screen\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the 'q' key is pressed, stop the loop\n if key == ord(\"q\"):\n break\n#'h' is the focal length of the camera\n#'X0' is the correction term of shifting of x-axis\n#'Y0' is the correction term of shifting of y-axis\n#'time0' is the correction term for correction of starting of time\nh = 0.2\nX0 = -3\nY0 = 20\ntime0 = 0\ntheta0 = 0.3\n\n#Applying the correction terms to obtain actual experimental data\nData_Points['x'] = Data_Points['x']- X0\nData_Points['y'] = Data_Points['y'] - Y0\nData_Points['time'] = Data_Points['time'] - time0\n\n#Calulataion of theta value\nData_Points['theta'] = 2 * np.arctan(Data_Points['y']*0.0000762/h)#the factor correspons to pixel length in real life\nData_Points['theta'] = Data_Points['theta'] - theta0\n\n#Creating the 'Theta' vs 'Time' plot\nplt.plot(Data_Points['theta'], Data_Points['time'])\nplt.xlabel('Theta')\nplt.ylabel('Time')\n\n#Export The Data Points As cvs File and plot\nData_Points.to_csv('Data_Set.csv', sep=\",\")\nplt.savefig('Time_vs_Theta_Graph.svg', transparent= True)\n\n# cleanup the camera and close any open windows\ncamera.release()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.arctan",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.sign",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
NeveIsa/geneal | [
"064b0409912088886bf56fe9a729d74dac92a235"
] | [
"geneal/applications/fitness_functions/continuous.py"
] | [
"import numpy as np\n\n\ndef fitness_functions_continuous(function_number):\n\n if function_number == 1:\n return lambda chromosome: -(np.abs(chromosome[0]) + np.cos(chromosome[0]))\n elif function_number == 2:\n return lambda chromosome: -(np.abs(chromosome[0]) + np.sin(chromosome[0]))\n elif function_number == 3:\n return lambda chromosome: -(chromosome ** 2).sum()\n elif function_number == 4:\n return lambda chromosome: -np.sum(\n np.abs(chromosome) - 10 * np.cos(np.sqrt(np.abs(10 * chromosome)))\n )\n elif function_number == 5:\n return lambda chromosome: -(chromosome[0] ** 2 + chromosome[0]) * np.cos(\n chromosome[0]\n )\n elif function_number == 6:\n return lambda chromosome: -(\n chromosome[0] * np.sin(4 * chromosome[0])\n + 1.1 * chromosome[1] * np.sin(2 * chromosome[1])\n )\n"
] | [
[
"numpy.cos",
"numpy.abs",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
giorgiovaccarino/CSSR | [
"e62d936445abcd0e34844b93db6505e9a59bec04"
] | [
"model/modeling/resnet.py"
] | [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Dequan Wang and Xingyi Zhou\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom .DCNv2.dcn_v2 import DCN\nimport torch.utils.model_zoo as model_zoo\n\nfrom model.utils.misc import _sigmoid\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n\n}\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\ndef fill_up_weights(up):\n w = up.weight.data\n f = math.ceil(w.size(2) / 2)\n c = (2 * f - 1 - f % 2) / (2. 
* f)\n for i in range(w.size(2)):\n for j in range(w.size(3)):\n w[0, 0, i, j] = \\\n (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))\n for c in range(1, w.size(0)):\n w[c, 0, :, :] = w[0, 0, :, :] \n\ndef fill_fc_weights(layers):\n for m in layers.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n # torch.nn.init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, block, layers, heads, head_conv):\n self.inplanes = 64\n self.heads = heads\n self.deconv_with_bias = False\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(\n 3,\n [256, 128, 64],\n [4, 4, 4],\n )\n\n for head in self.heads:\n classes = self.heads[head]\n if head_conv > 0:\n fc = nn.Sequential(\n nn.Conv2d(64, head_conv,\n kernel_size=3, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(head_conv, classes, \n kernel_size=1, stride=1, \n padding=0, bias=True))\n if 'hm' in head:\n fc[-1].bias.data.fill_(-2.19)\n else:\n fill_fc_weights(fc)\n else:\n fc = nn.Conv2d(64, classes, \n kernel_size=1, stride=1, \n padding=0, bias=True)\n if 'hm' in head:\n fc.bias.data.fill_(-2.19)\n else:\n fill_fc_weights(fc)\n self.__setattr__(head, fc)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n fc = DCN(self.inplanes, planes, \n kernel_size=(3,3), stride=1,\n padding=1, dilation=1, deformable_groups=1)\n # fc = nn.Conv2d(self.inplanes, planes,\n # kernel_size=3, stride=1, \n # padding=1, dilation=1, bias=False)\n # fill_fc_weights(fc)\n up = nn.ConvTranspose2d(\n in_channels=planes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n 
padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias)\n fill_up_weights(up)\n\n layers.append(fc)\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n layers.append(up)\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.deconv_layers(x)\n ret = {}\n for head in self.heads:\n ret[head] = self.__getattr__(head)(x)\n ret['hm'] = _sigmoid(ret['hm'])\n return [ret]\n\n def init_weights(self, num_layers):\n if 1:\n url = model_urls['resnet{}'.format(num_layers)]\n pretrained_state_dict = model_zoo.load_url(url)\n print('=> loading pretrained model {}'.format(url))\n self.load_state_dict(pretrained_state_dict, strict=False)\n print('=> init deconv weights from normal distribution')\n for name, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\nresnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])}\n\n\ndef get_pose_net():\n num_layers = 18\n heads = {'hm': 80, 'wh': 2, 'reg': 2}\n head_conv = 64\n block_class, layers = resnet_spec[num_layers]\n\n model = PoseResNet(block_class, layers, heads, head_conv=head_conv)\n model.init_weights(num_layers)\n return model\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ojas-Singh/oOo | [
"ef3be64693c7698d0d34022a1b93cb8dab5c766c"
] | [
"xemia.py"
] | [
"from threading import stack_size\nfrom ximea import xiapi\nfrom imutils.video import FPS\nimport cv2\nimport numpy as np\nimport time\nimport multiprocessing\nfrom multiprocessing import Pool, Queue\nimport sys,os\nimport pickle\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import leastsq\nfrom numba import jit\nmatplotlib.use(\"Qt5agg\")\n\n@jit(nopython=True)\ndef correlation_coefficient( patch1, patch2):\n product = np.mean((patch1 - patch1.mean()) * (patch2 - patch2.mean()))\n stds = patch1.std() * patch2.std()\n if stds == 0:\n return 0\n else:\n product /= stds\n return product\n@jit(nopython=True)\ndef gauss_erf(p,x,y):#p = [height, mean, sigma]\n\treturn y - p[0] * np.exp(-(x-p[1])**2 /(2.0 * p[2]**2))\n@jit(nopython=True)\ndef gauss_eval(x,p):\n\treturn p[0] * np.exp(-(x-p[1])**2 /(2.0 * p[2]**2))\n\n\ndef gaussianFit(X,Y):\n\tsize = len(X)\n\tmaxy = max(Y)\n\thalfmaxy = maxy / 2.0\n\tmean = sum(X*Y)/sum(Y)\n\n\thalfmaxima = X[int(len(X)/2)]\n\tfor k in range(size):\n\t\tif abs(Y[k] - halfmaxy) < halfmaxy/10:\n\t\t\thalfmaxima = X[k]\n\t\t\tbreak\n\tsigma = mean - halfmaxima\n\tpar = [maxy, mean, sigma] # Amplitude, mean, sigma\t\t\t\t\n\ttry:\n\t\tplsq = leastsq(gauss_erf, par,args=(X,Y))\n\texcept:\n\t\treturn None\n\tif plsq[1] > 4:\n\t\tprint('fit failed')\n\t\treturn None\n\n\tpar = plsq[0]\n\tXmore = np.linspace(X[0],X[-1],100)\n\tY = gauss_eval(Xmore, par)\n\n\treturn par[1],Xmore,Y\n\ndef worker(input_q, output_q,stack):\n RESIZE = 128\n while True:\n frameinfo = input_q.get() \n\n\n f = np.fft.fft2(frameinfo[1])\n fshift = np.fft.fftshift(f)\n magnitude_spectrum = 20*np.log(np.abs(fshift))\n magnitude_spectrum = np.asarray(magnitude_spectrum, dtype=np.uint8)\n centroid = None\n R = 4 * RESIZE / 10\n corr = []\n\n for img in stack:\n # corr.append(correlation_coefficient(img, comp_roi.getArrayRegion(magnitude_spectrum)))\n corr.append(correlation_coefficient(img, magnitude_spectrum))\n\n X = np.array(range(len(stack)))\n corr = np.array(corr)\n corr -= min(corr)\n #self.extracted_view.setData(X, corr)\n try:\n centroid, X, corr = gaussianFit(X, corr)\n #self.fitted_view.setData(X, corr)\n output_q.put([frameinfo[0],centroid])\n except Exception as error:\n print(error)\n \n \n\ndef graphdisplayworker(graph_q):\n fig = plt.figure()\n data = [[],[]]\n ax = fig.add_subplot(111)\n fig.show()\n timestart = time.time()\n while True:\n \n if quit:\n break\n for j in range(graph_q.qsize()):\n timestamp,centroid = graph_q.get()\n data[0].append(timestamp-timestart)\n data[1].append(centroid)\n timenowplot = time.time()\n ax.plot(data[0], data[1], color='b')\n plt.pause(0.02)\n ax.set_xlim(left=max(0, timenowplot-timestart-3), right=timenowplot-timestart+1)\n # plt.pause(0.05)\n plt.show(block=False)\n time.sleep(.005)\n cv2.waitKey(1)\n \ndef record(display_q):\n results = []\n quit_state = False\n while not quit_state:\n data = display_q.get()\n timestamp,centroid = data[1]\n results.append((timestamp,centroid))\n graph_q.put((timestamp,centroid))\n quit_state = data[0]\n with open('results.pkl', 'wb') as f:\n pickle.dump(results, f)\n print(\"written to file results.pkl !\")\n\n\nif __name__ == '__main__':\n cam = xiapi.Camera()\n print('Opening first camera...')\n cam.open_device()\n cam.set_exposure(1000)\n cam.set_param('width',128)\n cam.set_param('height',128)\n cam.set_param('downsampling_type', 'XI_SKIPPING')\n cam.set_acq_timing_mode('XI_ACQ_TIMING_MODE_FREE_RUN')\n qu_limit = 10\n workers = 12\n threadn = cv2.getNumberOfCPUs() \n 
print(\"Threads : \", threadn)\n print(\"Workers Spawned : \", workers)\n input_q = Queue(qu_limit) # fps is better if queue is higher but then more lags\n frame_count = 0\n stacksize = 200\n stack=[]\n output_q = Queue()\n display_q = Queue()\n graph_q = Queue()\n quit = False\n all_processes = []\n \n D = multiprocessing.Process(target=graphdisplayworker, args=[graph_q],daemon = False)\n R = multiprocessing.Process(target=record, args=[display_q],daemon = False)\n \n\n \n \n img = xiapi.Image()\n print('Starting data acquisition...')\n cam.start_acquisition()\n fps = FPS().start()\n cam.get_image(img)\n frame = 20*img.get_image_data_numpy()\n roi=cv2.selectROI(frame)\n cv2.destroyAllWindows()\n for i in range(stacksize):\n cam.get_image(img)\n frame = 20*img.get_image_data_numpy()\n stack.append(frame[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])])\n cv2.waitKey(1)\n for i in range(workers):\n p = multiprocessing.Process(target=worker, args=[input_q, output_q,stack],daemon = True)\n p.start()\n all_processes.append(p)\n cv2.waitKey(2)\n R.start()\n D.start()\n \n while quit == False and frame_count <500:\n cam.get_image(img)\n frame = 20*img.get_image_data_numpy()\n input_q.put([time.time(),frame[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])]])\n \n \n if output_q.empty():\n pass # fill up queue\n else:\n frame_count += 1\n dummylist=[]\n for i in range(output_q.qsize()):\n dummylist.append((quit,output_q.get()))\n dummylist.sort()\n for i in dummylist:\n display_q.put(i)\n fps.update() \n \n \n fps.stop() \n quit = True\n \n print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))\n print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))\n display_q.put((quit,output_q.get()))\n time.sleep(4)\n D.terminate()\n R.terminate()\n\n for process in all_processes:\n process.terminate()\n cam.stop_acquisition()\n cam.close_device() \n os._exit(1)\n # sys.exit()\n \n \n \n \n\n \n \n"
] | [
[
"numpy.fft.fft2",
"numpy.abs",
"numpy.linspace",
"numpy.asarray",
"matplotlib.use",
"numpy.fft.fftshift",
"scipy.optimize.leastsq",
"numpy.array",
"numpy.exp",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
SiggiGue/sigfeat | [
"86bb94200dcd4b33c21de1abc01814bf85f97b38",
"86bb94200dcd4b33c21de1abc01814bf85f97b38"
] | [
"examples/example2.py",
"tests/feature/test_delta.py"
] | [
"from sigfeat import Extractor\nfrom sigfeat import feature as fts\n\n\nextractor = Extractor(\n fts.SpectralFlux(),\n fts.SpectralCentroid(),\n fts.SpectralFlatness(),\n fts.SpectralRolloff(),\n fts.SpectralCrestFactor(),\n fts.CrestFactor(),\n fts.ZeroCrossingRate(),\n fts.RootMeanSquare(),\n fts.Peak(),\n)\n\n\nif __name__ == '__main__':\n from pylab import plt\n import pandas as pd\n from pandas.tools.plotting import scatter_matrix\n\n from sigfeat.source.soundfile import SoundFileSource\n from sigfeat.preprocess import MeanMix\n from sigfeat.sink import DefaultDictSink\n\n src = MeanMix(SoundFileSource(\n 'Test.wav',\n blocksize=4096,\n overlap=2048))\n sink = DefaultDictSink()\n extractor.extract(src, sink)\n\n plt.figure(src.source.name)\n for l, r in sink['results'].items():\n plt.plot(r, 'o-', label=str(l))\n plt.legend()\n\n df = pd.DataFrame(sink['results'])\n scatter_matrix(df)\n plt.show()\n",
"import numpy as np\n\nfrom sigfeat.base import Feature\nfrom sigfeat.feature.delta import Delta\nfrom sigfeat.source.array import ArraySource\nfrom sigfeat.extractor import Extractor\nfrom sigfeat.sink import DefaultDictSink\n\n\ndef test_delta():\n class A(Feature):\n def process(self, data, res):\n return float(data[0])\n ex = Extractor(\n Delta(A()),\n Delta(A(), order=2)\n )\n x = np.arange(0.0, 10.0, 2)\n src = ArraySource(x, samplerate=1, blocksize=1, channels=1)\n snk = ex.extract(src, DefaultDictSink())\n res = snk['results']\n assert np.allclose(np.array(res['A']).flatten(), x)\n assert np.mean(np.array(res['dA']).flatten()[1:]) == 2.0\n assert np.mean(np.array(res['ddA']).flatten()[2:]) == 0.0\n assert res['dA'][0] is None\n assert res['ddA'][0] is None\n\n\nif __name__ == '__main__':\n import pytest\n pytest.main() # pragma: no coverage\n"
] | [
[
"pandas.DataFrame",
"pandas.tools.plotting.scatter_matrix"
],
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.19"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bsburnham/striplog | [
"0c68f63d645c5bb7a5cc73b9bdaa197c4fb3cc33"
] | [
"striplog/striplog.py"
] | [
"\"\"\"\nA striplog is a sequence of intervals.\n\n:copyright: 2019 Agile Geoscience\n:license: Apache 2.0\n\"\"\"\nimport re\nfrom io import StringIO\nimport csv\nimport operator\nimport warnings\nfrom collections import defaultdict\nfrom collections import OrderedDict\nfrom functools import reduce\nfrom copy import deepcopy\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport requests\nimport json\n\nfrom .interval import Interval, IntervalError\nfrom .component import Component\nfrom .legend import Legend\nfrom .canstrat import parse_canstrat\nfrom .markov import Markov_chain\nfrom . import utils\nfrom . import templates\n\n\nclass StriplogError(Exception):\n \"\"\"\n Generic error class.\n \"\"\"\n pass\n\n\nclass Striplog(object):\n \"\"\"\n A Striplog is a sequence of intervals.\n\n We will build them from LAS files or CSVs.\n\n Args:\n list_of_Intervals (list): A list of Interval objects.\n source (str): A source for the data. Default None.\n order (str): 'auto', 'depth', 'elevation', or 'none'. Please refer to\n the documentation for details. Best idea is to let the default\n work. Default: 'auto'.\n \"\"\"\n def __init__(self, list_of_Intervals, source=None, order='auto'):\n\n list_of_Intervals = deepcopy(list_of_Intervals)\n\n if not list_of_Intervals:\n m = \"Cannot create an empty Striplog.\"\n raise StriplogError(m)\n\n if order.lower()[0] == 'a': # Auto\n # If bases == tops, then this is a bunch of 'points'.\n if all([iv.base.z == iv.top.z for iv in list_of_Intervals]):\n order = 'none'\n self.order = 'none'\n # We will tolerate zero-thickness intervals mixed in.\n elif all([iv.base.z >= iv.top.z for iv in list_of_Intervals]):\n order = 'depth'\n self.order = 'depth'\n elif all([iv.base.z <= iv.top.z for iv in list_of_Intervals]):\n order = 'elevation'\n self.order = 'elevation'\n else:\n m = \"Could not determine order from tops and bases.\"\n raise StriplogError(m)\n\n if order.lower()[0] == 'n':\n self.order = 'none'\n # Sanity check\n fail = any([iv.base.z != iv.top.z for iv in list_of_Intervals])\n if fail:\n m = \"'None' order specified but tops != bases.\"\n raise StriplogError(m)\n # Order force\n list_of_Intervals.sort(key=operator.attrgetter('top'))\n\n elif order.lower()[0] == 'd':\n self.order = 'depth'\n # Sanity check\n fail = any([iv.base.z < iv.top.z for iv in list_of_Intervals])\n if fail:\n m = \"Depth order specified but base above top.\"\n raise StriplogError(m)\n # Order force\n list_of_Intervals.sort(key=operator.attrgetter('top'))\n\n else:\n self.order = 'elevation'\n fail = any([iv.base.z > iv.top.z for iv in list_of_Intervals])\n if fail:\n m = \"Elevation order specified but base above top.\"\n raise StriplogError(m)\n # Order force\n r = True\n list_of_Intervals.sort(key=operator.attrgetter('top'), reverse=r)\n\n self.source = source\n\n self.__list = list_of_Intervals\n self.__index = 0 # Set up iterable.\n\n def __repr__(self):\n length = len(self.__list)\n details = \"start={}, stop={}\".format(self.start.z, self.stop.z)\n return \"Striplog({0} Intervals, {1})\".format(length, details)\n\n def __str__(self):\n s = [str(i) for i in self.__list]\n return '\\n'.join(s)\n\n def __getitem__(self, key):\n if type(key) is slice:\n i = key.indices(len(self.__list))\n result = [self.__list[n] for n in range(*i)]\n if result:\n return Striplog(result)\n else:\n return None\n elif type(key) is list:\n result = []\n for j in key:\n result.append(self.__list[j])\n if result:\n return Striplog(result)\n else:\n return 
None\n else:\n return self.__list[key]\n\n def __delitem__(self, key):\n if (type(key) is list) or (type(key) is tuple):\n # Have to compute what the indices *will* be as\n # the initial ones are deleted.\n indices = [x-i for i, x in enumerate(key)]\n for k in indices:\n del self.__list[k]\n else:\n del self.__list[key]\n return\n\n def __len__(self):\n return len(self.__list)\n\n def __setitem__(self, key, value):\n if not key:\n return\n try:\n for i, j in enumerate(key):\n self.__list[j] = value[i]\n except TypeError:\n self.__list[key] = value\n except IndexError:\n raise StriplogError(\"There must be one Interval for each index.\")\n\n def __iter__(self):\n return iter(self.__list)\n\n def __next__(self):\n \"\"\"\n Supports iterable.\n \"\"\"\n try:\n result = self.__list[self.__index]\n except IndexError:\n self.__index = 0\n raise StopIteration\n self.__index += 1\n return result\n\n def next(self):\n \"\"\"\n For Python 2 compatibility.\n \"\"\"\n return self.__next__()\n\n def __contains__(self, item):\n for r in self.__list:\n if item in r.components:\n return True\n return False\n\n def __reversed__(self):\n return Striplog(self.__list[::-1])\n\n def __add__(self, other):\n if isinstance(other, self.__class__):\n result = self.__list + other.__list\n return Striplog(result)\n elif isinstance(other, Interval):\n result = self.__list + [other]\n return Striplog(result)\n else:\n raise StriplogError(\"You can only add striplogs or intervals.\")\n\n def insert(self, index, item):\n if isinstance(item, self.__class__):\n for i, iv in enumerate(item):\n self.__list.insert(index+i, iv)\n elif isinstance(item, Interval):\n self.__list.insert(index, item)\n return\n else:\n raise StriplogError(\"You can only insert striplogs or intervals.\")\n\n def append(self, item):\n \"\"\"\n Implements list-like `append()` method.\n \"\"\"\n if isinstance(item, Interval):\n self.__list.append(item)\n return\n else:\n m = \"You can only append an Interval to a Striplog.\"\n raise StriplogError(m)\n\n def extend(self, item):\n \"\"\"\n Implements list-like `extend()` method.\n \"\"\"\n if isinstance(item, self.__class__):\n self.__list += item\n return\n else:\n m = \"You can only extend a Striplog with another Striplog.\"\n raise StriplogError(m)\n\n def pop(self, index):\n \"\"\"\n Implements list-like `pop()` method.\n \"\"\"\n self.__list.pop(index)\n\n @property\n def start(self):\n \"\"\"\n Property. The closest Position to the datum.\n\n Returns:\n Position.\n \"\"\"\n if self.order == 'depth':\n # Too naive if intervals can overlap:\n # return self[0].top\n return min(i.top for i in self)\n else:\n return min(i.base for i in self)\n\n @property\n def stop(self):\n \"\"\"\n Property. The furthest Position from the datum.\n\n Returns:\n Position.\n \"\"\"\n if self.order == 'depth':\n return max(i.base for i in self)\n else:\n return max(i.top for i in self)\n\n def __sort(self):\n \"\"\"\n Private method. Sorts into 'natural' order: top-down for depth-ordered\n striplogs; bottom-up for elevation-ordered.\n\n Sorts in place.\n\n Returns:\n None.\n \"\"\"\n self.__list.sort(key=operator.attrgetter('top'))\n return\n\n def __strict(self):\n \"\"\"\n Private method. Checks if striplog is monotonically increasing in\n depth.\n\n Returns:\n Bool.\n \"\"\"\n def conc(a, b):\n return a + b\n\n # Check boundaries, b\n b = np.array(reduce(conc, [[i.top.z, i.base.z] for i in self]))\n\n return all(np.diff(b) >= 0)\n\n @property\n def cum(self):\n \"\"\"\n Property. 
Gives the cumulative thickness of all filled intervals.\n\n It would be nice to use sum() for this (by defining __radd__),\n but I quite like the ability to add striplogs and get a striplog\n and I don't think we can have both, it's too confusing.\n\n Not calling it sum, because that's a keyword.\n\n Returns:\n Float. The cumulative thickness.\n \"\"\"\n total = 0.0\n for i in self:\n total += i.thickness\n return total\n\n @property\n def mean(self):\n \"\"\"\n Property. Returns the mean thickness of all filled intervals.\n\n Returns:\n Float. The mean average of interval thickness.\n \"\"\"\n return self.cum / len(self)\n\n @property\n def components(self):\n \"\"\"\n Property. Returns the list of compenents in the striplog.\n\n Returns:\n List. A list of the unique components.\n \"\"\"\n return [i[0] for i in self.unique if i[0]]\n\n @property\n def unique(self):\n \"\"\"\n Property. Summarize a Striplog with some statistics.\n\n Returns:\n List. A list of (Component, total thickness thickness) tuples.\n \"\"\"\n all_rx = set([iv.primary for iv in self])\n table = {r: 0 for r in all_rx}\n for iv in self:\n table[iv.primary] += iv.thickness\n\n return sorted(table.items(), key=operator.itemgetter(1), reverse=True)\n\n @property\n def top(self):\n \"\"\"\n Property.\n \"\"\"\n # For backwards compatibility.\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"Striplog.top is deprecated; please use Striplog.unique\"\n warnings.warn(w, DeprecationWarning, stacklevel=2)\n return self.unique\n\n @classmethod\n def __intervals_from_tops(self,\n tops,\n values,\n basis,\n components,\n field=None,\n ignore_nan=True):\n \"\"\"\n Private method. Take a sequence of tops in an arbitrary dimension,\n and provide a list of intervals from which a striplog can be made.\n\n This is only intended to be used by ``from_image()``.\n\n Args:\n tops (iterable). A list of floats.\n values (iterable). A list of values to look up.\n basis (iterable). A list of components.\n components (iterable). A list of Components.\n\n Returns:\n List. A list of Intervals.\n \"\"\"\n # Scale tops to actual depths.\n length = float(basis.size)\n start, stop = basis[0], basis[-1]\n tops = [start + (p/(length-1)) * (stop-start) for p in tops]\n bases = tops[1:] + [stop]\n\n list_of_Intervals = []\n for i, t in enumerate(tops):\n\n v, c, d = values[i], [], {}\n\n if ignore_nan and np.isnan(v):\n continue\n\n if (field is not None):\n d = {field: v}\n\n if components is not None:\n try:\n c = [deepcopy(components[int(v)])]\n except IndexError:\n c = []\n\n if c and (c[0] is None):\n c = []\n\n interval = Interval(t, bases[i], data=d, components=c)\n list_of_Intervals.append(interval)\n\n return list_of_Intervals\n\n @classmethod\n def _clean_longitudinal_data(cls, data, null=None):\n \"\"\"\n Private function. 
Make sure we have what we need to make a striplog.\n \"\"\"\n\n # Rename 'depth' or 'MD'\n if ('top' not in data.keys()):\n data['top'] = data.pop('depth', data.pop('MD', None))\n\n # Sort everything\n idx = list(data.keys()).index('top')\n values = sorted(zip(*data.values()), key=lambda x: x[idx])\n data = {k: list(v) for k, v in zip(data.keys(), zip(*values))}\n\n if data['top'] is None:\n raise StriplogError('Could not get tops.')\n\n # Get rid of null-like values if specified.\n if null is not None:\n for k, v in data.items():\n data[k] = [i if i != null else None for i in v]\n\n return data\n\n @classmethod\n def from_petrel(cls, filename,\n stop=None,\n points=False,\n null=None,\n function=None,\n include=None,\n exclude=None,\n remap=None,\n ignore=None):\n\n \"\"\"\n Makes a striplog from a Petrel text file.\n\n Returns:\n striplog.\n \"\"\"\n result = utils.read_petrel(filename,\n function=function,\n remap=remap,\n )\n\n data = cls._clean_longitudinal_data(result,\n null=null\n )\n\n list_of_Intervals = cls._build_list_of_Intervals(data,\n stop=stop,\n points=points,\n include=include,\n exclude=exclude,\n ignore=ignore\n )\n if list_of_Intervals:\n return cls(list_of_Intervals)\n return None\n\n @classmethod\n def _build_list_of_Intervals(cls,\n data_dict,\n stop=None,\n points=False,\n include=None,\n exclude=None,\n ignore=None,\n lexicon=None):\n \"\"\"\n Private function. Takes a data dictionary and constructs a list\n of Intervals from it.\n\n Args:\n data_dict (dict)\n stop (float): Where to end the last interval.\n points (bool)\n include (dict)\n exclude (dict)\n ignore (list)\n lexicon (Lexicon)\n\n Returns:\n list.\n \"\"\"\n\n include = include or {}\n exclude = exclude or {}\n ignore = ignore or []\n\n # Reassemble as list of dicts\n all_data = []\n for data in zip(*data_dict.values()):\n all_data.append({k: v for k, v in zip(data_dict.keys(), data)})\n\n # Sort\n all_data = sorted(all_data, key=lambda x: x['top'])\n\n # Filter down:\n wanted_data = []\n for dictionary in all_data:\n keep = True\n delete = []\n for k, v in dictionary.items():\n incl = include.get(k, utils.null_default(True))\n excl = exclude.get(k, utils.null_default(False))\n if k in ignore:\n delete.append(k)\n if not incl(v):\n keep = False\n if excl(v):\n keep = False\n if delete:\n for key in delete:\n _ = dictionary.pop(key, None)\n if keep:\n wanted_data.append(dictionary)\n\n # Fill in\n if not points:\n for i, iv in enumerate(wanted_data):\n if iv.get('base', None) is None:\n try: # To set from next interval\n iv['base'] = wanted_data[i+1]['top']\n except (IndexError, KeyError):\n # It's the last interval\n if stop is not None:\n thick = stop - iv['top']\n else:\n thick = 1\n iv['base'] = iv['top'] + thick\n\n # Build the list of intervals to pass to __init__()\n list_of_Intervals = []\n for iv in wanted_data:\n top = iv.pop('top')\n base = iv.pop('base', None)\n descr = iv.pop('description', '')\n if iv:\n c, d = {}, {}\n for k, v in iv.items():\n match1 = (k[:9].lower() == 'component')\n match2 = (k[:5].lower() == 'comp ')\n if match1 or match2:\n k = re.sub(r'comp(?:onent)? 
', '', k, flags=re.I)\n c[k] = v # It's a component\n else:\n if v is not None:\n d[k] = v # It's data\n comp = [Component(c)] if c else None\n this = Interval(**{'top': top,\n 'base': base,\n 'description': descr,\n 'data': d,\n 'components': comp})\n else:\n this = Interval(**{'top': top,\n 'base': base,\n 'description': descr,\n 'lexicon': lexicon})\n list_of_Intervals.append(this)\n\n return list_of_Intervals\n\n @classmethod\n def from_csv(cls, filename=None,\n text=None,\n dlm=',',\n lexicon=None,\n points=False,\n include=None,\n exclude=None,\n remap=None,\n function=None,\n null=None,\n ignore=None,\n source=None,\n stop=None,\n fieldnames=None):\n \"\"\"\n Load from a CSV file or text.\n\n Args\n filename (str): The filename, or use `text`.\n text (str): CSV data as a string, or use `filename`.\n dlm (str): The delimiter, default ','.\n lexicon (Lexicon): The lexicon to use, optional. Only needed if \\\n parsing descriptions (e.g. cuttings).\n points (bool): Whether to make a point dataset (as opposed to \\\n ordinary intervals with top and base. Default is False.\n include: Default is None.\n exclude: Default is None.\n remap: Default is None.\n function: Default is None.\n null: Default is None.\n ignore: Default is None.\n source: Default is None.\n stop: Default is None.\n fieldnames: Default is None.\n\n Returns\n Striplog. A new instance.\n \"\"\"\n if (filename is None) and (text is None):\n raise StriplogError(\"You must provide a filename or CSV text.\")\n\n if (filename is not None):\n if source is None:\n source = filename\n with open(filename, 'r') as f:\n text = f.read()\n\n source = source or 'CSV'\n\n # Deal with multiple spaces in space delimited file.\n if dlm == ' ':\n text = re.sub(r'[ \\t]+', ' ', text)\n\n if fieldnames is not None:\n text = dlm.join(fieldnames) + '\\n' + text\n\n try:\n f = StringIO(text) # Python 3\n except TypeError:\n f = StringIO(unicode(text)) # Python 2\n\n reader = csv.DictReader(f, delimiter=dlm)\n\n # Reorganize the data to make fixing it easier.\n reorg = {k.strip().lower(): []\n for k in reader.fieldnames\n if k is not None}\n t = f.tell()\n for key in reorg:\n f.seek(t)\n for r in reader:\n s = {k.strip().lower(): v.strip() for k, v in r.items()}\n try:\n reorg[key].append(float(s[key]))\n except ValueError:\n reorg[key].append(s[key])\n\n f.close()\n\n remap = remap or {}\n for k, v in remap.items():\n reorg[v] = reorg.pop(k)\n\n data = cls._clean_longitudinal_data(reorg, null=null)\n\n list_of_Intervals = cls._build_list_of_Intervals(data,\n points=points,\n lexicon=lexicon,\n include=include,\n exclude=exclude,\n ignore=ignore,\n stop=stop)\n\n return cls(list_of_Intervals, source=source)\n\n @classmethod\n def from_dict(cls, dictionary):\n \"\"\"\n Take a dictionary of the form name:depth and return a striplog of\n complete intervals.\n \"\"\"\n d_sorted = sorted(dictionary.items(), key=lambda i: i[1])\n names = [i[0] for i in d_sorted]\n tops_ = [i[1] for i in d_sorted]\n bases_ = tops_[1:] + [tops_[-1]+1]\n comps_ = [Component({'formation': name}) for name in names]\n\n list_of_Intervals = []\n for top, base, comp in zip(tops_, bases_, comps_):\n iv = Interval(top=top, base=base, components=[comp])\n list_of_Intervals.append(iv)\n\n return cls(list_of_Intervals)\n\n @classmethod\n def from_descriptions(cls, text,\n lexicon=None,\n source='CSV',\n dlm=',',\n points=False,\n abbreviations=False,\n complete=False,\n order='depth',\n columns=None,\n ):\n \"\"\"\n Convert a CSV string into a striplog. 
Expects 2 or 3 fields:\n top, description\n OR\n top, base, description\n\n Args:\n text (str): The input text, given by ``well.other``.\n lexicon (Lexicon): A lexicon, required to extract components.\n source (str): A source. Default: 'CSV'.\n dlm (str): The delimiter, given by ``well.dlm``. Default: ','\n points (bool): Whether to treat as points or as intervals.\n abbreviations (bool): Whether to expand abbreviations in the\n description. Default: False.\n complete (bool): Whether to make 'blank' intervals, or just leave\n gaps. Default: False.\n order (str): The order, 'depth' or 'elevation'. Default: 'depth'.\n columns (tuple or list): The names of the columns.\n\n Returns:\n Striplog: A ``striplog`` object.\n\n Example:\n # TOP BOT LITH\n 312.34, 459.61, Sandstone\n 459.71, 589.61, Limestone\n 589.71, 827.50, Green shale\n 827.60, 1010.84, Fine sandstone\n \"\"\"\n\n text = re.sub(r'(\\n+|\\r\\n|\\r)', '\\n', text.strip())\n\n as_strings = []\n try:\n f = StringIO(text) # Python 3\n except TypeError:\n f = StringIO(unicode(text)) # Python 2\n reader = csv.reader(f, delimiter=dlm, skipinitialspace=True)\n for row in reader:\n as_strings.append(row)\n f.close()\n\n if not columns:\n if order[0].lower() == 'e':\n columns = ('base', 'top', 'description')\n else:\n columns = ('top', 'base', 'description')\n\n result = {k: [] for k in columns}\n\n # Set the indices for the fields.\n tix = columns.index('top')\n bix = columns.index('base')\n dix = columns.index('description')\n\n for i, row in enumerate(as_strings):\n\n # THIS ONLY WORKS FOR MISSING TOPS!\n if len(row) == 2:\n row = [row[0], None, row[1]]\n\n # TOP\n this_top = float(row[tix])\n\n # THIS ONLY WORKS FOR MISSING TOPS!\n # BASE\n # Base is null: use next top if this isn't the end.\n if row[1] is None:\n if i < len(as_strings)-1:\n this_base = float(as_strings[i+1][0]) # Next top.\n else:\n this_base = this_top + 1 # Default to 1 m thick at end.\n else:\n this_base = float(row[bix])\n\n # DESCRIPTION\n this_descr = row[dix].strip()\n\n # Deal with making intervals or points...\n if not points:\n # Insert intervals where needed.\n if complete and (i > 0) and (this_top != result['base'][-1]):\n result['top'].append(result['base'][-1])\n result['base'].append(this_top)\n result['description'].append('')\n else:\n this_base = None # Gets set to Top in striplog creation\n\n # ASSIGN\n result['top'].append(this_top)\n result['base'].append(this_base)\n result['description'].append(this_descr)\n\n # Build the list.\n list_of_Intervals = []\n for i, t in enumerate(result['top']):\n b = result['base'][i]\n d = result['description'][i]\n interval = Interval(t, b, description=d,\n lexicon=lexicon,\n abbreviations=abbreviations)\n list_of_Intervals.append(interval)\n\n return cls(list_of_Intervals, source=source)\n\n @classmethod\n def from_image(cls, filename, start, stop, legend,\n source=\"Image\",\n col_offset=0.1,\n row_offset=2,\n tolerance=0,\n background=None):\n \"\"\"\n Read an image and generate Striplog.\n\n Args:\n filename (str): An image file, preferably high-res PNG.\n start (float or int): The depth at the top of the image.\n stop (float or int): The depth at the bottom of the image.\n legend (Legend): A legend to look up the components in.\n source (str): A source for the data. Default: 'Image'.\n col_offset (Number): The proportion of the way across the image\n from which to extract the pixel column. Default: 0.1 (ie 10%).\n row_offset (int): The number of pixels to skip at the top of\n each change in colour. 
Default: 2.\n tolerance (float): The Euclidean distance between hex colours,\n which has a maximum (black to white) of 441.67 in base 10.\n Default: 0.\n background (array): A background colour (as hex) to ignore.\n\n Returns:\n Striplog: The ``striplog`` object.\n \"\"\"\n if background is None:\n bg = \"#xxxxxx\"\n else:\n bg = background\n rgb = utils.loglike_from_image(filename, col_offset)\n loglike = np.array([utils.rgb_to_hex(t) for t in rgb if utils.rgb_to_hex(t) != bg])\n\n # Get the pixels and colour values at 'tops' (i.e. changes).\n tops, hexes = utils.tops_from_loglike(loglike, offset=row_offset)\n\n # If there are consecutive tops, we assume it's because there is a\n # single-pixel row that we don't want. So take the second one only.\n # We used to do this reduction in ``utils.tops_from_loglike()`` but\n # it was preventing us from making intervals only one sample thick.\n nonconsecutive = np.append(np.diff(tops), 2)\n tops = tops[nonconsecutive > 1]\n hexes = hexes[nonconsecutive > 1]\n\n # Get the set of unique colours.\n hexes_reduced = list(set(hexes))\n\n # Get the components corresponding to the colours.\n components = [legend.get_component(h, tolerance=tolerance)\n for h in hexes_reduced]\n\n # Turn them into integers.\n values = [hexes_reduced.index(i) for i in hexes]\n\n basis = np.linspace(start, stop, loglike.size)\n\n list_of_Intervals = cls.__intervals_from_tops(tops,\n values,\n basis,\n components)\n\n list_of_Intervals = [iv for iv in list_of_Intervals\n if isinstance(iv.primary, Component)]\n\n return cls(list_of_Intervals, source=\"Image\")\n\n @classmethod\n def from_img(cls, *args, **kwargs):\n \"\"\"\n For backwards compatibility.\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"from_img() is deprecated; please use from_image()\"\n warnings.warn(w)\n return cls.from_image(*args, **kwargs)\n\n @classmethod\n def _from_array(cls, a,\n lexicon=None,\n source=\"\",\n points=False,\n abbreviations=False):\n \"\"\"\n DEPRECATING.\n\n Turn an array-like into a Striplog. It should have the following\n format (where ``base`` is optional):\n\n [(top, base, description),\n (top, base, description),\n ...\n ]\n\n Args:\n a (array-like): A list of lists or of tuples, or an array.\n lexicon (Lexicon): A language dictionary to extract structured\n objects from the descriptions.\n source (str): The source of the data. Default: ''.\n points (bool): Whether to treat as point data. Default: False.\n\n Returns:\n Striplog: The ``striplog`` object.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"from_array() is deprecated.\"\n warnings.warn(w, DeprecationWarning, stacklevel=2)\n\n csv_text = ''\n for interval in a:\n interval = [str(i) for i in interval]\n if (len(interval) < 2) or (len(interval) > 3):\n raise StriplogError('Elements must have 2 or 3 items')\n descr = interval[-1].strip('\" ')\n interval[-1] = '\"' + descr + '\"'\n csv_text += ', '.join(interval) + '\\n'\n\n return cls.from_descriptions(csv_text,\n lexicon,\n source=source,\n points=points,\n abbreviations=abbreviations)\n\n @classmethod\n def from_log(cls, log,\n cutoff=None,\n components=None,\n legend=None,\n legend_field=None,\n field=None,\n right=False,\n basis=None,\n source='Log'):\n \"\"\"\n Turn a 1D array into a striplog, given a cutoff.\n\n Args:\n log (array-like): A 1D array or a list of integers.\n cutoff (number or array-like): The log value(s) at which to bin\n the log. 
Optional.\n components (array-like): A list of components. Use this or\n ``legend``.\n legend (``Legend``): A legend object. Use this or ``components``.\n legend_field ('str'): If you're not trying to match against\n components, then you can match the log values to this field in\n the Decors.\n field (str): The field in the Interval's ``data`` to store the log\n values as.\n right (bool): Which side of the cutoff to send things that are\n equal to, i.e. right on, the cutoff.\n basis (array-like): A depth basis for the log, so striplog knows\n where to put the boundaries.\n source (str): The source of the data. Default 'Log'.\n\n Returns:\n Striplog: The ``striplog`` object.\n \"\"\"\n if (components is None) and (legend is None) and (field is None):\n m = 'You must provide a list of components and legend, or a field.'\n raise StriplogError(m)\n\n if (legend is not None) and (legend_field is None):\n try: # To treat it like a legend.\n components = [deepcopy(decor.component) for decor in legend]\n except AttributeError: # It's just a list of components.\n pass\n\n if legend_field is not None:\n field_values = [getattr(d, legend_field, 0) for d in legend]\n components = [Component() for i in range(int(max(field_values)+1))]\n for i, decor in enumerate(legend):\n components[i] = deepcopy(decor.component)\n\n if cutoff is not None:\n\n # First make sure we have enough components.\n try:\n n = len(cutoff)\n except TypeError:\n n = 1\n if len(components) < n+1:\n m = 'For n cutoffs, you need to provide at least'\n m += 'n+1 components.'\n raise StriplogError(m)\n\n # Digitize.\n try: # To use cutoff as a list.\n a = np.digitize(log, cutoff, right)\n except ValueError: # It's just a number.\n a = np.digitize(log, [cutoff], right)\n\n else:\n a = np.copy(log)\n\n tops, values = utils.tops_from_loglike(a)\n\n if basis is None:\n m = 'You must provide a depth or elevation basis.'\n raise StriplogError(m)\n\n list_of_Intervals = cls.__intervals_from_tops(tops,\n values,\n basis,\n components,\n field=field\n )\n\n return cls(list_of_Intervals, source=source)\n\n @classmethod\n def from_las3(cls, string, lexicon=None,\n source=\"LAS\",\n dlm=',',\n abbreviations=False):\n \"\"\"\n Turn LAS3 'lithology' section into a Striplog.\n\n Args:\n string (str): A section from an LAS3 file.\n lexicon (Lexicon): The language for conversion to components.\n source (str): A source for the data.\n dlm (str): The delimiter.\n abbreviations (bool): Whether to expand abbreviations.\n\n Returns:\n Striplog: The ``striplog`` object.\n\n Note:\n Handles multiple 'Data' sections. It would be smarter for it\n to handle one at a time, and to deal with parsing the multiple\n sections in the Well object.\n\n Does not read an actual LAS file. 
Use the Well object for that.\n \"\"\"\n f = re.DOTALL | re.IGNORECASE\n regex = r'\\~\\w+?_Data.+?\\n(.+?)(?:\\n\\n+|\\n*\\~|\\n*$)'\n pattern = re.compile(regex, flags=f)\n text = pattern.search(string).group(1)\n\n s = re.search(r'\\.(.+?)\\: ?.+?source', string)\n if s:\n source = s.group(1).strip()\n\n return cls.from_descriptions(text, lexicon,\n source=source,\n dlm=dlm,\n abbreviations=abbreviations)\n\n @classmethod\n def from_canstrat(cls, filename, source='canstrat'):\n \"\"\"\n Eat a Canstrat DAT file and make a striplog.\n \"\"\"\n with open(filename) as f:\n dat = f.read()\n\n data = parse_canstrat(dat)\n\n list_of_Intervals = []\n for d in data[7]: # 7 is the 'card type' for lithology info.\n if d.pop('skip'):\n continue\n top = d.pop('top')\n base = d.pop('base')\n comps = [Component({'lithology': d['rtc'],\n 'colour': d['colour_name']\n })]\n iv = Interval(top=top, base=base, components=comps, data=d)\n list_of_Intervals.append(iv)\n\n return cls(list_of_Intervals, source=source)\n\n def copy(self):\n \"\"\"Returns a shallow copy.\"\"\"\n return Striplog([i.copy() for i in self],\n order=self.order,\n source=self.source)\n\n \n\n\n\n # Outputter\n def to_canstrat(self, filename, params):\n \"\"\"\n Write a Canstrat ASCII file.\n\n Args:\n filename (str)\n params (dict): The well details. You can use a ``welly`` header\n object.\n\n Returns:\n\n \"\"\"\n\n return None\n\n # Outputter\n def to_csv(self,\n filename=None,\n as_text=True,\n use_descriptions=False,\n dlm=\",\",\n header=True):\n \"\"\"\n Returns a CSV string built from the summaries of the Intervals.\n\n Args:\n use_descriptions (bool): Whether to use descriptions instead\n of summaries, if available.\n dlm (str): The delimiter.\n header (bool): Whether to form a header row.\n\n Returns:\n str: A string of comma-separated values.\n \"\"\"\n if (filename is None):\n if (not as_text):\n m = \"You must provide a filename or set as_text to True.\"\n raise StriplogError(m)\n else:\n as_text = False\n\n if as_text:\n output = StringIO()\n else:\n output = open(filename, 'w')\n\n fieldnames = ['Top', 'Base', 'Component']\n writer = csv.DictWriter(output,\n delimiter=dlm,\n fieldnames=fieldnames,\n quoting=csv.QUOTE_MINIMAL)\n\n if header:\n writer.writeheader()\n\n for i in self.__list:\n if use_descriptions and i.description:\n text = i.description\n elif i.primary:\n text = i.primary.summary()\n else:\n text = ''\n d = {j: k for j, k in zip(fieldnames, [i.top.z, i.base.z, text])}\n writer.writerow(d)\n\n if as_text:\n return output.getvalue()\n else:\n output.close\n return None\n\n # Outputter\n def to_las3(self, use_descriptions=False, dlm=\",\", source=\"Striplog\"):\n \"\"\"\n Returns an LAS 3.0 section string.\n\n Args:\n use_descriptions (bool): Whether to use descriptions instead\n of summaries, if available.\n dlm (str): The delimiter.\n source (str): The sourse of the data.\n\n Returns:\n str: A string forming Lithology section of an LAS3 file.\n \"\"\"\n data = self.to_csv(use_descriptions=use_descriptions,\n dlm=dlm,\n header=False)\n\n return templates.section.format(name='Lithology',\n short=\"LITH\",\n source=source,\n data=data)\n\n # Outputter\n def to_log(self,\n step=1.0,\n start=None,\n stop=None,\n basis=None,\n field=None,\n field_function=None,\n bins=True,\n dtype='float',\n table=None,\n sort_table=False,\n legend=None,\n legend_field=None,\n match_only=None,\n undefined=0,\n return_meta=False\n ):\n \"\"\"\n Return a fully sampled log from a striplog. 
Useful for crossplotting\n with log data, for example.\n\n Args:\n step (float): The step size. Default: 1.0.\n start (float): The start depth of the new log. You will want to\n match the logs, so use the start depth from the LAS file.\n Default: The basis if provided, else the start of the striplog.\n stop (float): The stop depth of the new log. Use the stop depth\n of the LAS file. Default: The basis if provided, else the stop\n depth of the striplog.\n field (str): If you want the data to come from one of the\n attributes of the components in the striplog, provide it.\n field_function (function): Provide a function to apply to the field\n you are asking for. It's up to you to make sure the function\n does what you want.\n bins (bool): Whether to return the index of the items from the\n lookup table. If False, then the item itself will be returned. \n dtype (str): The NumPy dtype string for the output log.\n table (list): Provide a look-up table of values if you want. If you\n don't, then it will be constructed from the data.\n sort_table (bool): Whether to sort the table or not. Default: False.\n legend (Legend): If you want the codes to come from a legend,\n provide one. Otherwise the codes come from the log, using\n integers in the order of prevalence. If you use a legend,\n they are assigned in the order of the legend.\n legend_field (str): If you want to get a log representing one of\n the fields in the legend, such as 'width' or 'grainsize'.\n match_only (list): If you only want to match some attributes of\n the Components (e.g. lithology), provide a list of those\n you want to match.\n undefined (number): What to fill in where no value can be\n determined, e.g. ``-999.25`` or ``np.nan``. Default 0.\n return_meta (bool): If ``True``, also return the depth basis\n (np.linspace), and the component table.\n\n Returns:\n ndarray: If ``return_meta`` was ``True``, you get:\n\n * The log data as an array of ints.\n * The depth basis as an array of floats.\n * A list of the components in the order matching the ints.\n\n If ``return_meta`` was ``False`` (the default), you only get\n the log data.\n \"\"\"\n # Make the preparations.\n if basis is not None:\n start, stop = basis[0], basis[-1]\n step = basis[1] - start\n else:\n start = start or self.start.z\n stop = stop or self.stop.z\n pts = np.ceil((stop - start)/step) + 1\n basis = np.linspace(start, stop, int(pts))\n\n if (field is not None) or (legend_field is not None):\n result = np.zeros_like(basis, dtype=dtype)\n else:\n result = np.zeros_like(basis, dtype=np.int)\n\n if np.isnan(undefined):\n try:\n result[:] = np.nan\n except:\n pass # array type is int\n\n # If needed, make a look-up table for the log values.\n if table is None:\n if legend:\n table = [j.component for j in legend]\n elif field:\n s = set([iv.data.get(field) for iv in self])\n table = list(filter(None, s))\n else:\n table = [j[0] for j in self.unique]\n\n # Adjust the table if necessary. Go over all the components in the\n # table list, and remove elements that are not in the match list.\n # Careful! 
This results in a new table, with components that may not\n # be in the original list of components.\n if match_only is not None:\n if not isinstance(match_only, (list, tuple, set,)):\n raise StriplogError(\"match_only should be type list not str.\")\n table_new = []\n for c in table:\n if c == '':\n continue # No idea why sometimes there's a ''\n c_new = Component({k: v for k, v in c.__dict__.items()\n if k in match_only})\n # Only add unique, and preserve order.\n if c_new not in table_new:\n table_new.append(c_new)\n table = table_new\n else:\n match_only = []\n\n if sort_table:\n table.sort()\n\n start_ix = self.read_at(start, index=True)\n stop_ix = self.read_at(stop, index=True)\n if stop_ix is not None:\n stop_ix += 1\n\n # Assign the values.\n for i in self[start_ix:stop_ix]:\n c = i.primary\n if match_only:\n c = Component({k: getattr(c, k, None)\n for k in match_only})\n\n if legend and legend_field: # Use the legend field.\n try:\n key = legend.getattr(c, legend_field, undefined)\n key = key or undefined\n except ValueError:\n key = undefined\n elif field: # Get data directly from that field in iv.data.\n f = field_function or utils.null\n try:\n v = f(i.data.get(field, undefined)) or undefined\n if bins:\n # Then return the bin we're in...\n key = (table.index(v) + 1) or undefined\n else:\n # ...else return the actual value.\n key = v\n except ValueError:\n key = undefined\n else: # Use the lookup table.\n try:\n key = (table.index(c) + 1) or undefined\n except ValueError:\n key = undefined\n\n top_index = int(np.ceil((max(start, i.top.z)-start)/step))\n base_index = int(np.ceil((min(stop, i.base.z)-start)/step))\n\n try:\n result[top_index:base_index+1] = key\n except: # Have a list or array or something.\n result[top_index:base_index+1] = key[0]\n\n if return_meta:\n return result, basis, table\n else:\n return result\n\n def to_flag(self, **kwargs):\n \"\"\"\n A wrapper for ``to_log()`` that returns a boolean array.\n Useful for masking. 
Has the same interface as ``to_log()``.\n \"\"\"\n return self.to_log(**kwargs).astype(bool)\n\n def plot_points(self, ax,\n legend=None,\n field=None,\n field_function=None,\n undefined=0,\n **kwargs):\n \"\"\"\n Plotting, but only for points (as opposed to intervals).\n \"\"\"\n\n ys = [iv.top.z for iv in self]\n\n if field is not None:\n f = field_function or utils.null\n xs = [f(iv.data.get(field, undefined)) for iv in self]\n else:\n xs = [1 for iv in self]\n\n ax.set_xlim((min(xs), max(xs)))\n for x, y in zip(xs, ys):\n ax.axhline(y, color='lightgray', zorder=0)\n\n ax.scatter(xs, ys, clip_on=False, **kwargs)\n\n return ax\n\n def plot_tops(self, ax, legend=None, field=None, **kwargs):\n \"\"\"\n Plotting, but only for tops (as opposed to intervals).\n \"\"\"\n if field is None:\n raise StriplogError('You must provide a field to plot.')\n\n ys = [iv.top.z for iv in self]\n\n try:\n try:\n ts = [getattr(iv.primary, field) for iv in self]\n except:\n ts = [iv.data.get(field) for iv in self]\n except:\n raise StriplogError('Could not retrieve field.')\n\n for y, t in zip(ys, ts):\n ax.axhline(y, color='lightblue', lw=3, zorder=0)\n ax.text(0.1, y-max(ys)/200, t, ha='left')\n\n return ax\n\n def plot_field(self, ax, legend=None, field=None, **kwargs):\n \"\"\"\n Plotting, but only for tops (as opposed to intervals).\n \"\"\"\n if field is None:\n raise StriplogError('You must provide a field to plot.')\n\n try:\n try:\n xs = [getattr(iv.primary, field) for iv in self]\n except:\n xs = [iv.data.get(field) for iv in self]\n except:\n raise StriplogError('Could not retrieve field.')\n\n for iv, x in zip(self.__list, xs):\n _, ymin = utils.axis_transform(ax, 0, iv.base.z, ylim=(self.start.z, self.stop.z), inverse=True)\n _, ymax = utils.axis_transform(ax, 0, iv.top.z, ylim=(self.start.z, self.stop.z), inverse=True)\n ax.axvline(x, ymin=ymin, ymax=ymax)\n\n return ax\n\n def max_field(self, field):\n return max(filter(None, [iv.data.get(field) for iv in self]))\n\n def plot_axis(self,\n ax,\n legend,\n ladder=False,\n default_width=1,\n match_only=None,\n colour=None,\n colour_function=None,\n cmap=None,\n default=None,\n width_field=None,\n **kwargs\n ):\n \"\"\"\n Plotting, but only the Rectangles. You have to set up the figure.\n Returns a matplotlib axis object.\n\n Args:\n ax (axis): The matplotlib axis to plot into.\n legend (Legend): The Legend to use for colours, etc.\n ladder (bool): Whether to use widths or not. Default False.\n default_width (int): A width for the plot if not using widths.\n Default 1.\n match_only (list): A list of strings matching the attributes you\n want to compare when plotting.\n colour (str): Which data field to use for colours.\n cmap (cmap): Matplotlib colourmap. 
Default ``viridis``.\n default (float): The default (null) value.\n width_field (str): The field to use for the width of the patches.\n **kwargs are passed through to matplotlib's ``patches.Rectangle``.\n\n Returns:\n axis: The matplotlib.pyplot axis.\n \"\"\"\n default_c = None\n patches = []\n for iv in self.__list:\n origin = (0, iv.top.z)\n d = legend.get_decor(iv.primary, match_only=match_only)\n thick = iv.base.z - iv.top.z\n\n if ladder:\n if width_field is not None:\n w = iv.data.get(width_field, 1)\n w = default_width * w/self.max_field(width_field)\n default_c = 'gray'\n elif legend is not None:\n w = d.width or default_width\n try:\n w = default_width * w/legend.max_width\n except:\n w = default_width\n else:\n w = default_width\n\n # Allow override of lw\n this_patch_kwargs = kwargs.copy()\n lw = this_patch_kwargs.pop('lw', 0)\n ec = this_patch_kwargs.pop('ec', 'k')\n fc = this_patch_kwargs.pop('fc', None) or default_c or d.colour\n\n if colour is None:\n rect = mpl.patches.Rectangle(origin,\n w,\n thick,\n fc=fc,\n lw=lw,\n hatch=d.hatch,\n ec=ec, # edgecolour for hatching\n **this_patch_kwargs)\n ax.add_patch(rect)\n else:\n rect = mpl.patches.Rectangle(origin,\n w,\n thick,\n lw=lw,\n ec=ec, # edgecolour for hatching\n **this_patch_kwargs)\n patches.append(rect)\n\n if colour is not None:\n cmap = cmap or 'viridis'\n p = mpl.collections.PatchCollection(patches, cmap=cmap, lw=lw)\n p.set_array(self.get_data(colour,\n colour_function,\n default=default\n ))\n ax.add_collection(p)\n cb = plt.colorbar(p)\n cb.outline.set_linewidth(0)\n\n return ax\n\n def get_data(self, field, function=None, default=None):\n \"\"\"\n Get data from the striplog.\n \"\"\"\n f = function or utils.null\n data = []\n for iv in self:\n d = iv.data.get(field)\n if d is None:\n if default is not None:\n d = default\n else:\n d = np.nan\n data.append(f(d))\n\n return np.array(data)\n\n # Outputter\n def plot(self,\n legend=None,\n width=1.5,\n ladder=True,\n aspect=10,\n ticks=(1, 10),\n match_only=None,\n ax=None,\n return_fig=False,\n colour=None,\n cmap='viridis',\n default=None,\n style='intervals',\n field=None,\n label=None,\n **kwargs):\n \"\"\"\n Hands-free plotting.\n\n Args:\n legend (Legend): The Legend to use for colours, etc.\n width (int): The width of the plot, in inches. Default 1.\n ladder (bool): Whether to use widths or not. Default False.\n aspect (int): The aspect ratio of the plot. Default 10.\n ticks (int or tuple): The (minor,major) tick interval for depth.\n Only the major interval is labeled. Default (1,10).\n match_only (list): A list of strings matching the attributes you\n want to compare when plotting.\n ax (ax): A maplotlib axis to plot onto. If you pass this, it will\n be returned. Optional.\n return_fig (bool): Whether or not to return the maplotlib ``fig``\n object. Default False.\n colour (str): Which data field to use for colours.\n cmap (cmap): Matplotlib colourmap. Default ``viridis``.\n **kwargs are passed through to matplotlib's ``patches.Rectangle``.\n\n Returns:\n None. 
Unless you specify ``return_fig=True`` or pass in an ``ax``.\n \"\"\"\n if legend is None:\n legend = Legend.random(self.components)\n\n if style.lower() == 'tops':\n # Make sure width is at least 3 for 'tops' style\n width = max([3, width])\n\n if ax is None:\n return_ax = False\n fig = plt.figure(figsize=(width, aspect*width))\n ax = fig.add_axes([0.35, 0.05, 0.6, 0.95])\n else:\n return_ax = True\n\n if (self.order == 'none') or (style.lower() == 'points'):\n # Then this is a set of points.\n ax = self.plot_points(ax=ax, legend=legend, field=field, **kwargs)\n elif style.lower() == 'field':\n if field is None:\n raise StriplogError('You must provide a field to plot.')\n ax = self.plot_field(ax=ax, legend=legend, field=field)\n elif style.lower() == 'tops':\n ax = self.plot_tops(ax=ax, legend=legend, field=field)\n ax.set_xticks([])\n else:\n ax = self.plot_axis(ax=ax,\n legend=legend,\n ladder=ladder,\n default_width=width,\n match_only=kwargs.get('match_only',\n match_only),\n colour=colour,\n cmap=cmap,\n default=default,\n width_field=field,\n **kwargs\n )\n\n ax.set_xlim([0, width])\n ax.set_xticks([])\n\n # Rely on interval order.\n if self.order == 'depth':\n upper, lower = self.start.z, self.stop.z\n else:\n upper, lower = self.stop.z, self.start.z\n rng = abs(upper - lower)\n\n ax.set_ylim([lower, upper])\n\n if label is not None:\n for iv in self.__list:\n plt.text(1.6, iv.middle, iv.primary[label], ha='left', va='center', size=10)\n\n # Make sure ticks is a tuple.\n try:\n ticks = tuple(ticks)\n except TypeError:\n ticks = (1, ticks)\n\n # Avoid MAXTICKS error.\n while rng/ticks[0] > 250:\n mi, ma = 10*ticks[0], ticks[1]\n if ma <= mi:\n ma = 10 * mi\n ticks = (mi, ma)\n\n # Carry on plotting...\n minorLocator = mpl.ticker.MultipleLocator(ticks[0])\n ax.yaxis.set_minor_locator(minorLocator)\n\n majorLocator = mpl.ticker.MultipleLocator(ticks[1])\n majorFormatter = mpl.ticker.FormatStrFormatter('%d')\n ax.yaxis.set_major_locator(majorLocator)\n ax.yaxis.set_major_formatter(majorFormatter)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.get_yaxis().set_tick_params(which='both', direction='out')\n\n # Optional title.\n title = getattr(self, 'title', None)\n if title is not None:\n ax.set_title(title)\n\n ax.patch.set_alpha(0)\n\n if return_ax:\n return ax\n elif return_fig:\n return fig\n else:\n return\n\n def shift(self, delta=None, start=None):\n \"\"\"\n Shift all the intervals by `delta` (negative numbers are 'up')\n or by setting a new start depth.\n\n Returns a copy of the striplog.\n \"\"\"\n new_strip = self.copy()\n if delta is None:\n if start is None:\n raise StriplogError(\"You must provide a delta or a new start.\")\n delta = start - self.start.z\n for iv in new_strip:\n iv.top = iv.top.z + delta\n iv.base = iv.base.z + delta\n return new_strip\n\n def read_at(self, d, index=False):\n \"\"\"\n Get the index of the interval at a particular 'depth' (though this\n might be an elevation or age or anything).\n\n Args:\n d (Number): The 'depth' to query.\n index (bool): Whether to return the index instead of the interval.\n\n Returns:\n Interval: The interval, or if ``index==True`` the index of the\n interval, at the specified 'depth', or ``None`` if the depth is\n outside the striplog's range.\n \"\"\"\n for i, iv in enumerate(self):\n if iv.spans(d):\n return i if index else iv\n return None\n\n def depth(self, d):\n \"\"\"\n For backwards compatibility.\n 
\"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"depth() is deprecated; please use read_at()\"\n warnings.warn(w)\n return self.read_at(d)\n\n def extract(self, log, basis, name, function=None):\n \"\"\"\n 'Extract' a log into the components of a striplog.\n\n Args:\n log (array_like). A log or other 1D data.\n basis (array_like). The depths or elevations of the log samples.\n name (str). The name of the attribute to store in the components.\n function (function). A function that takes an array as the only\n input, and returns whatever you want to store in the 'name'\n attribute of the primary component.\n\n Returns:\n A copy of the striplog.\n \"\"\"\n # Build a dict of {index: [log values]} to keep track.\n intervals = {}\n previous_ix = -1\n for i, z in enumerate(basis):\n ix = self.read_at(z, index=True)\n if ix is None:\n continue\n if ix == previous_ix:\n intervals[ix].append(log[i])\n else:\n intervals[ix] = [log[i]]\n previous_ix = ix\n\n # Set the requested attribute in the primary comp of each interval.\n new_strip = self.copy()\n for ix, data in intervals.items():\n f = function or utils.null\n d = f(np.array(data))\n new_strip[ix].data[name] = d\n\n return new_strip\n\n def find(self, search_term, index=False):\n \"\"\"\n Look for a regex expression in the descriptions of the striplog.\n If there's no description, it looks in the summaries.\n\n If you pass a Component, then it will search the components, not the\n descriptions or summaries.\n\n Case insensitive.\n\n Args:\n search_term (string or Component): The thing you want to search\n for. Strings are treated as regular expressions.\n index (bool): Whether to return the index instead of the interval.\n\n Returns:\n Striplog: A striplog that contains only the 'hit' Intervals.\n However, if ``index`` was ``True``, then that's what you get.\n \"\"\"\n hits = []\n for i, iv in enumerate(self):\n try:\n search_text = iv.description or iv.primary.summary()\n pattern = re.compile(search_term, flags=re.IGNORECASE)\n if pattern.search(search_text):\n hits.append(i)\n except TypeError:\n if search_term in iv.components:\n hits.append(i)\n if hits and index:\n return hits\n elif hits:\n return self[hits]\n else:\n return\n\n def __find_incongruities(self, op, index):\n \"\"\"\n Private method. Finds gaps and overlaps in a striplog. Called by\n find_gaps() and find_overlaps().\n\n Args:\n op (operator): ``operator.gt`` or ``operator.lt``\n index (bool): If ``True``, returns indices of intervals with\n gaps after them.\n\n Returns:\n Striplog: A striplog of all the gaps. 
A sort of anti-striplog.\n \"\"\"\n if len(self) == 1:\n return\n\n hits = []\n intervals = []\n\n if self.order == 'depth':\n one, two = 'base', 'top'\n else:\n one, two = 'top', 'base'\n\n for i, iv in enumerate(self[:-1]):\n next_iv = self[i+1]\n if op(getattr(iv, one), getattr(next_iv, two)):\n hits.append(i)\n\n top = getattr(iv, one)\n base = getattr(next_iv, two)\n iv_gap = Interval(top, base)\n intervals.append(iv_gap)\n\n if index and hits:\n return hits\n elif intervals:\n return Striplog(intervals)\n else:\n return\n\n def find_overlaps(self, index=False):\n \"\"\"\n Find overlaps in a striplog.\n\n Args:\n index (bool): If True, returns indices of intervals with\n gaps after them.\n\n Returns:\n Striplog: A striplog of all the overlaps as intervals.\n \"\"\"\n return self.__find_incongruities(op=operator.gt, index=index)\n\n def find_gaps(self, index=False):\n \"\"\"\n Finds gaps in a striplog.\n\n Args:\n index (bool): If True, returns indices of intervals with\n gaps after them.\n\n Returns:\n Striplog: A striplog of all the gaps. A sort of anti-striplog.\n \"\"\"\n return self.__find_incongruities(op=operator.lt, index=index)\n\n def prune(self, limit=None, n=None, percentile=None, keep_ends=False):\n \"\"\"\n Remove intervals below a certain limit thickness. In place.\n\n Args:\n limit (float): Anything thinner than this will be pruned.\n n (int): The n thinnest beds will be pruned.\n percentile (float): The thinnest specified percentile will be\n pruned.\n keep_ends (bool): Whether to keep the first and last, regardless\n of whether they meet the pruning criteria.\n \"\"\"\n strip = self.copy()\n\n if not (limit or n or percentile):\n m = \"You must provide a limit or n or percentile for pruning.\"\n raise StriplogError(m)\n if limit:\n prune = [i for i, iv in enumerate(strip) if iv.thickness < limit]\n if n:\n prune = strip.thinnest(n=n, index=True)\n if percentile:\n n = np.floor(len(strip)*percentile/100)\n prune = strip.thinnest(n=n, index=True)\n\n if keep_ends:\n first, last = 0, len(strip) - 1\n if first in prune:\n prune.remove(first)\n if last in prune:\n prune.remove(last)\n\n del strip[prune]\n\n return strip\n\n def anneal(self, mode='middle'):\n \"\"\"\n Fill in empty intervals by growing from top and base.\n\n Note that this operation happens in-place and destroys any information\n about the ``Position`` (e.g. metadata associated with the top or base).\n See GitHub issue #54.\n\n If there are overlaps in your striplog, then this method may have\n unexpected results.\n\n Args\n mode (str): One of ['down', 'middle', 'up']. Which way to 'flood'\n into the gaps.\n\n Returns\n Striplog. 
A new instance of the Striplog class.\n \"\"\"\n strip = deepcopy(self)\n\n gaps = strip.find_gaps(index=True)\n\n if not gaps:\n return\n\n for gap in gaps:\n before = strip[gap]\n after = strip[gap + 1]\n\n if mode == 'middle':\n if strip.order == 'depth':\n t = (after.top.z-before.base.z)/2\n before.base = before.base.z + t\n after.top = after.top.z - t\n else:\n t = (after.base-before.top)/2\n before.top = before.top.z + t\n after.base = after.base.z - t\n\n elif mode == 'down':\n if strip.order == 'depth':\n before.base = after.top.z\n else:\n before.top = after.base.z\n\n elif mode == 'up':\n if strip.order == 'depth':\n after.top = before.base.z\n else:\n after.base = before.top.z\n\n return strip\n\n def fill(self, component=None):\n \"\"\"\n Fill gaps with the component provided.\n\n Example\n t = s.fill(Component({'lithology': 'cheese'}))\n \"\"\"\n c = [component] if component is not None else []\n\n # Make the intervals to go in the gaps.\n gaps = self.find_gaps()\n if not gaps:\n return self\n for iv in gaps:\n iv.components = c\n\n return deepcopy(self) + gaps\n\n def union(self, other):\n \"\"\"\n Makes a striplog of all unions.\n\n Args:\n Striplog. The striplog instance to union with.\n\n Returns:\n Striplog. The result of the union.\n \"\"\"\n if not isinstance(other, self.__class__):\n m = \"You can only union striplogs with each other.\"\n raise StriplogError(m)\n\n result = []\n for iv in deepcopy(self):\n for jv in other:\n if iv.any_overlaps(jv):\n iv = iv.union(jv)\n result.append(iv)\n return Striplog(result)\n\n def intersect(self, other):\n \"\"\"\n Makes a striplog of all intersections.\n\n Args:\n Striplog. The striplog instance to intersect with.\n\n Returns:\n Striplog. The result of the intersection.\n \"\"\"\n if not isinstance(other, self.__class__):\n m = \"You can only intersect striplogs with each other.\"\n raise StriplogError(m)\n\n result = []\n for iv in self:\n for jv in other:\n try:\n result.append(iv.intersect(jv))\n except IntervalError:\n # The intervals don't overlap\n pass\n return Striplog(result)\n\n def merge_overlaps(self):\n \"\"\"\n Merges overlaps by merging overlapping Intervals.\n\n The function takes no arguments and returns ``None``. It operates on\n the striplog 'in place'\n\n TODO: This function will not work if any interval overlaps more than\n one other intervals at either its base or top.\n \"\"\"\n overlaps = np.array(self.find_overlaps(index=True))\n\n if not overlaps.any():\n return\n\n for overlap in overlaps:\n before = self[overlap].copy()\n after = self[overlap + 1].copy()\n\n # Get rid of the before and after pieces.\n del self[overlap]\n del self[overlap]\n\n # Make the new piece.\n new_segment = before.merge(after)\n\n # Insert it.\n self.insert(overlap, new_segment)\n\n overlaps += 1\n\n return\n\n def merge_neighbours(self, strict=True):\n \"\"\"\n Makes a new striplog in which matching neighbours (for which the\n components are the same) are unioned. That is, they are replaced by\n a new Interval with the same top as the uppermost and the same bottom\n as the lowermost.\n\n Args\n strict (bool): If True, then all of the components must match.\n If False, then only the primary must match.\n\n Returns:\n Striplog. 
A new striplog.\n\n TODO:\n Might need to be tweaked to deal with 'binary striplogs' if those\n aren't implemented with components.\n \"\"\"\n new_strip = [self[0].copy()]\n\n for lower in self[1:]:\n\n # Determine if touching.\n touching = new_strip[-1].touches(lower)\n\n # Decide if match.\n if strict:\n similar = new_strip[-1].components == lower.components\n else:\n similar = new_strip[-1].primary == lower.primary\n\n # Union if both criteria met.\n if touching and similar:\n new_strip[-1] = new_strip[-1].union(lower)\n else:\n new_strip.append(lower.copy())\n\n return Striplog(new_strip)\n\n def thickest(self, n=1, index=False):\n \"\"\"\n Returns the thickest interval(s) as a striplog.\n\n Args:\n n (int): The number of thickest intervals to return. Default: 1.\n index (bool): If True, only the indices of the intervals are\n returned. You can use this to index into the striplog.\n\n Returns:\n Interval. The thickest interval. Or, if ``index`` was ``True``,\n the index of the thickest interval.\n \"\"\"\n s = sorted(range(len(self)), key=lambda k: self[k].thickness)\n indices = s[-n:]\n if index:\n return indices\n else:\n if n == 1:\n # Then return an interval.\n i = indices[0]\n return self[i]\n else:\n return self[indices]\n\n def thinnest(self, n=1, index=False):\n \"\"\"\n Returns the thinnest interval(s) as a striplog.\n\n Args:\n n (int): The number of thickest intervals to return. Default: 1.\n index (bool): If True, only the indices of the intervals are\n returned. You can use this to index into the striplog.\n\n Returns:\n Interval. The thickest interval. Or, if ``index`` was ``True``,\n the index of the thickest interval.\n\n TODO:\n If you ask for the thinnest bed and there's a tie, you will\n get the last in the ordered list.\n \"\"\"\n s = sorted(range(len(self)), key=lambda k: self[k].thickness)\n indices = s[:n]\n if index:\n return indices\n else:\n if n == 1:\n i = indices[0]\n return self[i]\n else:\n return self[indices]\n\n def hist(self,\n lumping=None,\n summary=False,\n sort=True,\n plot=True,\n legend=None,\n ax=None,\n rotation=0,\n ha='center',\n ):\n \"\"\"\n Plots a histogram and returns the data for it.\n\n Args:\n lumping (str): If given, the bins will be lumped based on this\n attribute of the primary components of the intervals\n encountered.\n summary (bool): If True, the summaries of the components are\n returned as the bins. 
Otherwise, the default behaviour is to\n return the Components themselves.\n sort (bool): If True (default), the histogram is sorted by value,\n starting with the largest.\n plot (bool): If True (default), produce a bar plot.\n legend (Legend): The legend with which to colour the bars.\n ax (axis): An axis object, which will be returned if provided.\n If you don't provide one, it will be created but not returned.\n rotation (int): The rotation angle of the x-axis tick labels.\n Default is 0 but -45 is useful.\n ha (str): The horizontal alignment of the x-axis tick labels.\n Default is 'center' but 'left' is good for -ve rotation.\n\n Returns:\n Tuple: A tuple of tuples of entities and counts.\n\n TODO:\n Deal with numeric properties, so I can histogram 'Vp' values, say.\n \"\"\"\n # This seems like overkill, but collecting all this stuff gives\n # the user some choice about what they get back.\n entries = OrderedDict()\n for i in self:\n if lumping:\n k = i.primary[lumping]\n else:\n if summary:\n k = i.primary.summary()\n else:\n k = i.primary\n v = entries.get(k, {'thick': 0}).get('thick', 0)\n \n entries[k] = {\n 'label': i.primary.summary(),\n 'colour': legend.get_colour(i.primary) if legend else None,\n 'thick': v + i.thickness,\n }\n\n if sort:\n allitems = sorted(entries.items(),\n key=lambda i: i[1]['thick'],\n reverse=True\n )\n ents, data = zip(*allitems)\n else:\n ents, data = tuple(entries.keys()), tuple(entries.values())\n \n counts = [d['thick'] for d in data]\n\n # Make plot.\n if plot:\n if ax is None:\n fig, ax = plt.subplots()\n return_ax = False\n else:\n return_ax = True\n\n ind = np.arange(len(ents))\n bars = ax.bar(ind, counts, align='center')\n ax.set_xticks(ind)\n ax.set_xticklabels([d['label'] for d in data],\n rotation=rotation,\n ha=ha)\n if legend:\n colours = [d['colour'] for d in data]\n for b, c in zip(bars, colours):\n b.set_color(c)\n ax.set_ylabel('Thickness [m]')\n else:\n bars = []\n\n if plot and return_ax:\n return counts, ents, ax\n\n return counts, ents, bars\n\n histogram = hist\n\n def bar(self, height='thickness', sort=False, reverse=False,\n legend=None, ax=None, figsize=None, **kwargs):\n \"\"\"\n Make a bar plot of thickness per interval.\n\n Args:\n height (str): The property of the primary component to plot.\n sort (bool or function): Either pass a boolean indicating whether\n to reverse sort by thickness, or pass a function to be used as\n the sort key.\n reverse (bool): Reverses the sort order.\n legend (Legend): The legend to plot with.\n ax (axis): Optional axis to plot to.\n figsize (tuple): A figure size, (width, height), optional.\n **kwargs: passed to the matplotlib bar plot command, ax.bar().\n\n Returns:\n axis: If you sent an axis in, you get it back.\n \"\"\"\n if sort:\n if sort is True:\n def func(x): return x.thickness\n reverse = True\n data = sorted(self, key=func, reverse=reverse)\n else:\n data = self[:]\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n heights = [getattr(i, height) for i in data]\n\n comps = [i[0] for i in self.unique]\n\n if legend is None:\n legend = Legend.random(comps)\n\n colors = [legend.get_colour(i.primary) for i in data]\n\n bars = ax.bar(range(len(data)), height=heights, color=colors, **kwargs)\n\n # Legend.\n colourables = [i.primary.summary() for i in data]\n unique_bars = dict(zip(colourables, bars))\n ax.legend(list(unique_bars.values()), list(unique_bars.keys()))\n\n ax.set_ylabel(height.title())\n\n return ax\n\n def invert(self, copy=False):\n \"\"\"\n Inverts the striplog, 
changing its order and the order of its contents.\n\n Operates in place by default.\n\n Args:\n copy (bool): Whether to operate in place or make a copy.\n\n Returns:\n None if operating in-place, or an inverted copy of the striplog\n if not.\n \"\"\"\n if copy:\n return Striplog([i.invert(copy=True) for i in self])\n else:\n for i in self:\n i.invert()\n self.__sort()\n o = self.order\n self.order = {'depth': 'elevation', 'elevation': 'depth'}[o]\n return\n\n def crop(self, extent, copy=False):\n \"\"\"\n Crop to a new depth range.\n\n Args:\n extent (tuple): The new start and stop depth. Must be 'inside'\n existing striplog.\n copy (bool): Whether to operate in place or make a copy.\n\n Returns:\n Operates in place by deault; if copy is True, returns a striplog.\n \"\"\"\n try:\n if extent[0] is None:\n extent = (self.start.z, extent[1])\n if extent[1] is None:\n extent = (extent[0], self.stop.z)\n except:\n m = \"You must provide a 2-tuple for the new extents. Use None for\"\n m += \" the existing start or stop.\"\n raise StriplogError(m)\n\n first_ix = self.read_at(extent[0], index=True)\n last_ix = self.read_at(extent[1], index=True)\n\n first = self[first_ix].split_at(extent[0])[1]\n last = self[last_ix].split_at(extent[1])[0]\n\n new_list = self.__list[first_ix:last_ix+1].copy()\n new_list[0] = first\n new_list[-1] = last\n\n if copy:\n return Striplog(new_list)\n else:\n self.__list = new_list\n return\n\n def net_to_gross(strip, attr):\n \"\"\"\n Compute the ratio of intervals having that attribute as `True` to the\n total thickness.\n\n TODO\n Allow user to give a cut-off value to apply to the attribute,\n if it's a continuous scalar and not boolean.\n\n Args\n attr (str): Which attribute to use. Must have boolean values.\n\n Returns\n float. The net:gross ratio.\n \"\"\"\n net = non = 0\n for c, x in strip.unique:\n if getattr(c, attr):\n net = x\n else:\n non = x\n return net / (net + non)\n\n def quality(self, tests, alias=None):\n \"\"\"\n Run a series of tests and return the corresponding results.\n\n Based on curve testing for ``welly``.\n\n Args:\n tests (list): a list of functions.\n\n Returns:\n list. The results. Stick to booleans (True = pass) or ints.\n \"\"\"\n # This is hacky... striplog should probably merge with welly...\n\n # Ignore aliases\n alias = alias or {}\n alias = alias.get('striplog', alias.get('Striplog', []))\n\n # Gather the tests.\n # First, anything called 'all', 'All', or 'ALL'.\n # Second, anything with the name of the curve we're in now.\n # Third, anything that the alias list has for this curve.\n # (This requires a reverse look-up so it's a bit messy.)\n this_tests =\\\n tests.get('all', [])+tests.get('All', [])+tests.get('ALL', [])\\\n + tests.get('striplog', tests.get('Striplog', []))\\\n + utils.flatten_list([tests.get(a) for a in alias])\n this_tests = filter(None, this_tests)\n\n # If we explicitly set zero tests for a particular key, then this\n # overrides the 'all' tests.\n if not tests.get('striplog', tests.get('Striplog', 1)):\n this_tests = []\n\n return {test.__name__: test(self) for test in this_tests}\n\n @property\n def _table(self):\n \"\"\"\n A table (list of tuples) of the tops and bases we encounter, starting\n at the top. 
We will need to know 3 things: whether it's a top or a\n base, the depth it's at, and which interval in the striplog it\n corresponds to.\n \"\"\"\n table = []\n for i, interval in enumerate(self):\n table.append(('T', interval.top.middle, i))\n table.append(('B', interval.base.middle, i))\n table = sorted(table, key=lambda x: x[1])\n return table\n\n def _merge_table(self, attr, reverse=False):\n \"\"\"\n Do the merge operation on a table, and return a new table with\n no nesting / overlaps.\n\n Args\n attr (str): The attribute of the component you want to use. You\n must provide an attribute.\n reverse (bool): Whether to reverse the condition.\n\n Returns\n list: The merged table.\n \"\"\"\n merged, stack = [], []\n op = operator.le if reverse else operator.ge\n\n for interface in self._table:\n\n tb, depth, idx = interface\n\n if stack:\n # 'this' is the top or base we're on in this loop iteration.\n try:\n this = getattr(self[idx], attr)\n except AttributeError:\n this = getattr(self[idx].primary, attr)\n\n # 'current' is the highest priority unit in the stack.\n try:\n current = getattr(self[stack[-1]], attr)\n except AttributeError:\n current = getattr(self[stack[-1]].primary, attr)\n\n # Compare 'this' to 'current' to decide what to do.\n merge = op(this, current)\n else:\n merge = True\n\n if tb == 'T':\n\n # If this one meets the condition, merge it.\n if merge:\n # End the current unit, if any.\n if stack:\n merged.append(('B', depth, stack[-1]))\n # Start the new top.\n merged.append(interface)\n\n # Insert this unit into stack and re-sort.\n # (This is easier than trying to insert in the right place.)\n stack.append(idx)\n try:\n stack = sorted(stack,\n key=lambda i: getattr(self[i], attr),\n reverse=reverse)\n except AttributeError:\n stack = sorted(stack,\n key=lambda i: getattr(self[i].primary, attr),\n reverse=reverse)\n\n elif tb == 'B':\n have_merged = False\n\n # If this is the current unit's base, append it to the merge.\n if idx == stack[-1]:\n merged.append(interface)\n have_merged = True\n\n # End this unit in the stack.\n stack.remove(idx)\n\n # Add a top for the new current unit, if any, but only if we\n # did a merge.\n if stack and have_merged:\n merged.append(('T', depth, stack[-1]))\n\n return merged\n\n def _striplog_from_merge_table(self, table):\n \"\"\"\n Make a merge table into a Striplog instance.\n\n Args\n table (list). The table of tops and bases, represented as tuples.\n\n Returns\n Striplog. A new Striplog instance.\n \"\"\"\n m = []\n for top, bot in zip(table[::2], table[1::2]):\n\n # If zero thickness, discard.\n if top[1] == bot[1]:\n continue\n\n i = self[top[2]].copy()\n i.top = top[1]\n i.base = bot[1]\n m.append(i)\n\n return Striplog(m)\n\n def merge(self, attr, reverse=False):\n \"\"\"\n Merge the intervals in a striplog, using an attribute of the primary\n component for priority ordering.\n\n Args\n attr (str): The attribute of the component you want to use. 
You \\\n must provide an attribute.\n reverse (bool): Whether to reverse the condition.\n\n Returns\n Striplog: The merged striplog.\n \"\"\"\n m = self._merge_table(attr, reverse=reverse)\n return self._striplog_from_merge_table(m)\n\n def is_binary(self, attr=None):\n \"\"\"\n Determine if `attr`, which must be an attribute of every primary\n component, allows this striplog to be interpreted as a binary striplog.\n If no `attr` is provided, the first attribute of the primary comp-\n onent is used.\n \"\"\"\n try:\n primaries = [getattr(i.primary, attr) for i in self]\n except:\n primaries = [list(i.primary.__dict__.values())[0] for i in self]\n return all(map(lambda x: isinstance(x, bool), primaries))\n\n def to_binary_log(self, attr, step):\n \"\"\"\n Adaptation of `to_log` but deals with binary attributes of striplogs.\n\n Args\n attr (str): Which attribute to make into a log.\n \"\"\"\n log, basis, comps = self.to_log(step=step,\n match_only=[attr],\n undefined=-1,\n return_meta=True)\n if -1 in log:\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"We have undefined values, there might be a problem.\"\n warnings.warn(w)\n return log - 1, basis, comps\n\n def binary_morphology(self, attr, operation, step=1.0, p=3):\n \"\"\"\n Perform a discrete binary morphology operation on the striplog.\n\n Args\n attr (str): The attribute to use for the filtering. Must have\n boolean values.\n operation (str): One of `erosion`, `dilation`, `opening` or\n `closing`.\n step (float): The step size to use in discretization. Default is\n 1 but you might want to use something smaller, e.g. 0.1.\n p (int): The length of the structuring element, in samples (not\n natual units). Odd numbers are symmetrical and more intuitive.\n Default is 3.\n\n Returns\n Striplog. A new striplog instance.\n \"\"\"\n ops = {\n 'erosion': utils.binary_erosion,\n 'dilation': utils.binary_dilation,\n 'opening': utils.binary_opening,\n 'closing': utils.binary_closing,\n }\n if not self.is_binary():\n print(\"Cannot interpret striplog as binary.\")\n log, basis, comps = self.to_binary_log(step=step, attr=attr)\n proc = ops[operation](log, p)\n if operation == 'closing':\n proc = proc | log\n\n return Striplog.from_log(proc, components=comps, basis=basis)\n\n @classmethod\n def from_macrostrat(cls, lng, lat, buffer_size=0.2):\n \"\"\"\n Create a striplog from components derived using the MacroStrat API.\n This is simply a helper function to make things easier, but it\n works because we know what our data looks like in advance.\n\n Note: In order to plot this, you will need to add space for text and \n other decoration. 
This simply gives a Striplog back which _can_\n be plotted.\n\n Args:\n components (list):\n\n Returns:\n Tuple of:\n strip (striplog.Striplog)\n legend (striplog.Legend)\n\n Example:\n lng = -64.3573186\n lat = 44.4454632\n buffer_size = 0.3\n striplog.striplog.from_macrostrat(lng, lat, buffer_size)\n {'top': Position({'middle': 358.9, 'units': 'm'}), \n 'base': Position({'middle': 419.2, 'units': 'm'}), \n 'description': '', 'data': {}, 'components': [Component({\n 'map_id': 948660.0, 'scale': 'small', 'source_id': 7.0,\n 'name': 'Devonian plutonic: undivided granitic rocks',\n 'age': 'devonian', 'lith': 'plutonic: undivided granitic rocks',\n 'best_age_top': 358.9, 'best_age_bottom': 419.2, 't_int': 94.0,\n 'b_int': 94.0, 'color': '#cb8c37', 'source': 'MacroStrat.org (CC-BY)})]}\n {'top': Position({'middle': 358.9, 'units': 'm'}),\n 'base': Position({'middle': 541.0, 'units': 'm'}),\n 'description': '', 'data': {}, 'components': [Component({\n 'map_id': 948228.0, 'scale': 'small', 'source_id': 7.0,\n 'name': 'Cambrian-Devonian sedimentary', 'age': 'cambrian-devonian',\n 'lith': 'sedimentary', 'best_age_top': 358.9, 'best_age_bottom': 541.0,\n 't_int': 94.0, 'b_int': 122.0, 'color': '#99c08d',\n 'source': 'MacroStrat.org (CC-BY)})]}\n {'top': Position({'middle': 443.8, 'units': 'm'}),\n 'base': Position({'middle': 541.0, 'units': 'm'}),\n 'description': '', 'data': {}, 'components': [Component({\n 'map_id': 973359.0, 'scale': 'small', 'source_id': 7.0,\n 'name': 'Cambrian-Ordovician sedimentary', 'age': 'cambrian-ordovician',\n 'lith': 'sedimentary', 'best_age_top': 443.8, 'best_age_bottom': 541.0,\n 't_int': 112.0, 'b_int': 122.0, 'color': '#409963',\n 'source': 'MacroStrat.org (CC-BY)})]}\n \"\"\"\n # Get the \n features = utils.geology_from_macrostrat(lng=lng, lat=lat,\n buffer_size=buffer_size)\n\n columns = ('color', 'lith', 'age')\n\n intervals = []\n\n for feature in features:\n if feature['geometry'] is None:\n continue\n\n components = []\n for lith in utils.get_liths_from_macrostrat(feature['properties']['lith']):\n c = Component({'lithology': lith})\n components.append(c)\n\n intervals.append(Interval(\n top=feature['properties']['best_age_top'],\n base=feature['properties']['best_age_bottom'],\n components=components,\n description=feature['properties']['descrip'])\n )\n\n return cls(intervals, source='Macrostrat [CC-BY]', order='age')\n"
] | [
[
"matplotlib.pyplot.text",
"matplotlib.ticker.MultipleLocator",
"matplotlib.collections.PatchCollection",
"numpy.linspace",
"numpy.isnan",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"numpy.copy",
"numpy.ceil",
"numpy.diff",
"numpy.zeros_like",
"matplotlib.ticker.FormatStrFormatter",
"numpy.digitize",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
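The striplog snapshot above detects gaps and overlaps in __find_incongruities by comparing each interval's base with the next interval's top. A minimal, self-contained sketch of that comparison (the function name and the plain (top, base) depth tuples are illustrative, not striplog's API):

import operator

def find_incongruities(intervals, op):
    # For depth-ordered (top, base) pairs: a gap follows interval i when
    # base_i < top_{i+1} (operator.lt), an overlap when base_i > top_{i+1}
    # (operator.gt), mirroring the condition used in the snapshot above.
    hits = []
    for i, (top, base) in enumerate(intervals[:-1]):
        next_top = intervals[i + 1][0]
        if op(base, next_top):
            hits.append(i)
    return hits

beds = [(10.0, 20.0), (25.0, 40.0), (38.0, 50.0)]   # made-up depths
print(find_incongruities(beds, operator.lt))        # [0]  -> gap after the first bed
print(find_incongruities(beds, operator.gt))        # [1]  -> overlap after the second bed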
NLeSC/spreading_dye_sampler | [
"4282f7609959a31d1b2a4832f3ed643b15c46cb6"
] | [
"spreading_dye_sampler/test/test_dye_blot.py"
] | [
"import os\nimport sys\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))\n\nimport spreading_dye_sampler.dye_blot\n\nimport numpy as np\nfrom numpy.random import random\nimport pytest\n\[email protected]\ndef blot():\n num_cells = 100\n grid_width = 100\n grid_height = 100\n\n blot = None\n while blot is None:\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [10, 10], num_cells)\n return blot\n\ndef test_make_dye_blot():\n num_cells = 10\n grid_width = 100\n grid_height = 100\n\n for i in range(100):\n # make a blot\n blot = None\n while blot is None:\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [10, 20], num_cells)\n\n # check size\n assert blot.num_cells() == num_cells\n\n # check that the blot is in range\n for x, y in blot._cells:\n assert 0 <= x\n assert x < grid_width\n assert 0 <= y\n assert y < grid_height\n\ndef test_for_each_cell(blot):\n def test_forward(x, y):\n assert x, y in blot._cells\n\n blot.for_each_cell(test_forward)\n\ndef test_aspect():\n grid_width = 1000\n grid_height = 10\n num_cells = 500\n\n blot = None\n while blot is None:\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [1, 100], num_cells)\n\n assert blot.num_cells() == num_cells\n x_min = min([x for x, y in blot._cells])\n x_max = max([x for x, y in blot._cells])\n y_min = min([y for x, y in blot._cells])\n y_max = max([y for x, y in blot._cells])\n\n x_size = x_max - x_min\n y_size = y_max - y_min\n # This may fail occasionally. Need to figure out.\n assert x_size / y_size > 5\n\ndef test_squeeze():\n grid_width = 10\n grid_height = 10\n num_cells = grid_width * grid_height\n\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [10, 10], num_cells, squeeze=True)\n\n assert blot is not None\n assert blot.num_cells() == num_cells\n\n for y in range(grid_height):\n for x in range(grid_width):\n assert (x, y) in blot._cells\n\ndef test_masking():\n grid_width = 10\n grid_height = 10\n num_cells = 20\n\n # Make a mask with 45 permitted cells\n mask = np.zeros([grid_width, grid_height], dtype=bool)\n for y in range(grid_height):\n for x in range(grid_width):\n dx = x - grid_width / 2\n dy = y - grid_height / 2\n mask[x, y] = dx**2 + dy**2 < 4*4\n\n for i in range(100):\n num_cells = int(np.floor(random() * 44) + 1.0)\n blot = None\n while blot is None:\n blot = spreading_dye_sampler.dye_blot.make_blot(\n [grid_width, grid_height], [10, 10], num_cells, mask, squeeze=True)\n\n assert blot.num_cells() == num_cells\n\n def check(x, y):\n assert mask[x, y]\n\n blot.for_each_cell(check)\n"
] | [
[
"numpy.random.random",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
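test_masking in the snapshot above builds its circular mask ("45 permitted cells") with nested x/y loops. A vectorized NumPy equivalent, shown only as a sketch and not part of the test suite, produces the same mask:

import numpy as np

grid_width = grid_height = 10
x, y = np.ogrid[:grid_width, :grid_height]           # column / row index grids
mask = (x - grid_width / 2) ** 2 + (y - grid_height / 2) ** 2 < 4 * 4
print(mask.sum())                                     # 45 permitted cells, as in the test comment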
hrnciar/NiaPy | [
"d1e70924577cc90455c52701f2696bcb0a064438"
] | [
"examples/advanced_example_custom_pop.py"
] | [
"# encoding=utf8\n# This is temporary fix to import module from parent folder\n# It will be removed when package is published on PyPI\nimport sys\n\nsys.path.append('../')\n\nfrom niapy.task import StoppingTask, OptimizationType\nfrom niapy.benchmarks import Benchmark\nfrom niapy.algorithms.basic import GreyWolfOptimizer\nfrom numpy import random as rand, apply_along_axis\n\n\n# our custom benchmark class\nclass MyBenchmark(Benchmark):\n def __init__(self):\n\n Benchmark.__init__(self, -10, 10)\n\n def function(self):\n def evaluate(D, sol):\n val = 0.0\n for i in range(D): val += sol[i] ** 2\n return val\n\n return evaluate\n\n\n# custom initialization population function\ndef MyInit(task, NP, rnd=rand, **kwargs):\n pop = 0.2 + rnd.rand(NP, task.dimension) * task.range\n fpop = apply_along_axis(task.eval, 1, pop)\n return pop, fpop\n\n\n# we will run 10 repetitions of Grey Wolf Optimizer against our custom MyBenchmark benchmark function\nfor i in range(10):\n task = StoppingTask(max_iters=100, dimension=20, optimization_type=OptimizationType.MINIMIZATION,\n benchmark=MyBenchmark())\n\n # parameter is population size\n algo = GreyWolfOptimizer(population_size=20, initialization_function=MyInit)\n\n # running algorithm returns best found minimum\n best = algo.run(task)\n\n # printing best minimum\n print(best[-1])\n"
] | [
[
"numpy.apply_along_axis"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
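The NiaPy example above passes initialization_function=MyInit, i.e. a callable taking (task, NP, rnd, **kwargs) and returning a (population, fitness) pair. A second, hypothetical initializer with the same shape, hard-coding MyBenchmark's lower bound of -10 rather than assuming any extra task attributes:

import numpy as np

def CenteredInit(task, NP, rnd=np.random, **kwargs):
    # -10 mirrors MyBenchmark's lower bound; task.range supplies the width,
    # exactly as MyInit does above. Fitness is task.eval applied row-wise.
    pop = -10 + rnd.rand(NP, task.dimension) * task.range
    fpop = np.apply_along_axis(task.eval, 1, pop)
    return pop, fpop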
hexylena/tools-iuc | [
"811337eaab815f54f0fd93a3dd23a1153993ea2a"
] | [
"tools/cwpair2/cwpair2_util.py"
] | [
"import bisect\nimport csv\nimport os\nimport sys\nimport traceback\n\nimport matplotlib\nmatplotlib.use('Agg') # noqa\nfrom matplotlib import pyplot\n\n# Data outputs\nDETAILS = 'D'\nMATCHED_PAIRS = 'MP'\nORPHANS = 'O'\n# Data output formats\nGFF_EXT = 'gff'\nTABULAR_EXT = 'tabular'\n# Statistics historgrams output directory.\nHISTOGRAM = 'H'\n# Statistics outputs\nFINAL_PLOTS = 'F'\nPREVIEW_PLOTS = 'P'\nSTATS_GRAPH = 'C'\n\n# Graph settings.\nCOLORS = 'krg'\nY_LABEL = 'Peak-pair counts'\nX_LABEL = 'Peak-pair distance (bp)'\nTICK_WIDTH = 3\nADJUST = [0.140, 0.9, 0.9, 0.1]\nPLOT_FORMAT = 'pdf'\npyplot.rc('xtick.major', size=10.00)\npyplot.rc('ytick.major', size=10.00)\npyplot.rc('lines', linewidth=4.00)\npyplot.rc('axes', linewidth=3.00)\npyplot.rc('font', family='Bitstream Vera Sans', size=32.0)\n\n\nclass FrequencyDistribution(object):\n\n def __init__(self, start, end, binsize=10, d=None):\n self.start = start\n self.end = end\n self.dist = d or {}\n self.binsize = binsize\n\n def get_bin(self, x):\n \"\"\"\n Returns the bin in which a data point falls\n \"\"\"\n return self.start + (x - self.start) // self.binsize * self.binsize + self.binsize / 2.0\n\n def add(self, x):\n x = self.get_bin(x)\n self.dist[x] = self.dist.get(x, 0) + 1\n\n def graph_series(self):\n x = []\n y = []\n for i in range(self.start, self.end, self.binsize):\n center = self.get_bin(i)\n x.append(center)\n y.append(self.dist.get(center, 0))\n return x, y\n\n def mode(self):\n return max(self.dist.items(), key=lambda data: data[1])[0]\n\n def size(self):\n return sum(self.dist.values())\n\n\ndef stop_err(msg):\n sys.stderr.write(msg)\n sys.exit(1)\n\n\ndef distance(peak1, peak2):\n return (peak2[1] + peak2[2]) / 2 - (peak1[1] + peak1[2]) / 2\n\n\ndef gff_row(cname, start, end, score, source, type='.', strand='.', phase='.', attrs={}):\n return (cname, source, type, start, end, score, strand, phase, gff_attrs(attrs))\n\n\ndef gff_attrs(d):\n if not d:\n return '.'\n return ';'.join('%s=%s' % item for item in d.items())\n\n\ndef parse_chromosomes(reader):\n # This version of cwpair2 accepts only gff format as input.\n chromosomes = {}\n reader.next()\n for line in reader:\n cname, junk, junk, start, end, value, strand, junk, junk = line\n start = int(start)\n end = int(end)\n value = float(value)\n if cname not in chromosomes:\n chromosomes[cname] = []\n peaks = chromosomes[cname]\n peaks.append((strand, start, end, value))\n return chromosomes\n\n\ndef perc95(chromosomes):\n \"\"\"\n Returns the 95th percentile value of the given chromosomes.\n \"\"\"\n values = []\n for peaks in chromosomes.values():\n for peak in peaks:\n values.append(peak[3])\n values.sort()\n # Get 95% value\n return values[int(len(values) * 0.95)]\n\n\ndef filter(chromosomes, threshold=0.05):\n \"\"\"\n Filters the peaks to those above a threshold. 
Threshold < 1.0 is interpreted\n as a proportion of the maximum, >=1.0 as an absolute value.\n \"\"\"\n if threshold < 1:\n p95 = perc95(chromosomes)\n threshold = p95 * threshold\n # Make the threshold a proportion of the\n for cname, peaks in chromosomes.items():\n chromosomes[cname] = [peak for peak in peaks if peak[3] > threshold]\n\n\ndef split_strands(chromosome):\n watson = [peak for peak in chromosome if peak[0] == '+']\n crick = [peak for peak in chromosome if peak[0] == '-']\n return watson, crick\n\n\ndef all_pair_distribution(chromosomes, up_distance, down_distance, binsize):\n dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)\n for cname, data in chromosomes.items():\n watson, crick = split_strands(data)\n crick.sort(key=lambda data: float(data[1]))\n keys = make_keys(crick)\n for peak in watson:\n for cpeak in get_window(crick, peak, up_distance, down_distance, keys):\n dist.add(distance(peak, cpeak))\n return dist\n\n\ndef make_keys(crick):\n return [(data[1] + data[2]) // 2 for data in crick]\n\n\ndef get_window(crick, peak, up_distance, down_distance, keys=None):\n \"\"\"\n Returns a window of all crick peaks within a distance of a watson peak.\n crick strand MUST be sorted by distance\n \"\"\"\n strand, start, end, value = peak\n midpoint = (start + end) // 2\n lower = midpoint - up_distance\n upper = midpoint + down_distance\n keys = keys or make_keys(crick)\n start_index = bisect.bisect_left(keys, lower)\n end_index = bisect.bisect_right(keys, upper)\n return [cpeak for cpeak in crick[start_index:end_index]]\n\n\ndef match_largest(window, peak):\n if not window:\n return None\n return max(window, key=lambda cpeak: cpeak[3])\n\n\ndef match_closest(window, peak):\n if not window:\n return None\n\n def key(cpeak):\n d = distance(peak, cpeak)\n # Search negative distances last\n if d < 0:\n # And then prefer less negative distances\n d = 10000 - d\n return d\n return min(window, key=key)\n\n\ndef match_mode(window, peak, mode):\n if not window:\n return None\n return min(window, key=lambda cpeak: abs(distance(peak, cpeak) - mode))\n\nMETHODS = {'mode': match_mode, 'closest': match_closest, 'largest': match_largest}\n\n\ndef frequency_plot(freqs, fname, labels=[], title=''):\n pyplot.clf()\n pyplot.figure(figsize=(10, 10))\n for i, freq in enumerate(freqs):\n x, y = freq.graph_series()\n pyplot.plot(x, y, '%s-' % COLORS[i])\n if len(freqs) > 1:\n pyplot.legend(labels)\n pyplot.xlim(freq.start, freq.end)\n pyplot.ylim(ymin=0)\n pyplot.ylabel(Y_LABEL)\n pyplot.xlabel(X_LABEL)\n pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3])\n # Get the current axes\n ax = pyplot.gca()\n for l in ax.get_xticklines() + ax.get_yticklines():\n l.set_markeredgewidth(TICK_WIDTH)\n pyplot.savefig(fname)\n\n\ndef create_directories():\n # Output histograms in pdf.\n os.mkdir(HISTOGRAM)\n os.mkdir('data_%s' % DETAILS)\n os.mkdir('data_%s' % ORPHANS)\n os.mkdir('data_%s' % MATCHED_PAIRS)\n\n\ndef process_file(dataset_path, galaxy_hid, method, threshold, up_distance,\n down_distance, binsize, output_files):\n if method == 'all':\n match_methods = METHODS.keys()\n else:\n match_methods = [method]\n statistics = []\n for match_method in match_methods:\n stats = perform_process(dataset_path,\n galaxy_hid,\n match_method,\n threshold,\n up_distance,\n down_distance,\n binsize,\n output_files)\n statistics.append(stats)\n if output_files == 'all' and method == 'all':\n frequency_plot([s['dist'] for s in statistics],\n 
statistics[0]['graph_path'],\n labels=METHODS.keys())\n return statistics\n\n\ndef perform_process(dataset_path, galaxy_hid, method, threshold, up_distance,\n down_distance, binsize, output_files):\n output_details = output_files in [\"all\", \"matched_pair_orphan_detail\"]\n output_plots = output_files in [\"all\"]\n output_orphans = output_files in [\"all\", \"matched_pair_orphan\", \"matched_pair_orphan_detail\"]\n # Keep track of statistics for the output file\n statistics = {}\n input = csv.reader(open(dataset_path, 'rt'), delimiter='\\t')\n fpath, fname = os.path.split(dataset_path)\n statistics['fname'] = '%s: data %s' % (method, str(galaxy_hid))\n statistics['dir'] = fpath\n if threshold >= 1:\n filter_string = 'fa%d' % threshold\n else:\n filter_string = 'f%d' % (threshold * 100)\n fname = '%s_%su%dd%d_on_data_%s' % (method, filter_string, up_distance, down_distance, galaxy_hid)\n\n def make_histogram_path(output_type, fname):\n return os.path.join(HISTOGRAM, 'histogram_%s_%s.%s' % (output_type, fname, PLOT_FORMAT))\n\n def make_path(output_type, extension, fname):\n # Returns the full path for an output.\n return os.path.join(output_type, '%s_%s.%s' % (output_type, fname, extension))\n\n def td_writer(output_type, extension, fname):\n # Returns a tab-delimited writer for a specified output.\n output_file_path = make_path(output_type, extension, fname)\n return csv.writer(open(output_file_path, 'wt'), delimiter='\\t')\n\n try:\n chromosomes = parse_chromosomes(input)\n except Exception:\n stop_err('Unable to parse file \"%s\".\\n%s' % (dataset_path, traceback.format_exc()))\n if output_details:\n # Details\n detailed_output = td_writer('data_%s' % DETAILS, TABULAR_EXT, fname)\n detailed_output.writerow(('chrom', 'start', 'end', 'value', 'strand') * 2 + ('midpoint', 'c-w reads sum', 'c-w distance (bp)'))\n if output_plots:\n # Final Plot\n final_plot_path = make_histogram_path(FINAL_PLOTS, fname)\n if output_orphans:\n # Orphans\n orphan_output = td_writer('data_%s' % ORPHANS, TABULAR_EXT, fname)\n orphan_output.writerow(('chrom', 'strand', 'start', 'end', 'value'))\n if output_plots:\n # Preview Plot\n preview_plot_path = make_histogram_path(PREVIEW_PLOTS, fname)\n # Matched Pairs.\n matched_pairs_output = td_writer('data_%s' % MATCHED_PAIRS, GFF_EXT, fname)\n statistics['stats_path'] = 'statistics.%s' % TABULAR_EXT\n if output_plots:\n statistics['graph_path'] = make_histogram_path(STATS_GRAPH, fname)\n statistics['perc95'] = perc95(chromosomes)\n if threshold > 0:\n # Apply filter\n filter(chromosomes, threshold)\n if method == 'mode':\n freq = all_pair_distribution(chromosomes, up_distance, down_distance, binsize)\n mode = freq.mode()\n statistics['preview_mode'] = mode\n if output_plots:\n frequency_plot([freq], preview_plot_path, title='Preview frequency plot')\n else:\n statistics['preview_mode'] = 'NA'\n dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)\n orphans = 0\n # x will be used to archive the summary dataset\n x = []\n for cname, chromosome in chromosomes.items():\n # Each peak is (strand, start, end, value)\n watson, crick = split_strands(chromosome)\n # Sort by value of each peak\n watson.sort(key=lambda data: -float(data[3]))\n # Sort by position to facilitate binary search\n crick.sort(key=lambda data: float(data[1]))\n keys = make_keys(crick)\n for peak in watson:\n window = get_window(crick, peak, up_distance, down_distance, keys)\n if method == 'mode':\n match = match_mode(window, peak, mode)\n else:\n match = METHODS[method](window, 
peak)\n if match:\n midpoint = (match[1] + match[2] + peak[1] + peak[2]) // 4\n d = distance(peak, match)\n dist.add(d)\n # Simple output in gff format.\n x.append(gff_row(cname,\n source='cwpair',\n start=midpoint,\n end=midpoint + 1,\n score=peak[3] + match[3],\n attrs={'cw_distance': d}))\n if output_details:\n detailed_output.writerow((cname,\n peak[1],\n peak[2],\n peak[3],\n '+',\n cname,\n match[1],\n match[2],\n match[3], '-',\n midpoint,\n peak[3] + match[3],\n d))\n i = bisect.bisect_left(keys, (match[1] + match[2]) / 2)\n del crick[i]\n del keys[i]\n else:\n if output_orphans:\n orphan_output.writerow((cname, peak[0], peak[1], peak[2], peak[3]))\n # Keep track of orphans for statistics.\n orphans += 1\n # Remaining crick peaks are orphans\n if output_orphans:\n for cpeak in crick:\n orphan_output.writerow((cname, cpeak[0], cpeak[1], cpeak[2], cpeak[3]))\n # Keep track of orphans for statistics.\n orphans += len(crick)\n # Sort output descending by score.\n x.sort(key=lambda data: float(data[5]), reverse=True)\n # Writing a summary to gff format file\n for row in x:\n row_tmp = list(row)\n # Dataset in tuple cannot be modified in Python, so row will\n # be converted to list format to add 'chr'.\n if row_tmp[0] == \"999\":\n row_tmp[0] = 'chrM'\n elif row_tmp[0] == \"998\":\n row_tmp[0] = 'chrY'\n elif row_tmp[0] == \"997\":\n row_tmp[0] = 'chrX'\n else:\n row_tmp[0] = row_tmp[0]\n # Print row_tmp.\n matched_pairs_output.writerow(row_tmp)\n statistics['paired'] = dist.size() * 2\n statistics['orphans'] = orphans\n statistics['final_mode'] = dist.mode()\n if output_plots:\n frequency_plot([dist], final_plot_path, title='Frequency distribution')\n statistics['dist'] = dist\n return statistics\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.use",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
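FrequencyDistribution.get_bin in the cwpair2 snapshot above snaps a peak-pair distance onto the centre of its histogram bin. A standalone check of that arithmetic, with made-up numbers:

def get_bin(x, start, binsize=10):
    # Same expression as FrequencyDistribution.get_bin above.
    return start + (x - start) // binsize * binsize + binsize / 2.0

print(get_bin(-7, start=-100))   # -5.0  -> centre of bin [-10, 0)
print(get_bin(12, start=-100))   # 15.0  -> centre of bin [10, 20)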
ExpectationMax/pymc3 | [
"7988d0bd023c8ba05a2d97bcbb563a67ed9ed82a",
"7988d0bd023c8ba05a2d97bcbb563a67ed9ed82a"
] | [
"pymc3/step_methods/hmc/quadpotential.py",
"pymc3/step_methods/arraystep.py"
] | [
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\n\nimport aesara\nimport numpy as np\nimport scipy.linalg\n\nfrom numpy.random import normal\nfrom scipy.sparse import issparse\n\nfrom pymc3.aesaraf import floatX\n\n__all__ = [\n \"quad_potential\",\n \"QuadPotentialDiag\",\n \"QuadPotentialFull\",\n \"QuadPotentialFullInv\",\n \"QuadPotentialDiagAdapt\",\n \"QuadPotentialFullAdapt\",\n \"isquadpotential\",\n]\n\n\ndef quad_potential(C, is_cov):\n \"\"\"\n Compute a QuadPotential object from a scaling matrix.\n\n Parameters\n ----------\n C: arraylike, 0 <= ndim <= 2\n scaling matrix for the potential\n vector treated as diagonal matrix.\n is_cov: Boolean\n whether C is provided as a covariance matrix or hessian\n\n Returns\n -------\n q: Quadpotential\n \"\"\"\n if issparse(C):\n if not chol_available:\n raise ImportError(\"Sparse mass matrices require scikits.sparse\")\n elif is_cov:\n return QuadPotentialSparse(C)\n else:\n raise ValueError(\"Sparse precision matrices are not supported\")\n\n partial_check_positive_definite(C)\n if C.ndim == 1:\n if is_cov:\n return QuadPotentialDiag(C)\n else:\n return QuadPotentialDiag(1.0 / C)\n else:\n if is_cov:\n return QuadPotentialFull(C)\n else:\n return QuadPotentialFullInv(C)\n\n\ndef partial_check_positive_definite(C):\n \"\"\"Make a simple but partial check for Positive Definiteness.\"\"\"\n if C.ndim == 1:\n d = C\n else:\n d = np.diag(C)\n (i,) = np.nonzero(np.logical_or(np.isnan(d), d <= 0))\n\n if len(i):\n raise PositiveDefiniteError(\"Simple check failed. Diagonal contains negatives\", i)\n\n\nclass PositiveDefiniteError(ValueError):\n def __init__(self, msg, idx):\n super().__init__(msg)\n self.idx = idx\n self.msg = msg\n\n def __str__(self):\n return f\"Scaling is not positive definite: {self.msg}. 
Check indexes {self.idx}.\"\n\n\nclass QuadPotential:\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def energy(self, x, velocity=None):\n raise NotImplementedError(\"Abstract method\")\n\n def random(self, x):\n raise NotImplementedError(\"Abstract method\")\n\n def velocity_energy(self, x, v_out):\n raise NotImplementedError(\"Abstract method\")\n\n def update(self, sample, grad, tune):\n \"\"\"Inform the potential about a new sample during tuning.\n\n This can be used by adaptive potentials to change the\n mass matrix.\n \"\"\"\n pass\n\n def raise_ok(self, vmap=None):\n \"\"\"Check if the mass matrix is ok, and raise ValueError if not.\n\n Parameters\n ----------\n vmap: blocking.ArrayOrdering.vmap\n List of `VarMap`s, which are namedtuples with var, slc, shp, dtyp\n\n Raises\n ------\n ValueError if any standard deviations are 0 or infinite\n\n Returns\n -------\n None\n \"\"\"\n return None\n\n def reset(self):\n pass\n\n\ndef isquadpotential(value):\n \"\"\"Check whether an object might be a QuadPotential object.\"\"\"\n return isinstance(value, QuadPotential)\n\n\nclass QuadPotentialDiagAdapt(QuadPotential):\n \"\"\"Adapt a diagonal mass matrix from the sample variances.\"\"\"\n\n def __init__(\n self,\n n,\n initial_mean,\n initial_diag=None,\n initial_weight=0,\n adaptation_window=101,\n adaptation_window_multiplier=1,\n dtype=None,\n ):\n \"\"\"Set up a diagonal mass matrix.\"\"\"\n if initial_diag is not None and initial_diag.ndim != 1:\n raise ValueError(\"Initial diagonal must be one-dimensional.\")\n if initial_mean.ndim != 1:\n raise ValueError(\"Initial mean must be one-dimensional.\")\n if initial_diag is not None and len(initial_diag) != n:\n raise ValueError(\n \"Wrong shape for initial_diag: expected {} got {}\".format(n, len(initial_diag))\n )\n if len(initial_mean) != n:\n raise ValueError(\n \"Wrong shape for initial_mean: expected {} got {}\".format(n, len(initial_mean))\n )\n\n if dtype is None:\n dtype = aesara.config.floatX\n\n if initial_diag is None:\n initial_diag = np.ones(n, dtype=dtype)\n initial_weight = 1\n\n self.dtype = dtype\n self._n = n\n\n self._initial_mean = initial_mean\n self._initial_diag = initial_diag\n self._initial_weight = initial_weight\n self.adaptation_window = adaptation_window\n self.adaptation_window_multiplier = float(adaptation_window_multiplier)\n\n self.reset()\n\n def reset(self):\n self._var = np.array(self._initial_diag, dtype=self.dtype, copy=True)\n self._var_aesara = aesara.shared(self._var)\n self._stds = np.sqrt(self._initial_diag)\n self._inv_stds = floatX(1.0) / self._stds\n self._foreground_var = _WeightedVariance(\n self._n, self._initial_mean, self._initial_diag, self._initial_weight, self.dtype\n )\n self._background_var = _WeightedVariance(self._n, dtype=self.dtype)\n self._n_samples = 0\n\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n return np.multiply(self._var, x, out=out)\n\n def energy(self, x, velocity=None):\n \"\"\"Compute kinetic energy at a position in parameter space.\"\"\"\n if velocity is not None:\n return 0.5 * x.dot(velocity)\n return 0.5 * x.dot(self._var * x)\n\n def velocity_energy(self, x, v_out):\n \"\"\"Compute velocity and return kinetic energy at a position in parameter space.\"\"\"\n self.velocity(x, out=v_out)\n return 0.5 * np.dot(x, v_out)\n\n def random(self):\n \"\"\"Draw random value from 
QuadPotential.\"\"\"\n vals = normal(size=self._n).astype(self.dtype)\n return self._inv_stds * vals\n\n def _update_from_weightvar(self, weightvar):\n weightvar.current_variance(out=self._var)\n np.sqrt(self._var, out=self._stds)\n np.divide(1, self._stds, out=self._inv_stds)\n self._var_aesara.set_value(self._var)\n\n def update(self, sample, grad, tune):\n \"\"\"Inform the potential about a new sample during tuning.\"\"\"\n if not tune:\n return\n\n self._foreground_var.add_sample(sample, weight=1)\n self._background_var.add_sample(sample, weight=1)\n self._update_from_weightvar(self._foreground_var)\n\n if self._n_samples > 0 and self._n_samples % self.adaptation_window == 0:\n self._foreground_var = self._background_var\n self._background_var = _WeightedVariance(self._n, dtype=self.dtype)\n self.adaptation_window = int(self.adaptation_window * self.adaptation_window_multiplier)\n\n self._n_samples += 1\n\n def raise_ok(self, vmap):\n \"\"\"Check if the mass matrix is ok, and raise ValueError if not.\n\n Parameters\n ----------\n vmap: blocking.ArrayOrdering.vmap\n List of `VarMap`s, which are namedtuples with var, slc, shp, dtyp\n\n Raises\n ------\n ValueError if any standard deviations are 0 or infinite\n\n Returns\n -------\n None\n \"\"\"\n if np.any(self._stds == 0):\n name_slc = []\n tmp_hold = list(range(self._stds.size))\n for vmap_ in vmap:\n slclen = len(tmp_hold[vmap_.slc])\n for i in range(slclen):\n name_slc.append((vmap_.var, i))\n index = np.where(self._stds == 0)[0]\n errmsg = [\"Mass matrix contains zeros on the diagonal. \"]\n for ii in index:\n errmsg.append(\n \"The derivative of RV `{}`.ravel()[{}] is zero.\".format(*name_slc[ii])\n )\n raise ValueError(\"\\n\".join(errmsg))\n\n if np.any(~np.isfinite(self._stds)):\n name_slc = []\n tmp_hold = list(range(self._stds.size))\n for vmap_ in vmap:\n slclen = len(tmp_hold[vmap_.slc])\n for i in range(slclen):\n name_slc.append((vmap_.var, i))\n index = np.where(~np.isfinite(self._stds))[0]\n errmsg = [\"Mass matrix contains non-finite values on the diagonal. 
\"]\n for ii in index:\n errmsg.append(\n \"The derivative of RV `{}`.ravel()[{}] is non-finite.\".format(*name_slc[ii])\n )\n raise ValueError(\"\\n\".join(errmsg))\n\n\nclass QuadPotentialDiagAdaptGrad(QuadPotentialDiagAdapt):\n \"\"\"Adapt a diagonal mass matrix from the variances of the gradients.\n\n This is experimental, and may be removed without prior deprication.\n \"\"\"\n\n def reset(self):\n super().reset()\n self._grads1 = np.zeros(self._n, dtype=self.dtype)\n self._ngrads1 = 0\n self._grads2 = np.zeros(self._n, dtype=self.dtype)\n self._ngrads2 = 0\n\n def _update(self, var):\n self._var[:] = var\n np.sqrt(self._var, out=self._stds)\n np.divide(1, self._stds, out=self._inv_stds)\n self._var_aesara.set_value(self._var)\n\n def update(self, sample, grad, tune):\n \"\"\"Inform the potential about a new sample during tuning.\"\"\"\n if not tune:\n return\n\n self._grads1[:] += np.abs(grad)\n self._grads2[:] += np.abs(grad)\n self._ngrads1 += 1\n self._ngrads2 += 1\n\n if self._n_samples <= 150:\n super().update(sample, grad, tune)\n else:\n self._update((self._ngrads1 / self._grads1) ** 2)\n\n if self._n_samples > 100 and self._n_samples % 100 == 50:\n self._ngrads1 = self._ngrads2\n self._ngrads2 = 1\n self._grads1[:] = self._grads2\n self._grads2[:] = 1\n\n\nclass _WeightedVariance:\n \"\"\"Online algorithm for computing mean of variance.\"\"\"\n\n def __init__(\n self, nelem, initial_mean=None, initial_variance=None, initial_weight=0, dtype=\"d\"\n ):\n self._dtype = dtype\n self.n_samples = float(initial_weight)\n if initial_mean is None:\n self.mean = np.zeros(nelem, dtype=\"d\")\n else:\n self.mean = np.array(initial_mean, dtype=\"d\", copy=True)\n if initial_variance is None:\n self.raw_var = np.zeros(nelem, dtype=\"d\")\n else:\n self.raw_var = np.array(initial_variance, dtype=\"d\", copy=True)\n\n self.raw_var[:] *= self.n_samples\n\n if self.raw_var.shape != (nelem,):\n raise ValueError(\"Invalid shape for initial variance.\")\n if self.mean.shape != (nelem,):\n raise ValueError(\"Invalid shape for initial mean.\")\n\n def add_sample(self, x, weight):\n x = np.asarray(x)\n self.n_samples += 1\n old_diff = x - self.mean\n self.mean[:] += old_diff / self.n_samples\n new_diff = x - self.mean\n self.raw_var[:] += weight * old_diff * new_diff\n\n def current_variance(self, out=None):\n if self.n_samples == 0:\n raise ValueError(\"Can not compute variance without samples.\")\n if out is not None:\n return np.divide(self.raw_var, self.n_samples, out=out)\n else:\n return (self.raw_var / self.n_samples).astype(self._dtype)\n\n def current_mean(self):\n return self.mean.copy(dtype=self._dtype)\n\n\nclass QuadPotentialDiag(QuadPotential):\n \"\"\"Quad potential using a diagonal covariance matrix.\"\"\"\n\n def __init__(self, v, dtype=None):\n \"\"\"Use a vector to represent a diagonal matrix for a covariance matrix.\n\n Parameters\n ----------\n v: vector, 0 <= ndim <= 1\n Diagonal of covariance matrix for the potential vector\n \"\"\"\n if dtype is None:\n dtype = aesara.config.floatX\n self.dtype = dtype\n v = v.astype(self.dtype)\n s = v ** 0.5\n\n self.s = s\n self.inv_s = 1.0 / s\n self.v = v\n\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n if out is not None:\n np.multiply(x, self.v, out=out)\n return\n return self.v * x\n\n def random(self):\n \"\"\"Draw random value from QuadPotential.\"\"\"\n return floatX(normal(size=self.s.shape)) * self.inv_s\n\n def energy(self, x, velocity=None):\n \"\"\"Compute 
kinetic energy at a position in parameter space.\"\"\"\n if velocity is not None:\n return 0.5 * np.dot(x, velocity)\n return 0.5 * x.dot(self.v * x)\n\n def velocity_energy(self, x, v_out):\n \"\"\"Compute velocity and return kinetic energy at a position in parameter space.\"\"\"\n np.multiply(x, self.v, out=v_out)\n return 0.5 * np.dot(x, v_out)\n\n\nclass QuadPotentialFullInv(QuadPotential):\n \"\"\"QuadPotential object for Hamiltonian calculations using inverse of covariance matrix.\"\"\"\n\n def __init__(self, A, dtype=None):\n \"\"\"Compute the lower cholesky decomposition of the potential.\n\n Parameters\n ----------\n A: matrix, ndim = 2\n Inverse of covariance matrix for the potential vector\n \"\"\"\n if dtype is None:\n dtype = aesara.config.floatX\n self.dtype = dtype\n self.L = floatX(scipy.linalg.cholesky(A, lower=True))\n\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n vel = scipy.linalg.cho_solve((self.L, True), x)\n if out is None:\n return vel\n out[:] = vel\n\n def random(self):\n \"\"\"Draw random value from QuadPotential.\"\"\"\n n = floatX(normal(size=self.L.shape[0]))\n return np.dot(self.L, n)\n\n def energy(self, x, velocity=None):\n \"\"\"Compute kinetic energy at a position in parameter space.\"\"\"\n if velocity is None:\n velocity = self.velocity(x)\n return 0.5 * x.dot(velocity)\n\n def velocity_energy(self, x, v_out):\n \"\"\"Compute velocity and return kinetic energy at a position in parameter space.\"\"\"\n self.velocity(x, out=v_out)\n return 0.5 * np.dot(x, v_out)\n\n\nclass QuadPotentialFull(QuadPotential):\n \"\"\"Basic QuadPotential object for Hamiltonian calculations.\"\"\"\n\n def __init__(self, cov, dtype=None):\n \"\"\"Compute the lower cholesky decomposition of the potential.\n\n Parameters\n ----------\n A: matrix, ndim = 2\n scaling matrix for the potential vector\n \"\"\"\n if dtype is None:\n dtype = aesara.config.floatX\n self.dtype = dtype\n self._cov = np.array(cov, dtype=self.dtype, copy=True)\n self._chol = scipy.linalg.cholesky(self._cov, lower=True)\n self._n = len(self._cov)\n\n def velocity(self, x, out=None):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n return np.dot(self._cov, x, out=out)\n\n def random(self):\n \"\"\"Draw random value from QuadPotential.\"\"\"\n vals = np.random.normal(size=self._n).astype(self.dtype)\n return scipy.linalg.solve_triangular(self._chol.T, vals, overwrite_b=True)\n\n def energy(self, x, velocity=None):\n \"\"\"Compute kinetic energy at a position in parameter space.\"\"\"\n if velocity is None:\n velocity = self.velocity(x)\n return 0.5 * np.dot(x, velocity)\n\n def velocity_energy(self, x, v_out):\n \"\"\"Compute velocity and return kinetic energy at a position in parameter space.\"\"\"\n self.velocity(x, out=v_out)\n return self.energy(x, v_out)\n\n __call__ = random\n\n\nclass QuadPotentialFullAdapt(QuadPotentialFull):\n \"\"\"Adapt a dense mass matrix using the sample covariances.\"\"\"\n\n def __init__(\n self,\n n,\n initial_mean,\n initial_cov=None,\n initial_weight=0,\n adaptation_window=101,\n adaptation_window_multiplier=2,\n update_window=1,\n dtype=None,\n ):\n warnings.warn(\"QuadPotentialFullAdapt is an experimental feature\")\n\n if initial_cov is not None and initial_cov.ndim != 2:\n raise ValueError(\"Initial covariance must be two-dimensional.\")\n if initial_mean.ndim != 1:\n raise ValueError(\"Initial mean must be one-dimensional.\")\n if initial_cov is not None and initial_cov.shape 
!= (n, n):\n raise ValueError(f\"Wrong shape for initial_cov: expected {n} got {initial_cov.shape}\")\n if len(initial_mean) != n:\n raise ValueError(\n \"Wrong shape for initial_mean: expected {} got {}\".format(n, len(initial_mean))\n )\n\n if dtype is None:\n dtype = aesara.config.floatX\n\n if initial_cov is None:\n initial_cov = np.eye(n, dtype=dtype)\n initial_weight = 1\n\n self.dtype = dtype\n self._n = n\n self._initial_mean = initial_mean\n self._initial_cov = initial_cov\n self._initial_weight = initial_weight\n\n self.adaptation_window = int(adaptation_window)\n self.adaptation_window_multiplier = float(adaptation_window_multiplier)\n self._update_window = int(update_window)\n\n self.reset()\n\n def reset(self):\n self._previous_update = 0\n self._cov = np.array(self._initial_cov, dtype=self.dtype, copy=True)\n self._chol = scipy.linalg.cholesky(self._cov, lower=True)\n self._chol_error = None\n self._foreground_cov = _WeightedCovariance(\n self._n, self._initial_mean, self._initial_cov, self._initial_weight, self.dtype\n )\n self._background_cov = _WeightedCovariance(self._n, dtype=self.dtype)\n self._n_samples = 0\n\n def _update_from_weightvar(self, weightvar):\n weightvar.current_covariance(out=self._cov)\n try:\n self._chol = scipy.linalg.cholesky(self._cov, lower=True)\n except (scipy.linalg.LinAlgError, ValueError) as error:\n self._chol_error = error\n\n def update(self, sample, grad, tune):\n if not tune:\n return\n\n # Steps since previous update\n delta = self._n_samples - self._previous_update\n\n self._foreground_cov.add_sample(sample, weight=1)\n self._background_cov.add_sample(sample, weight=1)\n\n # Update the covariance matrix and recompute the Cholesky factorization\n # every \"update_window\" steps\n if (delta + 1) % self._update_window == 0:\n self._update_from_weightvar(self._foreground_cov)\n\n # Reset the background covariance if we are at the end of the adaptation\n # window.\n if delta >= self.adaptation_window:\n self._foreground_cov = self._background_cov\n self._background_cov = _WeightedCovariance(self._n, dtype=self.dtype)\n\n self._previous_update = self._n_samples\n self.adaptation_window = int(self.adaptation_window * self.adaptation_window_multiplier)\n\n self._n_samples += 1\n\n def raise_ok(self, vmap):\n if self._chol_error is not None:\n raise ValueError(str(self._chol_error))\n\n\nclass _WeightedCovariance:\n \"\"\"Online algorithm for computing mean and covariance\n\n This implements the `Welford's algorithm\n <https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance>`_ based\n on the implementation in `the Stan math library\n <https://github.com/stan-dev/math>`_.\n\n \"\"\"\n\n def __init__(\n self,\n nelem,\n initial_mean=None,\n initial_covariance=None,\n initial_weight=0,\n dtype=\"d\",\n ):\n self._dtype = dtype\n self.n_samples = float(initial_weight)\n if initial_mean is None:\n self.mean = np.zeros(nelem, dtype=\"d\")\n else:\n self.mean = np.array(initial_mean, dtype=\"d\", copy=True)\n if initial_covariance is None:\n self.raw_cov = np.eye(nelem, dtype=\"d\")\n else:\n self.raw_cov = np.array(initial_covariance, dtype=\"d\", copy=True)\n\n self.raw_cov[:] *= self.n_samples\n\n if self.raw_cov.shape != (nelem, nelem):\n raise ValueError(\"Invalid shape for initial covariance.\")\n if self.mean.shape != (nelem,):\n raise ValueError(\"Invalid shape for initial mean.\")\n\n def add_sample(self, x, weight):\n x = np.asarray(x)\n self.n_samples += 1\n old_diff = x - self.mean\n self.mean[:] += old_diff / self.n_samples\n 
new_diff = x - self.mean\n self.raw_cov[:] += weight * new_diff[:, None] * old_diff[None, :]\n\n def current_covariance(self, out=None):\n if self.n_samples == 0:\n raise ValueError(\"Can not compute covariance without samples.\")\n if out is not None:\n return np.divide(self.raw_cov, self.n_samples - 1, out=out)\n else:\n return (self.raw_cov / (self.n_samples - 1)).astype(self._dtype)\n\n def current_mean(self):\n return np.array(self.mean, dtype=self._dtype)\n\n\ntry:\n import sksparse.cholmod as cholmod\n\n chol_available = True\nexcept ImportError:\n chol_available = False\n\nif chol_available:\n __all__ += [\"QuadPotentialSparse\"]\n\n import aesara.sparse\n\n class QuadPotentialSparse(QuadPotential):\n def __init__(self, A):\n \"\"\"Compute a sparse cholesky decomposition of the potential.\n\n Parameters\n ----------\n A: matrix, ndim = 2\n scaling matrix for the potential vector\n \"\"\"\n self.A = A\n self.size = A.shape[0]\n self.factor = factor = cholmod.cholesky(A)\n self.d_sqrt = np.sqrt(factor.D())\n\n def velocity(self, x):\n \"\"\"Compute the current velocity at a position in parameter space.\"\"\"\n A = aesara.sparse.as_sparse(self.A)\n return aesara.sparse.dot(A, x)\n\n def random(self):\n \"\"\"Draw random value from QuadPotential.\"\"\"\n n = floatX(normal(size=self.size))\n n /= self.d_sqrt\n n = self.factor.solve_Lt(n)\n n = self.factor.apply_Pt(n)\n return n\n\n def energy(self, x):\n \"\"\"Compute kinetic energy at a position in parameter space.\"\"\"\n return 0.5 * x.T.dot(self.velocity(x))\n",
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom enum import IntEnum, unique\nfrom typing import Dict, List\n\nimport numpy as np\n\nfrom numpy.random import uniform\n\nfrom pymc3.aesaraf import inputvars\nfrom pymc3.blocking import ArrayOrdering, DictToArrayBijection\nfrom pymc3.model import PyMC3Variable, modelcontext\nfrom pymc3.step_methods.compound import CompoundStep\nfrom pymc3.util import get_var_name\n\n__all__ = [\"ArrayStep\", \"ArrayStepShared\", \"metrop_select\", \"Competence\"]\n\n\n@unique\nclass Competence(IntEnum):\n \"\"\"Enum for charaterizing competence classes of step methods.\n Values include:\n 0: INCOMPATIBLE\n 1: COMPATIBLE\n 2: PREFERRED\n 3: IDEAL\n \"\"\"\n\n INCOMPATIBLE = 0\n COMPATIBLE = 1\n PREFERRED = 2\n IDEAL = 3\n\n\nclass BlockedStep:\n\n generates_stats = False\n stats_dtypes: List[Dict[str, np.dtype]] = []\n vars: List[PyMC3Variable] = []\n\n def __new__(cls, *args, **kwargs):\n blocked = kwargs.get(\"blocked\")\n if blocked is None:\n # Try to look up default value from class\n blocked = getattr(cls, \"default_blocked\", True)\n kwargs[\"blocked\"] = blocked\n\n model = modelcontext(kwargs.get(\"model\"))\n kwargs.update({\"model\": model})\n\n # vars can either be first arg or a kwarg\n if \"vars\" not in kwargs and len(args) >= 1:\n vars = args[0]\n args = args[1:]\n elif \"vars\" in kwargs:\n vars = kwargs.pop(\"vars\")\n else: # Assume all model variables\n vars = model.vars\n\n # get the actual inputs from the vars\n vars = inputvars(vars)\n\n if len(vars) == 0:\n raise ValueError(\"No free random variables to sample.\")\n\n if not blocked and len(vars) > 1:\n # In this case we create a separate sampler for each var\n # and append them to a CompoundStep\n steps = []\n for var in vars:\n step = super().__new__(cls)\n # If we don't return the instance we have to manually\n # call __init__\n step.__init__([var], *args, **kwargs)\n # Hack for creating the class correctly when unpickling.\n step.__newargs = ([var],) + args, kwargs\n steps.append(step)\n\n return CompoundStep(steps)\n else:\n step = super().__new__(cls)\n # Hack for creating the class correctly when unpickling.\n step.__newargs = (vars,) + args, kwargs\n return step\n\n # Hack for creating the class correctly when unpickling.\n def __getnewargs_ex__(self):\n return self.__newargs\n\n @staticmethod\n def competence(var, has_grad):\n return Competence.INCOMPATIBLE\n\n @classmethod\n def _competence(cls, vars, have_grad):\n vars = np.atleast_1d(vars)\n have_grad = np.atleast_1d(have_grad)\n competences = []\n for var, has_grad in zip(vars, have_grad):\n try:\n competences.append(cls.competence(var, has_grad))\n except TypeError:\n competences.append(cls.competence(var))\n return competences\n\n @property\n def vars_shape_dtype(self):\n shape_dtypes = {}\n for var in self.vars:\n dtype = np.dtype(var.dtype)\n shape = var.dshape\n shape_dtypes[var.name] = (shape, dtype)\n return shape_dtypes\n\n def stop_tuning(self):\n if hasattr(self, 
\"tune\"):\n self.tune = False\n\n\nclass ArrayStep(BlockedStep):\n \"\"\"\n Blocked step method that is generalized to accept vectors of variables.\n\n Parameters\n ----------\n vars: list\n List of variables for sampler.\n fs: list of logp aesara functions\n allvars: Boolean (default False)\n blocked: Boolean (default True)\n \"\"\"\n\n def __init__(self, vars, fs, allvars=False, blocked=True):\n self.vars = vars\n self.ordering = ArrayOrdering(vars)\n self.fs = fs\n self.allvars = allvars\n self.blocked = blocked\n\n def step(self, point):\n bij = DictToArrayBijection(self.ordering, point)\n\n inputs = [bij.mapf(x) for x in self.fs]\n if self.allvars:\n inputs.append(point)\n\n if self.generates_stats:\n apoint, stats = self.astep(bij.map(point), *inputs)\n return bij.rmap(apoint), stats\n else:\n apoint = self.astep(bij.map(point), *inputs)\n return bij.rmap(apoint)\n\n\nclass ArrayStepShared(BlockedStep):\n \"\"\"Faster version of ArrayStep that requires the substep method that does not wrap\n the functions the step method uses.\n\n Works by setting shared variables before using the step. This eliminates the mapping\n and unmapping overhead as well as moving fewer variables around.\n \"\"\"\n\n def __init__(self, vars, shared, blocked=True):\n \"\"\"\n Parameters\n ----------\n vars: list of sampling variables\n shared: dict of aesara variable -> shared variable\n blocked: Boolean (default True)\n \"\"\"\n self.vars = vars\n self.ordering = ArrayOrdering(vars)\n self.shared = {get_var_name(var): shared for var, shared in shared.items()}\n self.blocked = blocked\n self.bij = None\n\n def step(self, point):\n for var, share in self.shared.items():\n share.set_value(point[var])\n\n self.bij = DictToArrayBijection(self.ordering, point)\n\n if self.generates_stats:\n apoint, stats = self.astep(self.bij.map(point))\n return self.bij.rmap(apoint), stats\n else:\n apoint = self.astep(self.bij.map(point))\n return self.bij.rmap(apoint)\n\n\nclass PopulationArrayStepShared(ArrayStepShared):\n \"\"\"Version of ArrayStepShared that allows samplers to access the states\n of other chains in the population.\n\n Works by linking a list of Points that is updated as the chains are iterated.\n \"\"\"\n\n def __init__(self, vars, shared, blocked=True):\n \"\"\"\n Parameters\n ----------\n vars: list of sampling variables\n shared: dict of aesara variable -> shared variable\n blocked: Boolean (default True)\n \"\"\"\n self.population = None\n self.this_chain = None\n self.other_chains = None\n return super().__init__(vars, shared, blocked)\n\n def link_population(self, population, chain_index):\n \"\"\"Links the sampler to the population.\n\n Parameters\n ----------\n population: list of Points. (The elements of this list must be\n replaced with current chain states in every iteration.)\n chain_index: int of the index of this sampler in the population\n \"\"\"\n self.population = population\n self.this_chain = chain_index\n self.other_chains = [c for c in range(len(population)) if c != chain_index]\n if not len(self.other_chains) > 1:\n raise ValueError(\n \"Population is just {} + {}. 
\"\n \"This is too small and the error should have been raised earlier.\".format(\n self.this_chain, self.other_chains\n )\n )\n return\n\n\nclass GradientSharedStep(BlockedStep):\n def __init__(\n self, vars, model=None, blocked=True, dtype=None, logp_dlogp_func=None, **aesara_kwargs\n ):\n model = modelcontext(model)\n self.vars = vars\n self.blocked = blocked\n\n if logp_dlogp_func is None:\n func = model.logp_dlogp_function(vars, dtype=dtype, **aesara_kwargs)\n else:\n func = logp_dlogp_func\n\n # handle edge case discovered in #2948\n try:\n func.set_extra_values(model.test_point)\n q = func.dict_to_array(model.test_point)\n logp, dlogp = func(q)\n except ValueError:\n if logp_dlogp_func is not None:\n raise\n aesara_kwargs.update(mode=\"FAST_COMPILE\")\n func = model.logp_dlogp_function(vars, dtype=dtype, **aesara_kwargs)\n\n self._logp_dlogp_func = func\n\n def step(self, point):\n self._logp_dlogp_func.set_extra_values(point)\n array = self._logp_dlogp_func.dict_to_array(point)\n\n if self.generates_stats:\n apoint, stats = self.astep(array)\n point = self._logp_dlogp_func.array_to_full_dict(apoint)\n return point, stats\n else:\n apoint = self.astep(array)\n point = self._logp_dlogp_func.array_to_full_dict(apoint)\n return point\n\n\ndef metrop_select(mr, q, q0):\n \"\"\"Perform rejection/acceptance step for Metropolis class samplers.\n\n Returns the new sample q if a uniform random number is less than the\n metropolis acceptance rate (`mr`), and the old sample otherwise, along\n with a boolean indicating whether the sample was accepted.\n\n Parameters\n ----------\n mr: float, Metropolis acceptance rate\n q: proposed sample\n q0: current sample\n\n Returns\n -------\n q or q0\n \"\"\"\n # Compare acceptance ratio to uniform random number\n if np.isfinite(mr) and np.log(uniform()) < mr:\n return q, True\n else:\n return q0, False\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"scipy.sparse.issparse",
"numpy.multiply",
"numpy.abs",
"numpy.asarray",
"numpy.isnan",
"numpy.eye",
"numpy.isfinite",
"numpy.ones",
"numpy.random.normal",
"numpy.any",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.divide"
],
[
"numpy.atleast_1d",
"numpy.random.uniform",
"numpy.dtype",
"numpy.isfinite"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Vikicsizmadia/ctp | [
"d88fdfecf4b90ee42e6137a9767226c0d35b19a3",
"d88fdfecf4b90ee42e6137a9767226c0d35b19a3",
"d88fdfecf4b90ee42e6137a9767226c0d35b19a3",
"d88fdfecf4b90ee42e6137a9767226c0d35b19a3",
"d88fdfecf4b90ee42e6137a9767226c0d35b19a3"
] | [
"ctp/evaluation/slow.py",
"ctp/indexing/np.py",
"ctp/visualization/hinton.py",
"simple/ctp_original/clutrr-cli_final.py",
"simple/model_simple_cleaned_hetero_simple.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n# from tqdm import tqdm\n\nimport torch\nfrom torch import nn\n\nfrom ctp.util import make_batches\nfrom ctp.models import BaseLatentFeatureModel\n\nfrom typing import Tuple, Dict\n\n\ndef evaluate_slow(entity_embeddings: nn.Embedding,\n predicate_embeddings: nn.Embedding,\n test_triples: Tuple[str, str, str],\n all_triples: Tuple[str, str, str],\n entity_to_index: Dict[str, int],\n predicate_to_index: Dict[str, int],\n model: BaseLatentFeatureModel,\n batch_size: int,\n device: torch.device):\n\n xs = np.array([entity_to_index.get(s) for (s, _, _) in test_triples])\n xp = np.array([predicate_to_index.get(p) for (_, p, _) in test_triples])\n xo = np.array([entity_to_index.get(o) for (_, _, o) in test_triples])\n\n sp_to_o, po_to_s = {}, {}\n for s, p, o in all_triples:\n s_idx, p_idx, o_idx = entity_to_index.get(s), predicate_to_index.get(p), entity_to_index.get(o)\n sp_key = (s_idx, p_idx)\n po_key = (p_idx, o_idx)\n\n if sp_key not in sp_to_o:\n sp_to_o[sp_key] = []\n if po_key not in po_to_s:\n po_to_s[po_key] = []\n\n sp_to_o[sp_key] += [o_idx]\n po_to_s[po_key] += [s_idx]\n\n assert xs.shape == xp.shape == xo.shape\n nb_test_triples = xs.shape[0]\n\n batches = make_batches(nb_test_triples, batch_size)\n\n hits = dict()\n hits_at = [1, 3, 5, 10]\n\n for hits_at_value in hits_at:\n hits[hits_at_value] = 0.0\n\n def hits_at_n(n_, rank):\n if rank <= n_:\n hits[n_] = hits.get(n_, 0) + 1\n\n counter = 0\n mrr = 0.0\n\n ranks_l, ranks_r = [], []\n for start, end in batches:\n batch_xs = xs[start:end]\n batch_xp = xp[start:end]\n batch_xo = xo[start:end]\n\n batch_size = batch_xs.shape[0]\n counter += batch_size * 2\n\n with torch.no_grad():\n tensor_xs = torch.tensor(batch_xs, dtype=torch.long, device=device)\n tensor_xp = torch.tensor(batch_xp, dtype=torch.long, device=device)\n tensor_xo = torch.tensor(batch_xo, dtype=torch.long, device=device)\n\n tensor_xs_emb = entity_embeddings(tensor_xs)\n tensor_xp_emb = predicate_embeddings(tensor_xp)\n tensor_xo_emb = entity_embeddings(tensor_xo)\n # print(entity_embeddings.weight.shape)\n\n if model.model.facts[0].shape[0] < 90000:\n res_sp, res_po = model.forward_(tensor_xp_emb, tensor_xs_emb, tensor_xo_emb)\n else:\n res_sp, res_po = model.forward__(tensor_xp_emb, tensor_xs_emb, tensor_xo_emb)\n\n _scores_sp, _ = res_sp\n _scores_po, _ = res_po\n\n scores_sp, scores_po = _scores_sp.cpu().numpy(), _scores_po.cpu().numpy()\n\n del _scores_sp, _scores_po\n del tensor_xs, tensor_xp, tensor_xo\n del tensor_xs_emb, tensor_xp_emb, tensor_xo_emb\n del res_sp, res_po\n # print(scores_sp.shape, scores_po.shape)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n batch_size = batch_xs.shape[0]\n for elem_idx in range(batch_size):\n s_idx, p_idx, o_idx = batch_xs[elem_idx], batch_xp[elem_idx], batch_xo[elem_idx]\n\n # Code for the filtered setting\n sp_key = (s_idx, p_idx)\n po_key = (p_idx, o_idx)\n\n o_to_remove = sp_to_o[sp_key]\n s_to_remove = po_to_s[po_key]\n\n for tmp_o_idx in o_to_remove:\n if tmp_o_idx != o_idx:\n scores_sp[elem_idx, tmp_o_idx] = - np.infty\n\n for tmp_s_idx in s_to_remove:\n if tmp_s_idx != s_idx:\n scores_po[elem_idx, tmp_s_idx] = - np.infty\n # End of code for the filtered setting\n\n rank_l = 1 + np.argsort(np.argsort(- scores_po[elem_idx, :]))[s_idx]\n rank_r = 1 + np.argsort(np.argsort(- scores_sp[elem_idx, :]))[o_idx]\n\n ranks_l += [rank_l]\n ranks_r += [rank_r]\n\n mrr += 1.0 / rank_l\n mrr += 1.0 / rank_r\n\n for n in hits_at:\n hits_at_n(n, rank_l)\n\n for n 
in hits_at:\n hits_at_n(n, rank_r)\n\n counter = float(counter)\n\n mrr /= counter\n\n for n in hits_at:\n hits[n] /= counter\n\n metrics = dict()\n metrics['MRR'] = mrr\n for n in hits_at:\n metrics['hits@{}'.format(n)] = hits[n]\n\n return metrics\n",
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom ctp.indexing.base import Index\n\n\nclass NPSearchIndex(Index):\n def __init__(self):\n super().__init__()\n self.data = None\n\n def build(self,\n data: np.ndarray):\n self.data = data\n\n def query(self,\n data: np.ndarray,\n k: int = 5) -> np.ndarray:\n nb_instances = data.shape[0]\n res = []\n for i in range(nb_instances):\n sqd = np.sqrt(((self.data - data[i, :]) ** 2).sum(axis=1))\n indices = np.argsort(sqd)\n top_k_indices = indices[:k].tolist()\n res += [top_k_indices]\n res = np.array(res)\n return res\n\n",
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom colorclass import Color\nfrom terminaltables import SingleTable\n\n\nclass HintonDiagram:\n def __init__(self, max_arr=None):\n self.max_arr = max_arr\n\n def __call__(self, data):\n return hinton_diagram(data, self.max_arr)\n\n\ndef hinton_diagram(arr, max_arr=None):\n max_arr = arr if max_arr is None else max_arr\n max_val = max(abs(np.max(max_arr)), abs(np.min(max_arr)))\n diagram = [list([_hinton_diagram_value(x, max_val) for x in _arr]) for _arr in arr]\n\n table = SingleTable(diagram)\n table.inner_heading_row_border = False\n table.inner_footing_row_border = False\n table.inner_column_border = False\n table.inner_row_border = False\n table.column_max_width = 1\n\n return table.table\n\n\ndef _hinton_diagram_value(val, max_val):\n chars = [' ', '▁', '▂', '▃', '▄', '▅']\n step = len(chars) - 1\n if abs(abs(val) - max_val) >= 1e-8:\n step = int(abs(float(val) / max_val) * len(chars))\n attr = 'red' if val < 0 else 'green'\n return Color('{auto' + attr + '}' + str(chars[step]) + '{/auto' + attr + '}')\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nfrom os.path import join, dirname, abspath\nimport sys\n\nimport multiprocessing\nimport numpy as np\n\nimport torch\nfrom torch import nn, optim, Tensor\nimport torch.nn.functional as F\n\nfrom ctp.util import make_batches\nfrom ctp.clutrr import Fact, Data, Instance, accuracy\n\nfrom ctp.clutrr.models import BatchNeuralKB\nfrom model_final import BatchHoppy\n\nfrom ctp.reformulators import BaseReformulator\nfrom ctp.reformulators import LinearReformulator\nfrom ctp.reformulators import MemoryReformulator\n\nfrom ctp.kernels import BaseKernel, GaussianKernel\n\nfrom typing import List, Tuple, Dict, Optional\n\nimport logging\n\nlogger = logging.getLogger(os.path.basename(sys.argv[0]))\nnp.set_printoptions(linewidth=256, precision=4, suppress=True, threshold=sys.maxsize)\n\ntorch.set_num_threads(multiprocessing.cpu_count())\n\n\ndef decode(vector: Tensor,\n kernel: BaseKernel,\n relation_embeddings: nn.Module) -> Tuple[int, float]:\n weight = relation_embeddings.weight\n k = kernel.pairwise(vector, weight)[0, :]\n top_idx = k.argmax(dim=0).item()\n top_score = k[top_idx].item()\n return top_idx, top_score\n\n\ndef show_rules(model: BatchHoppy,\n kernel: BaseKernel,\n relation_embeddings: nn.Embedding,\n relation_to_idx: Dict[str, int],\n device: Optional[torch.device] = None):\n idx_to_relation = {i: r for r, i in relation_to_idx.items()}\n\n rel_idx_pair_lst = sorted(relation_to_idx.items(), key=lambda kv: kv[1])\n\n for r, i in rel_idx_pair_lst:\n indices = torch.tensor([i], dtype=torch.long, device=device)\n\n r_emb = relation_embeddings(indices)\n\n hops_lst = [p for p in model.hops_lst]\n\n for reformulator, _ in hops_lst:\n def _to_pair(hop: Tensor) -> Tuple[str, float]:\n idx, score = decode(hop, kernel, relation_embeddings)\n rel = idx_to_relation[idx]\n return rel, score\n\n hop_tensor_lst = [hop for hop in reformulator(r_emb)]\n\n r_hops = [_to_pair(hop) for hop in hop_tensor_lst]\n print(r, ' ← ', ', '.join(f'({a} {b:.4f})' for a, b in r_hops))\n\n if isinstance(model.model, BatchHoppy):\n for _r_emb in hop_tensor_lst:\n _hops_lst = [p for p in model.model.hops_lst]\n\n for j, (_reformulator, _) in enumerate(_hops_lst):\n _hop_tensor_lst = [_hop for _hop in _reformulator(_r_emb)]\n _r_hops = [_to_pair(_hop) for _hop in _hop_tensor_lst]\n print(j, ' ← ', ', '.join(f'({_a} {_b:.4f})' for _a, _b in _r_hops))\n\n return\n\n\nclass Batcher:\n def __init__(self,\n batch_size: int,\n nb_examples: int,\n nb_epochs: int,\n random_state: Optional[np.random.RandomState]):\n self.batch_size = batch_size\n self.nb_examples = nb_examples\n self.nb_epochs = nb_epochs\n self.random_state = random_state\n\n size = self.nb_epochs * self.nb_examples\n self.curriculum = np.zeros(size, dtype=np.int32)\n\n for epoch_no in range(nb_epochs):\n start, end = epoch_no * nb_examples, (epoch_no + 1) * nb_examples\n if self.random_state is not None:\n self.curriculum[start: end] = self.random_state.permutation(nb_examples)\n else:\n self.curriculum[start: end] = np.arange(nb_examples)\n\n self.batches = make_batches(self.curriculum.shape[0], self.batch_size)\n self.nb_batches = len(self.batches)\n\n def get_batch(self,\n batch_start: int,\n batch_end: int) -> np.ndarray:\n return self.curriculum[batch_start:batch_end]\n\n# takes in the list of facts [F,3]\n# gives back the list of the embeddings corresponding to the relations of each fact [F,E]\ndef encode_relation(facts: List[Fact], # [F,3]: [(s,r,o), (s,r,o), ..., (s,r,o)]\n 
relation_embeddings: Tensor,\n relation_to_idx: Dict[str, int],\n device: Optional[torch.device] = None) -> Tensor:\n indices_np = np.array([relation_to_idx[r] for _, r, _ in facts], dtype=np.int64)\n indices = torch.tensor(indices_np, dtype=torch.long, device=device)\n res = F.embedding(indices, relation_embeddings)\n return res\n\n# takes in the list of facts [F,3]\n# gives back the list of the embeddings corresponding to the subject and object of each fact [F,E],[F,E]\ndef encode_arguments(facts: List[Fact],\n entity_embeddings: Tensor,\n entity_to_idx: Dict[str, int],\n device: Optional[torch.device] = None) -> Tuple[Tensor, Tensor]:\n indices_np = np.array([[entity_to_idx[s], entity_to_idx[o]] for s, _, o in facts], dtype=np.int64)\n indices = torch.tensor(indices_np, dtype=torch.long, device=device)\n emb = F.embedding(indices, entity_embeddings)\n return emb[:, 0, :], emb[:, 1, :]\n\n# takes in the list of facts [F,3]\n# gives back the list of the embeddings corresponding to all entities that are in the facts as either\n# subject or object\n# [N,E], where N is the number of different entities in the facts\ndef encode_entities(facts: List[Fact],\n entity_embeddings: Tensor,\n entity_to_idx: Dict[str, int],\n device: Optional[torch.device]) -> Tensor:\n indices_lst = sorted({entity_to_idx[e] for s, r, o in facts for e in {s, o}})\n indices = torch.tensor(indices_lst, dtype=torch.long, device=device)\n emb = F.embedding(indices, entity_embeddings)\n return emb\n\n\ndef main():\n\n import time\n start_time = time.time()\n\n debug = False\n\n train_path = join(dirname(dirname(dirname(abspath(__file__)))),'data', 'clutrr-emnlp', 'data_test', '64.csv')\n test_path1 = join(dirname(dirname(dirname(abspath(__file__)))),'data', 'clutrr-emnlp', 'data_db9b8f04', '1.10_test.csv')\n test_path2 = join(dirname(dirname(dirname(abspath(__file__)))), 'data', 'clutrr-emnlp', 'data_db9b8f04', '1.2_test.csv')\n test_path3 = join(dirname(dirname(dirname(abspath(__file__)))), 'data', 'clutrr-emnlp', 'data_db9b8f04', '1.3_test.csv')\n test_path4 = join(dirname(dirname(dirname(abspath(__file__)))), 'data', 'clutrr-emnlp', 'data_db9b8f04', '1.4_test.csv')\n test_path5 = join(dirname(dirname(dirname(abspath(__file__)))), 'data', 'clutrr-emnlp', 'data_db9b8f04', '1.5_test.csv')\n test_path6 = join(dirname(dirname(dirname(abspath(__file__)))), 'data', 'clutrr-emnlp', 'data_db9b8f04', '1.6_test.csv')\n test_path7 = join(dirname(dirname(dirname(abspath(__file__)))), 'data', 'clutrr-emnlp', 'data_db9b8f04', '1.7_test.csv')\n test_path8 = join(dirname(dirname(dirname(abspath(__file__)))), 'data', 'clutrr-emnlp', 'data_db9b8f04', '1.8_test.csv')\n test_path9 = join(dirname(dirname(dirname(abspath(__file__)))), 'data', 'clutrr-emnlp', 'data_db9b8f04', '1.9_test.csv')\n test_paths = [test_path1, test_path2, test_path3, test_path4, test_path5, test_path6, test_path7, test_path8, test_path9]\n\n # model params\n\n # the size of the embedding of each atom and relationship\n embedding_size = 20\n # when proving the body of a rule, we consider the k best substitutions for each variable\n### OTHER\n k_max = 5 #10, 5 in suggested\n # how many times to reformulate the goal(s) --> bigger for bigger graph: this is for training\n max_depth = 2\n # how many times to reformulate the goal(s): this is for testing --> this depth can be bigger than for training\n test_max_depth = 2\n\n # the shape of the reformulation:\n # 2: goal(X,Z) -> p(X,Y), q(Y,Z) (2 elements in the body)\n # 1: goal(X,Z) -> r(X,Z) (1 element in the body)\n # 1R: 
goal(X,Z) -> s(Z,X) (variables in reversed order)\n # if we have multiple in the array, that means at each reformulation step we actually reformulate the same goal\n # multiple times according to the elements in the array (so we have more choice to get to a good proof)\n hops_str = ['2', '2', '2'] # ['2', '2', '1R']\n\n\n # training params\n\n nb_epochs = 50 # 100\n learning_rate = 0.1\n # training batch size\n batch_size = 32\n # testing batch size --> this can be smaller than for training\n test_batch_size = batch_size # could be other as well\n\n optimizer_name = 'adagrad' # choices = ['adagrad', 'adam', 'sgd']\n\n seed = 1 # int\n\n # how often you want to evaluate\n evaluate_every = None # int\n evaluate_every_batches = None # int\n\n # whether you want to regularize\n #argparser.add_argument('--N2', action='store', type=float, default=None)\n #argparser.add_argument('--N3', action='store', type=float, default=None)\n #argparser.add_argument('--entropy', '-E', action='store', type=float, default=None)\n\n scoring_type = 'concat' # choices = ['concat', 'min']\n\n # how to get the score combined from the conjunction of the parts of the body in a rule\n # e.g. goal(X,Y) :- p(X,Z), q(Z,Y) --> how do we combine the scores of p and q\n # I can just keep min, it works well, and later add the others potentially\n tnorm_name = 'min' # choices = ['min', 'prod', 'mean']\n # which function to use for reformulating a goal to subgoals\n # I can just keep linear for the initial version, and if it works, I can add more\n reformulator_name = 'linear' # choices = ['static', 'linear', 'attentive', 'memory', 'ntp'] --> deleted code part\n\n nb_gradient_accumulation_steps = 1 # int\n\n#### IGNORE --> code as well\n gntp_R = None # int\n\n slope = 1.0 # float\n init_size = 1.0 # float\n\n init_type = 'random' # 'uniform'\n ref_init_type = 'random'\n\n#### IGNORE\n #argparser.add_argument('--fix-relations', '--FR', action='store_true', default=False)\n is_fixed_relations = False\n # whether you want to train on the smallest graph first (it's easier to train on them)\n start_simple = None # int\n\n is_debug = False\n\n load_path = None # str\n save_path = None # str\n\n # took out predicates from the code, no need for them (they take only 1 argument, but an edge in PyG always takes 2)\n is_predicate = False\n\n# IGNORED variables:\n #N2_weight = args.N2\n #N3_weight = args.N3\n #entropy_weight = args.entropy\n\n #is_fixed_relations = args.fix_relations\n\n\n np.random.seed(seed)\n random_state = np.random.RandomState(seed)\n torch.manual_seed(seed)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n logger.info(f'Device: {device}')\n\n if torch.cuda.is_available():\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n\n# Initializing data and embeddings\n\n data = Data(train_path=train_path, test_paths=test_paths)\n entity_lst, relation_lst = data.entity_lst, data.relation_lst\n predicate_lst = data.predicate_lst\n\n relation_to_predicate = data.relation_to_predicate\n\n test_relation_lst = [\"aunt\", \"brother\", \"daughter\", \"daughter-in-law\", \"father\", \"father-in-law\", \"granddaughter\",\n \"grandfather\", \"grandmother\", \"grandson\", \"mother\", \"mother-in-law\", \"nephew\", \"niece\",\n \"sister\", \"son\", \"son-in-law\", \"uncle\"]\n\n test_predicate_lst = sorted({relation_to_predicate[r] for r in test_relation_lst})\n\n nb_entities = len(entity_lst)\n nb_relations = len(relation_lst)\n nb_predicates = len(predicate_lst)\n\n entity_to_idx = {e: i for i, e in 
enumerate(entity_lst)}\n relation_to_idx = {r: i for i, r in enumerate(relation_lst)}\n predicate_to_idx = {p: i for i, p in enumerate(predicate_lst)}\n\n kernel = GaussianKernel(slope=slope)\n\n entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True).to(device)\n nn.init.uniform_(entity_embeddings.weight, -1.0, 1.0)\n entity_embeddings.requires_grad = False\n\n relation_embeddings = nn.Embedding(nb_relations if not is_predicate else nb_predicates,\n embedding_size, sparse=True).to(device)\n\n if is_fixed_relations is True:\n relation_embeddings.requires_grad = False\n\n if init_type in {'uniform'}:\n nn.init.uniform_(relation_embeddings.weight, -1.0, 1.0)\n\n relation_embeddings.weight.data *= init_size\n\n # the model that lets you look up in the KB\n model = BatchNeuralKB(kernel=kernel, scoring_type=scoring_type).to(device)\n memory: Dict[int, MemoryReformulator.Memory] = {}\n\n # generates a reformulator of type given as an argument\n def make_hop(s: str) -> Tuple[BaseReformulator, bool]:\n nonlocal memory\n if s.isdigit():\n nb_hops, is_reversed = int(s), False\n else:\n nb_hops, is_reversed = int(s[:-1]), True\n res = None\n\n if reformulator_name in {'linear'}:\n res = LinearReformulator(nb_hops, embedding_size, init_name=ref_init_type)\n\n assert res is not None\n return res.to(device), is_reversed\n\n hops_lst = [make_hop(s) for s in hops_str] # hops_str = [2,2,1R]\n\n # \"model\" is a neural KB for checking whether the facts are true or not\n # hoppy is the model that does the reasoning, using the neural KB\n hoppy = BatchHoppy(model=model, k=k_max, depth=max_depth, tnorm_name=tnorm_name,\n hops_lst=hops_lst).to(device)\n\n def scoring_function(instances_batch: List[Instance],\n relation_lst: List[str],\n is_train: bool = False,\n _depth: Optional[int] = None) -> Tuple[Tensor, List[Tensor]]:\n\n rel_emb_lst: List[Tensor] = []\n arg1_emb_lst: List[Tensor] = []\n arg2_emb_lst: List[Tensor] = []\n\n story_rel_lst: List[Tensor] = []\n story_arg1_lst: List[Tensor] = []\n story_arg2_lst: List[Tensor] = []\n\n embeddings_lst: List[Tensor] = []\n\n label_lst: List[int] = []\n\n for i, instance in enumerate(instances_batch):\n\n if is_predicate is True:\n def _convert_fact(fact: Fact) -> Fact:\n _s, _r, _o = fact\n return _s, relation_to_predicate[_r], _o\n\n new_story = [_convert_fact(f) for f in instance.story]\n new_target = _convert_fact(instance.target)\n instance = Instance(new_story, new_target, instance.nb_nodes)\n\n story, target = instance.story, instance.target\n s, r, o = target # 1 target relation\n\n # the relation embeddings from all facts (==story)\n # [F,E], where F is the number of facts\n story_rel = encode_relation(story, relation_embeddings.weight,\n predicate_to_idx if is_predicate else relation_to_idx, device)\n # the subject,object embeddings from all facts (==story)\n # [F,E],[F,E]\n story_arg1, story_arg2 = encode_arguments(story, entity_embeddings.weight, entity_to_idx, device)\n\n # the subject,object embeddings from all facts (==story) as a list of all different entity embeddings\n # [N,E], where N is the number of different entities in the facts\n embeddings = encode_entities(story, entity_embeddings.weight, entity_to_idx, device)\n\n # target subject (s), object(o) paired with all possible relations from the relation_lst (x)\n # [R,3] where R is the number of relations\n target_lst: List[Tuple[str, str, str]] = [(s, x, o) for x in relation_lst]\n\n assert len(target_lst) == len(test_predicate_lst if is_predicate else test_relation_lst)\n\n 
# true_predicate = rel_to_predicate[r]\n # label_lst += [int(true_predicate == rel_to_predicate[r]) for r in relation_lst]\n\n # for each instance in the batch we are iterating through: [0,0,...,0,1,0,0,...,0] list\n # where 1 is at the relation that is the target relation\n label_lst += [int(tr == r) for tr in relation_lst]\n\n # relation embeddings of the target subject,object paired with all possible relations - R relations\n # [R,E]\n rel_emb = encode_relation(target_lst, relation_embeddings.weight,\n predicate_to_idx if is_predicate else relation_to_idx, device)\n # target subject,object embeddings\n # [R,E],[R,E] (actually all will be the same as the subject, object doesn't change just the relations)\n arg1_emb, arg2_emb = encode_arguments(target_lst, entity_embeddings.weight, entity_to_idx, device)\n\n batch_size = rel_emb.shape[0] # R - the number of relations --> B from now on\n fact_size = story_rel.shape[0] # F - the number of facts in \"story\"\n entity_size = embeddings.shape[0] # N - the number of different entities in the facts (==\"story\")\n\n # [B, E]\n # at each instance in the batch we add [B,E] to it (B==R)\n # --> rel_emb_lst will become [batch,B,E], where batch is number of instances in batch\n rel_emb_lst += [rel_emb]\n arg1_emb_lst += [arg1_emb]\n arg2_emb_lst += [arg2_emb]\n\n # [B, F, E]\n # repeat the same facts for each rel subst in instance of current batch\n # (1 batch will be all possible relation substitutions for the target relation)\n # --> story_rel_lst will become [batch,B,F,E], where batch is number of instances in batch\n story_rel_lst += [story_rel.view(1, fact_size, -1).repeat(batch_size, 1, 1)]\n story_arg1_lst += [story_arg1.view(1, fact_size, -1).repeat(batch_size, 1, 1)]\n story_arg2_lst += [story_arg2.view(1, fact_size, -1).repeat(batch_size, 1, 1)]\n\n # [B, N, E]\n # repeat the same entity embeddings for each instance in batch (== for each relation)\n # (1 batch will be all possible relation substitutions for the target relation)\n # --> embeddings_lst will become [batch,B,N,E], where batch is number of instances in batch\n embeddings_lst += [embeddings.view(1, entity_size, -1).repeat(batch_size, 1, 1)]\n\n def cat_pad(t_lst: List[Tensor]) -> Tuple[Tensor, Tensor]:\n # t: [B,F,E] --> t.shape[1] == F --> F possibly different each time\n # OR\n # t: [B,N,E] --> t.shape[1] == N --> N possibly different each time\n lengths: List[int] = [t.shape[1] for t in t_lst]\n max_len: int = max(lengths)\n\n # pad the _t tensor: [B,F,E] (which is all t in t_lst) with embeddings of 0 where F is smaller than max_len\n # OR\n # pad the _t tensor: [B,N,E] (which is all t in t_lst) with embeddings of 0 where N is smaller than max_len\n def my_pad(_t: Tensor, pad: List[int]) -> Tensor:\n return torch.transpose(F.pad(torch.transpose(_t, 1, 2), pad=pad), 1, 2)\n\n # [batch*B,F,E], where each F is the same (sometimes padded with extra 0 embeddings to reach that)\n # OR\n # [batch*B,N,E], where each N is the same (sometimes padded with extra 0 embeddings to reach that)\n res_t: Tensor = torch.cat([my_pad(t, pad=[0, max_len - lengths[idx]]) for idx, t in enumerate(t_lst)],\n dim=0)\n # [batch*B], the number of facts (before padding) in an instance\n # OR\n # [batch*B], the number of entities (before padding) in an instance\n res_l: Tensor = torch.tensor([t.shape[1] for t in t_lst for _ in range(t.shape[0])],\n dtype=torch.long, device=device)\n return res_t, res_l # [batch*B,F,E],[batch*B] OR [batch*B,N,E],[batch*B]\n\n # [batch*B,E], where batch is number of batches & 
B is batch_size that is number of relations\n rel_emb = torch.cat(rel_emb_lst, dim=0)\n arg1_emb = torch.cat(arg1_emb_lst, dim=0)\n arg2_emb = torch.cat(arg2_emb_lst, dim=0)\n\n # story_rel,story_arg1,story_arg2: [batch*B,F,E], where each F is the same (sometimes padded with extra 0 embeddings to reach that)\n # nb_facts: [batch*B], the number of facts (before padding) in each instance\n story_rel, nb_facts = cat_pad(story_rel_lst) # story_rel_lst: [batch,B,F,E]\n story_arg1, _ = cat_pad(story_arg1_lst)\n story_arg2, _ = cat_pad(story_arg2_lst)\n facts = [story_rel, story_arg1, story_arg2] # [3,batch*B,F,E]\n\n # _embeddings: [batch*B,N,E], where each N is the same (sometimes padded with extra 0 embeddings to reach that)\n # nb_embeddings: [batch*B], the number of entities (before padding) in each instance\n _embeddings, nb_embeddings = cat_pad(embeddings_lst)\n\n max_depth_ = hoppy.depth\n if not is_train and test_max_depth is not None:\n hoppy.depth = test_max_depth\n\n if _depth is not None:\n hoppy.depth = _depth\n\n # trying hoppy.prove instead\n scores = hoppy.score(rel_emb, arg1_emb, arg2_emb, facts, nb_facts, _embeddings, nb_embeddings)\n #scores = hoppy.prove(rel_emb, arg1_emb, arg2_emb, facts, nb_facts, _embeddings, nb_embeddings, hoppy.depth)\n\n if not is_train and test_max_depth is not None:\n hoppy.depth = max_depth_\n\n if _depth is not None:\n hoppy.depth = max_depth_ # ??\n\n return scores, [rel_emb, arg1_emb, arg2_emb]\n\n def evaluate(instances: List[Instance],\n path: str,\n sample_size: Optional[int] = None) -> float:\n res = 0.0\n if len(instances) > 0:\n res = accuracy(scoring_function=scoring_function,\n instances=instances,\n sample_size=sample_size,\n relation_lst=test_predicate_lst if is_predicate else test_relation_lst,\n batch_size=test_batch_size,\n relation_to_predicate=relation_to_predicate if is_predicate else None,\n is_debug=is_debug)\n logger.info(f'Test Accuracy on {path}: {res:.6f}')\n return res\n\n loss_function = nn.BCELoss()\n\n params_lst = [p for p in hoppy.parameters() if not torch.equal(p, entity_embeddings.weight)]\n\n if is_fixed_relations is False:\n params_lst += relation_embeddings.parameters()\n\n params = nn.ParameterList(params_lst).to(device)\n\n if load_path is not None:\n model.load_state_dict(torch.load(load_path))\n\n for tensor in params_lst:\n logger.info(f'\\t{tensor.size()}\\t{tensor.device}')\n\n # TODO: set just adam for example, no need for all these choices\n optimizer_factory = {\n 'adagrad': lambda arg: optim.Adagrad(arg, lr=learning_rate),\n 'adam': lambda arg: optim.Adam(arg, lr=learning_rate),\n 'sgd': lambda arg: optim.SGD(arg, lr=learning_rate)\n }\n\n assert optimizer_name in optimizer_factory\n optimizer = optimizer_factory[optimizer_name](params)\n\n global_step = 0\n\n loss_mean_lst = []\n scores_total = []\n scores_bigger_05 = []\n scores_bigger_03 = []\n scores_smaller_01 = []\n scores_smaller_005 = []\n scores_smaller_001 = []\n scores_smaller_0005 = []\n scores_smaller_0001 = []\n\n for epoch_no in range(1, nb_epochs + 1):\n\n training_set, is_simple = data.train, False\n if start_simple is not None and epoch_no <= start_simple:\n training_set = [ins for ins in training_set if len(ins.story) == 2]\n is_simple = True\n logger.info(f'{len(data.train)} → {len(training_set)}')\n\n batcher = Batcher(batch_size=batch_size, nb_examples=len(training_set), nb_epochs=1, random_state=random_state)\n\n nb_batches = len(batcher.batches)\n epoch_loss_values = []\n\n counter = 0\n for batch_no, (batch_start, batch_end) in 
enumerate(batcher.batches, start=1):\n counter += 1\n if debug:\n if counter > 1:\n break\n global_step += 1\n\n # getting current batch from the training set\n indices_batch = batcher.get_batch(batch_start, batch_end)\n instances_batch = [training_set[i] for i in indices_batch]\n\n # label_lst: list of 1s and 0s indicating which query (==target) relation/predicate is where in the test_predicate_lst\n # TODO: take out predicates, no need for them (they take only 1 argument, but an edge in PyG always takes 2)\n if is_predicate is True:\n label_lst: List[int] = [int(relation_to_predicate[ins.target[1]] == tp)\n for ins in instances_batch\n for tp in test_predicate_lst]\n else:\n label_lst: List[int] = [int(ins.target[1] == tr) for ins in instances_batch for tr in test_relation_lst]\n\n labels = torch.tensor(label_lst, dtype=torch.float32, device=device)\n\n # returns scores of what??\n scores, query_emb_lst = scoring_function(instances_batch,\n test_predicate_lst if is_predicate else test_relation_lst,\n is_train=True,\n _depth=1 if is_simple else None)\n\n scores_total.append(scores.shape[0])\n scores_bigger_05.append(int((scores > 0.5).sum()))\n scores_bigger_03.append(int((scores > 0.3).sum()))\n scores_smaller_01.append(int((scores < 0.1).sum()))\n scores_smaller_005.append(int((scores < 0.05).sum()))\n scores_smaller_001.append(int((scores < 0.01).sum()))\n scores_smaller_0005.append(int((scores < 0.005).sum()))\n scores_smaller_0001.append(int((scores < 0.001).sum()))\n\n loss = loss_function(scores, labels)\n\n factors = [hoppy.factor(e) for e in query_emb_lst]\n\n loss_value = loss.item()\n epoch_loss_values += [loss_value]\n\n if nb_gradient_accumulation_steps > 1:\n loss = loss / nb_gradient_accumulation_steps\n\n loss.backward()\n\n if nb_gradient_accumulation_steps == 1 or global_step % nb_gradient_accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n logger.info(f'Epoch {epoch_no}/{nb_epochs}\\tBatch {batch_no}/{nb_batches}\\tLoss {loss_value:.4f}')\n\n if evaluate_every_batches is not None:\n if global_step % evaluate_every_batches == 0:\n for test_path in test_paths:\n evaluate(instances=data.test[test_path], path=test_path)\n\n if evaluate_every is not None:\n if epoch_no % evaluate_every == 0:\n for test_path in test_paths:\n evaluate(instances=data.test[test_path], path=test_path)\n evaluate(instances=data.train, path=train_path)\n\n # TODO: don't need for now, but we may look at it later\n if is_debug is True:\n with torch.no_grad():\n show_rules(model=hoppy, kernel=kernel, relation_embeddings=relation_embeddings,\n relation_to_idx=predicate_to_idx if is_predicate else relation_to_idx, device=device)\n\n loss_mean, loss_std = np.mean(epoch_loss_values), np.std(epoch_loss_values)\n\n loss_mean_lst.append(loss_mean)\n\n slope = kernel.slope.item() if isinstance(kernel.slope, Tensor) else kernel.slope\n logger.info(f'Epoch {epoch_no}/{nb_epochs}\\tLoss {loss_mean:.4f} ± {loss_std:.4f}\\tSlope {slope:.4f}')\n\n end_time = time.time()\n logger.info(f'Training took {end_time - start_time} seconds.')\n\n print(f\"loss list: {loss_mean_lst}\")\n print(f\"total number of scores: {scores_total}\")\n print(f\"scores bigger than 0.5: {scores_bigger_05}\")\n print(f\"scores bigger than 0.3: {scores_bigger_03}\")\n print(f\"scores smaller than 0.1: {scores_smaller_01}\")\n print(f\"scores smaller than 0.05: {scores_smaller_005}\")\n print(f\"scores smaller than 0.01: {scores_smaller_001}\")\n print(f\"scores smaller than 0.005: {scores_smaller_0005}\")\n 
print(f\"scores smaller than 0.001: {scores_smaller_0001}\")\n\n start = time.time()\n\n for test_path in test_paths:\n evaluate(instances=data.test[test_path], path=test_path)\n\n end = time.time()\n logger.info(f'Evaluation took {end - start} seconds.')\n\n if save_path is not None:\n torch.save(model.state_dict(), save_path)\n\n logger.info(\"Training finished\")\n\n\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n print(' '.join(sys.argv))\n main()",
"# -*- coding: utf-8 -*-\n\nimport torch\nfrom torch import nn, Tensor\nimport torch.nn.functional as F\n\nfrom simple import BatchNeuralKB, uniform # ctp.clutrr.models.kb import BatchNeuralKB\n# from ctp.clutrr.models.util import uniform\n\nfrom ctp.reformulators import BaseReformulator\nfrom ctp.reformulators import GNTPReformulator\n\nfrom typing import Tuple, Optional, List\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass BatchHoppy(nn.Module):\n \"\"\"The class responsible for the proof score calculation part of the CTP method.\"\"\"\n\n def __init__(self,\n model: BatchNeuralKB,\n hops_lst: List[Tuple[BaseReformulator, bool]],\n k: int = 10,\n depth: int = 0,\n tnorm_name: str = 'min'):\n\n \"\"\"Initializing some parameters of the BatchHoppy instance.\n\n Args:\n model (BatchNeuralKB): The knowledge base used for scoring the queries.\n hops_lst (List[Tuple[BaseReformulator, bool]]): List of BaseReformulators and indicators if reversed.\n All BaseReformulators are of the same type given by the reformulator_name argument in main.\n Each BaseReformulator reformulates goals into different number of subgoals given by the\n corresponding element in the hops_str argument in main.\n If bool is True then the reformulated subgoal arguments are reversed.\n Shape: [H,2] - H BaseReformulators (length of hops_str) and indicators if reversed.\n k (int): When proving the body of a rule, we will consider the k best substitutions for each variable.\n Default: 10.\n depth (int): The total number of how many times to reformulate the goal(s).\n When the reformulation takes place this will be the maximum depth, and each individual subgoal\n will be reformulated to the depth giving the highest score.\n Default: 0.\n tnorm_name (str): This gives the method of how to update scores while reformulating the current\n goal into subgoals.\n 'min': The function updates scores by taking the minimum of all embedding similarity scores so far.\n 'prod': The function updates scores by taking the product of the embedding similarity scores so far.\n 'mean': The function updates scores by taking the mean of all embedding similarity scores so far.\n Default: 'min'.\n \"\"\"\n\n super().__init__()\n\n self.model: BatchNeuralKB = model\n self.k = k\n self.depth = depth\n assert self.depth >= 0\n self.tnorm_name = tnorm_name\n assert self.tnorm_name in {'min', 'prod', 'mean'}\n self.hops_lst = hops_lst\n\n self._hops_lst = nn.ModuleList([hops for hops, _ in hops_lst])\n logger.info(f'BatchHoppy(k={k}, depth={depth}, hops_lst={[h.__class__.__name__ for h in self._hops_lst]})')\n\n def _tnorm(self, x: Tensor, y: Tensor) -> Tensor:\n \"\"\"Updates previous embedding similarity scores with newly calculated similarity scores with the given method.\n\n Args:\n x (Tensor): Tensor of proof scores so far. Must be same shape as y.\n y (Tensor): Tensor of proof scores so far. 
Must be same shape as x.\n\n Returns:\n Tensor of the updated proof scores of the same shape as the input Tensors.\n \"\"\"\n\n res = None\n # takes the smaller of each element of both tensor\n if self.tnorm_name == 'min':\n res = torch.min(x, y)\n elif self.tnorm_name == 'prod':\n res = x * y\n elif self.tnorm_name == 'mean':\n res = (x + y) / 2\n assert res is not None\n return res\n\n def prove(self,\n rel: Tensor, arg1: Tensor, arg2: Tensor,\n facts: List[Tensor],\n entity_embeddings: Tensor,\n depth: int) -> Tensor:\n\n \"\"\"Does the main part of the CTP method by calculating the proof scores of the given queries.\n\n Args:\n rel (Tensor): rel, arg1, arg2 are the relation, subject, object embeddings.\n These are the relations of the query embeddings, each relation corresponding\n to the subject, object embeddings at the same place in the Tensors,\n making up the query \"rel(arg1,arg2)\".\n In the first call of prove every query subject and object in each batch ('batch' number\n of batches) are paired with every possible relation type ('R' different types) to give\n the query Tensor the size [batch*R,E] = [B,E].\n Shape: [B,E] - B embeddings, E embedding size.\n arg1 (Tensor): Target subject embeddings corresponding to the relations in rel.\n Shape: [B,E]\n arg2 (Tensor): Target object embeddings corresponding to the relations in rel.\n Shape: [B,E]\n facts (List[Tensor]): [fact_rel, fact_arg1, fact_arg2].\n Fact embeddings broke to 3 pieces: relation, arg1, arg2 embeddings.\n All facts correspond to all queries.\n Shape: [3,F,E] - F is the number of facts.\n entity_embeddings (Tensor): Entity embeddings corresponding to the entities in the facts.\n Shape: [batch*R,N,E] = [B,N,E] - N is the number of entity embeddings.\n depth (int): The number of how many times to reformulate the goal(s).\n When the reformulation takes place this will be the maximum depth, and each individual subgoal\n will be reformulated to the depth giving the highest score.\n This will decrease by 1 at every recursive call.\n\n Returns:\n Tensor of the maximum similarity measure (=max proof score) of the queries (rel,arg1,arg2) to the facts.\n Shape: [B] - scores for each query.\n \"\"\"\n\n # no reformulation\n\n # [B]\n scores_0 = self.model.score(rel, arg1, arg2, facts=facts)\n\n # nb_facts=nb_facts, entity_embeddings=entity_embeddings, nb_entities=nb_entities\n\n # reformulation\n\n scores_d = None\n if depth > 0:\n\n # batch_size: B, embedding_size: E, entity_emb_max_nb: N\n batch_size, embedding_size = rel.shape[0], rel.shape[1]\n entity_emb_max_nb = entity_embeddings.shape[1]\n\n # need to have the entity_embeddings of the same batch size as rel\n entity_embeddings, _ = uniform(rel, entity_embeddings)\n\n global_res = None\n\n # enumerate H times: each is 1 Reformulator (hops_generator)\n for rule_idx, (hops_generator, is_reversed) in enumerate(self.hops_lst):\n sources, scores = arg1, None # sources: [B,E]\n\n # generates the new subgoals with the current reformulator\n # --> applies the transformator to each instance in the batches\n hop_rel_lst = hops_generator(rel) # [nb_hops,B,E]\n nb_hops = len(hop_rel_lst) # usually: 2 (or 1)\n\n # enumerate through the newly generated subgoals\n # hop_rel: [B,E] - 1st (then 2nd, 3rd,...) subgoal for each of the target relations in all the batches\n for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):\n # [B * S, E], where S: 1, K^1, K^2, ... 
in the consecutive iterations\n sources_2d = sources.view(-1, embedding_size)\n nb_sources = sources_2d.shape[0] # B*S\n\n nb_branches = nb_sources // batch_size # called S: 1, K^1, K^2, ...\n\n # [B, E] --> [B, S, E] --> [B * S, N, E] --> [B * S * N, E]\n hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)\n hop_rel_2d = hop_rel_3d.view(-1, embedding_size)\n\n if hop_idx < nb_hops: # we are not at the last (batch of) subgoals\n # [B, N, E] --> [B, S, N, E] --> [B * S * N, E]\n all_entities_3d = entity_embeddings.view(batch_size, 1, -1, embedding_size).repeat(1, nb_branches, 1, 1)\n all_entities_2d = all_entities_3d.view(-1, embedding_size)\n\n # [B * S, E] --> [B * S, N, E] --> [B * S * N, E]\n new_sources_3d = sources_2d.view(-1, 1, embedding_size).repeat(1, entity_emb_max_nb, 1)\n new_sources_2d = new_sources_3d.view(-1, embedding_size)\n\n # [B * S, E] --> [B * S, N, E] --> [B * S * N, E]\n hop_rel_3d = hop_rel_2d.view(-1, 1, embedding_size).repeat(1, entity_emb_max_nb, 1)\n hop_rel_2d = hop_rel_3d.view(-1, embedding_size)\n\n if is_reversed:\n new_arg1, new_arg2 = all_entities_2d, new_sources_2d\n else:\n new_arg1, new_arg2 = new_sources_2d, all_entities_2d\n\n # one of the arguments is all entity embeddings\n # [B * S, N]\n new_scores = self.prove(hop_rel_2d, new_arg1, new_arg2, facts,\n entity_embeddings, depth=depth - 1) # nb_facts, nb_entities,\n new_scores = new_scores.view(-1, entity_emb_max_nb)\n\n # k (default 10), N (maximum number of entities in entity_embeddings)\n k = min(self.k, entity_emb_max_nb)\n\n # z_indices indicates which embedding substitution scored in the top k\n # chooses the top k from each row in new_scores\n # [B * S, K], [B * S, K]\n z_scores, z_indices = torch.topk(new_scores, k=k, dim=1)\n\n dim_1 = torch.arange(z_scores.shape[0], device=z_scores.device).view(-1, 1).repeat(1, k).view(-1)\n dim_2 = z_indices.view(-1)\n\n # making sure that we have enough entity embeddings by multiplicating them\n # to match the size of z_scores\n entity_embeddings, _ = uniform(z_scores, entity_embeddings)\n\n # corresponding entity embeddings to the top k scores (z_scores)\n # [B * S, K, E]\n z_emb = entity_embeddings[dim_1, dim_2].view(z_scores.shape[0], k, -1)\n\n # [B * S * K]\n z_scores_1d = z_scores.view(-1)\n # [B * S * K, E]\n z_emb_2d = z_emb.view(-1, embedding_size)\n\n # [B * S * K, E]\n sources = z_emb_2d\n # [B * S * K]\n scores = z_scores_1d if scores is None \\\n else self._tnorm(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))\n else:\n # [B, E] --> [B, S, E] --> [B * S, E]\n arg2_3d = arg2.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)\n arg2_2d = arg2_3d.view(-1, embedding_size)\n\n # [B * S]\n if is_reversed:\n new_arg1, new_arg2 = arg2_2d, sources_2d\n else:\n new_arg1, new_arg2 = sources_2d, arg2_2d\n\n # one of the arguments is the arg2 entities from the query\n # [B * S]\n z_scores_1d = self.prove(hop_rel_2d, new_arg1, new_arg2, facts,\n entity_embeddings, depth=depth - 1) # nb_facts, nb_entities,\n\n # [B * S]\n scores = z_scores_1d if scores is None else self._tnorm(z_scores_1d, scores)\n\n # finished enumerating through the new subgoals with current reformulator\n\n # take maximum scores from all branches\n # [B * S] --> [B, S] --> [B]\n if scores is not None:\n scores_2d = scores.view(batch_size, -1)\n res, _ = torch.max(scores_2d, dim=1)\n else:\n res = self.model.score(rel, arg1, arg2, facts=facts)\n # nb_facts=nb_facts, entity_embeddings=entity_embeddings, nb_entities=nb_entities\n\n # update scores with 
scores obtained from using the current reformulator\n # [B]\n global_res = res if global_res is None else torch.max(global_res, res)\n\n # [B]\n scores_d = global_res\n\n # [B]\n if scores_d is None:\n res = scores_0\n else:\n res = torch.max(scores_0, scores_d) # choose the one with the higher score\n return res\n\n def factor(self,\n embedding_vector: Tensor) -> Tensor:\n return self.model.factor(embedding_vector)\n\n def extra_factors(self,\n rel: Tensor, arg1: Optional[Tensor], arg2: Optional[Tensor]) -> List[Tensor]:\n return [hop_generator(rel) for hop_generators in self.hops_lst for hop_generator in hop_generators]\n"
] | [
[
"torch.cuda.empty_cache",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.argsort"
],
[
"numpy.argsort",
"numpy.array"
],
[
"numpy.max",
"numpy.min"
],
[
"torch.set_default_tensor_type",
"torch.nn.init.uniform_",
"torch.transpose",
"torch.cat",
"torch.load",
"torch.nn.Embedding",
"numpy.mean",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.arange",
"torch.equal",
"torch.tensor",
"numpy.std",
"torch.optim.SGD",
"numpy.zeros",
"torch.optim.Adam",
"torch.nn.BCELoss",
"torch.nn.ParameterList",
"numpy.array",
"numpy.random.RandomState",
"torch.nn.functional.embedding",
"torch.optim.Adagrad",
"numpy.random.seed",
"torch.manual_seed",
"numpy.set_printoptions"
],
[
"torch.max",
"torch.nn.ModuleList",
"torch.min",
"torch.arange",
"torch.topk"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
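The code cell in the row above combines proof scores with a configurable t-norm ('min', 'prod', or 'mean'). Below is a minimal, self-contained sketch of that update rule; it only restates what BatchHoppy._tnorm already does, and the example scores are made up for illustration.

import torch
from torch import Tensor

def tnorm(x: Tensor, y: Tensor, name: str = 'min') -> Tensor:
    # Element-wise combination of two batches of proof scores, mirroring BatchHoppy._tnorm.
    if name == 'min':
        return torch.min(x, y)   # keep the weaker of the two scores
    if name == 'prod':
        return x * y             # product t-norm
    if name == 'mean':
        return (x + y) / 2       # arithmetic mean of the scores
    raise ValueError(f"unknown t-norm: {name}")

# Illustrative usage with hypothetical scores in [0, 1].
a = torch.tensor([0.9, 0.4, 0.7])
b = torch.tensor([0.8, 0.6, 0.2])
print(tnorm(a, b, 'min'))  # tensor([0.8000, 0.4000, 0.2000])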
dataflowr/evaluating_bdl | [
"b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2",
"b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2",
"b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2",
"b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2",
"b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2",
"b7d7e3f2b8095a0ec43118d2b69b4b49e0b910f2"
] | [
"depthCompletion/mcdropout_eval_auce.py",
"toyRegression/MC-Dropout-MAP-02-Adam/eval_plots.py",
"toyClassification/MC-Dropout-MAP-02-SGDMOM/model.py",
"toyRegression/MC-Dropout-MAP-02-SGD/datasets.py",
"toyClassification/SGHMC-64/eval_kl_div.py",
"toyRegression/Ensemble-MAP-SGD/train.py"
] | [
"# code-checked\r\n# server-checked\r\n\r\nimport os\r\n\r\nimport torch\r\nimport torch.nn.parallel\r\nimport torch.optim\r\nimport torch.utils.data\r\nfrom torch.autograd import Variable\r\n\r\nfrom model_mcdropout import DepthCompletionNet\r\n\r\nfrom datasets import DatasetKITTIVal\r\nfrom criterion import MaskedL2Gauss, RMSE\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport pickle\r\n\r\nimport scipy.stats\r\n\r\nimport matplotlib\r\nmatplotlib.use(\"Agg\")\r\nimport matplotlib.pyplot as plt\r\n\r\nimport random\r\n\r\nmodel_id = \"mcdropout_virtual\"\r\n\r\nmodel_is = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\r\nprint (len(model_is))\r\n\r\nsnapshot_dir = \"/root/evaluating_bdl/depthCompletion/training_logs/%s_eval_auce\" % model_id\r\n\r\nkitti_depth_path = \"/root/data/kitti_depth\"\r\n\r\nbatch_size = 4\r\n\r\nif not os.path.exists(snapshot_dir):\r\n os.makedirs(snapshot_dir)\r\n\r\ncolors = {}\r\ncolors[1] = \"k\"\r\ncolors[2] = \"b\"\r\ncolors[4] = \"g\"\r\ncolors[8] = \"r\"\r\ncolors[16] = \"c\"\r\ncolors[32] = \"m\"\r\ncolors[64] = \"y\"\r\n\r\nnum_model_is = len(model_is)\r\nprint (num_model_is)\r\n\r\nM_values = [1, 2, 4, 8, 16, 32]\r\nprint (M_values)\r\n\r\n# # # # # # # # # # # # # # # # # # debug START:\r\n# M_values = [1, 2, 4]\r\n# model_is = [0, 1]\r\n# # # # # # # # # # # # # # # # # # debug END:\r\n\r\nnum_runs_per_M = 1\r\n\r\ncoverage_values = {}\r\navg_length_values = {}\r\ncoverage_error_values = {}\r\nabs_coverage_error_values = {}\r\nneg_coverage_error_values = {}\r\nfor model_i in model_is:\r\n coverage_values[model_i] = {}\r\n avg_length_values[model_i] = {}\r\n coverage_error_values[model_i] = {}\r\n abs_coverage_error_values[model_i] = {}\r\n neg_coverage_error_values[model_i] = {}\r\n\r\n for M in M_values:\r\n coverage_values[model_i][M] = {}\r\n avg_length_values[model_i][M] = {}\r\n coverage_error_values[model_i][M] = {}\r\n abs_coverage_error_values[model_i][M] = {}\r\n neg_coverage_error_values[model_i][M] = {}\r\n\r\nauc_abs_error_values = {}\r\nauc_neg_error_values = {}\r\nauc_length_values = {}\r\nloss_values = {}\r\nrmse_values = {}\r\nfor M in M_values:\r\n auc_abs_error_values[M] = []\r\n auc_neg_error_values[M] = []\r\n auc_length_values[M] = []\r\n\r\n loss_values[M] = []\r\n rmse_values[M] = []\r\n\r\neval_dataset = DatasetKITTIVal(kitti_depth_path=kitti_depth_path)\r\neval_loader = torch.utils.data.DataLoader(dataset=eval_dataset, batch_size=batch_size, shuffle=False, num_workers=4)\r\n\r\ncriterion = MaskedL2Gauss().cuda()\r\nrmse_criterion = RMSE().cuda()\r\n\r\nfor model_i in model_is:\r\n print (\"model_i: %d\" % model_i)\r\n\r\n restore_from = \"/root/evaluating_bdl/depthCompletion/trained_models/%s_%d/checkpoint_40000.pth\" % (model_id, model_i)\r\n model = DepthCompletionNet().cuda()\r\n model = torch.nn.DataParallel(model)\r\n model.load_state_dict(torch.load(restore_from))\r\n model.eval()\r\n\r\n for M in M_values:\r\n M_float = float(M)\r\n print (\"M: %d\" % M)\r\n\r\n for run in range(num_runs_per_M):\r\n print (\"run: %d\" % run)\r\n\r\n batch_losses = []\r\n batch_rmses = []\r\n sigma_alea_values = np.array([])\r\n sigma_epi_values = np.array([])\r\n sigma_pred_values = np.array([])\r\n mean_values = np.array([])\r\n target_values = np.array([])\r\n for i_iter, batch in enumerate(eval_loader):\r\n with torch.no_grad(): # (corresponds to setting volatile=True in all variables, this is done during inference to reduce memory consumption)\r\n imgs, sparses, targets, file_ids = batch\r\n imgs = Variable(imgs.cuda()) # 
(shape: (batch_size, h, w))\r\n sparses = Variable(sparses.cuda()) # (shape: (batch_size, h, w))\r\n targets = Variable(targets.cuda()) # (shape: (batch_size, h, w))\r\n\r\n means = []\r\n sigma_2_aleas = []\r\n for i in range(M):\r\n mean, log_var = model(imgs, sparses) # (both of shape: (batch_size, 1, h, w))\r\n\r\n sigma_2_alea = torch.exp(log_var) # (sigma_alea^2) # (shape: (batch_size, 1, h, w))\r\n\r\n means.append(mean)\r\n sigma_2_aleas.append(sigma_2_alea)\r\n\r\n mean = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w))\r\n for value in means:\r\n mean = mean + value/M_float\r\n\r\n sigma_2_alea = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_alea^2)\r\n for value in sigma_2_aleas:\r\n sigma_2_alea = sigma_2_alea + value/M_float\r\n\r\n sigma_2_epi = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_epi^2)\r\n for value in means:\r\n sigma_2_epi = sigma_2_epi + torch.pow(mean - value, 2)/M_float\r\n\r\n sigma_2_pred = sigma_2_alea + sigma_2_epi # (sigma_pred^2)\r\n\r\n loss = criterion(mean, torch.log(sigma_2_pred), targets)\r\n rmse = rmse_criterion(mean, targets)\r\n\r\n print('iter = {}/{} completed, loss = {}, rmse = {}'.format(i_iter, len(eval_dataset)/batch_size, loss.data.cpu().numpy(), rmse.data.cpu().numpy()))\r\n\r\n batch_losses.append(loss.data.cpu().numpy())\r\n batch_rmses.append(rmse.data.cpu().numpy())\r\n\r\n sigma_alea = torch.sqrt(sigma_2_alea) # (shape: (batch_size, 1, h, w))\r\n sigma_epi = torch.sqrt(sigma_2_epi) # (shape: (batch_size, 1, h, w))\r\n sigma_pred = torch.sqrt(sigma_2_pred) # (shape: (batch_size, 1, h, w))\r\n\r\n target = torch.unsqueeze(targets, 1) # (shape: (batch_size, 1, h, w))\r\n\r\n valid_mask = (target > 0).detach() # (shape: (batch_size, 1, h, w))\r\n\r\n mean = mean[valid_mask] # (shape: (num_valids, ))\r\n sigma_alea = sigma_alea[valid_mask] # (shape: (num_valids, ))\r\n sigma_epi = sigma_epi[valid_mask] # (shape: (num_valids, ))\r\n sigma_pred = sigma_pred[valid_mask] # (shape: (num_valids, ))\r\n target = target[valid_mask] # (shape: (num_valids, ))\r\n\r\n sigma_alea_values = np.concatenate((sigma_alea_values, sigma_alea.data.cpu().numpy()))\r\n sigma_epi_values = np.concatenate((sigma_epi_values, sigma_epi.data.cpu().numpy()))\r\n sigma_pred_values = np.concatenate((sigma_pred_values, sigma_pred.data.cpu().numpy()))\r\n mean_values = np.concatenate((mean_values, mean.data.cpu().numpy()))\r\n target_values = np.concatenate((target_values, target.data.cpu().numpy()))\r\n\r\n # # # # # # # # # # # # # # # # # # debug START:\r\n # if i_iter > 0:\r\n # break\r\n # # # # # # # # # # # # # # # # # # debug END:\r\n\r\n val_loss = np.mean(batch_losses)\r\n print (\"val loss: %g\" % val_loss)\r\n val_rmse = np.mean(batch_rmses)\r\n print (\"val rmse: %g\" % val_rmse)\r\n loss_values[M].append(val_loss)\r\n rmse_values[M].append(val_rmse)\r\n\r\n # (sigma_alea/epi/pred_values has shape: (num_predictions_with_GT, ))\r\n # (mean_values has shape: (num_predictions_with_GT, ))\r\n # (target_values has shape: (num_predictions_with_GT, ))\r\n\r\n print (sigma_alea_values.shape)\r\n print (sigma_epi_values.shape)\r\n print (sigma_pred_values.shape)\r\n print (mean_values.shape)\r\n print (target_values.shape)\r\n\r\n num_predictions_with_GT = float(target_values.shape[0])\r\n\r\n coverage_values_alea = []\r\n coverage_values_epi = []\r\n coverage_values_pred = []\r\n avg_length_values_alea = []\r\n avg_length_values_epi = []\r\n avg_length_values_pred = []\r\n alphas = 
list(np.arange(start=0.01, stop=1.0, step=0.01)) # ([0.01, 0.02, ..., 0.99], 99 elements)\r\n for step, alpha in enumerate(alphas):\r\n #print (\"alpha: %d/%d\" % (step+1, len(alphas)))\r\n\r\n lower_values_alea = mean_values - scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_alea_values # (shape: (num_predictions_with_GT, ))\r\n upper_values_alea = mean_values + scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_alea_values # (shape: (num_predictions_with_GT, ))\r\n\r\n coverage_alea = np.count_nonzero(np.logical_and(target_values >= lower_values_alea, target_values <= upper_values_alea))/num_predictions_with_GT\r\n coverage_values_alea.append(coverage_alea)\r\n\r\n avg_length_alea = np.mean(upper_values_alea - lower_values_alea)\r\n avg_length_values_alea.append(avg_length_alea)\r\n #\r\n lower_values_epi = mean_values - scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_epi_values # (shape: (num_predictions_with_GT, ))\r\n upper_values_epi = mean_values + scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_epi_values # (shape: (num_predictions_with_GT, ))\r\n\r\n coverage_epi = np.count_nonzero(np.logical_and(target_values >= lower_values_epi, target_values <= upper_values_epi))/num_predictions_with_GT\r\n coverage_values_epi.append(coverage_epi)\r\n\r\n avg_length_epi = np.mean(upper_values_epi - lower_values_epi)\r\n avg_length_values_epi.append(avg_length_epi)\r\n #\r\n lower_values_pred = mean_values - scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_pred_values # (shape: (num_predictions_with_GT, ))\r\n upper_values_pred = mean_values + scipy.stats.norm.ppf(1.0 - alpha/2)*sigma_pred_values # (shape: (num_predictions_with_GT, ))\r\n\r\n coverage_pred = np.count_nonzero(np.logical_and(target_values >= lower_values_pred, target_values <= upper_values_pred))/num_predictions_with_GT\r\n coverage_values_pred.append(coverage_pred)\r\n\r\n avg_length_pred = np.mean(upper_values_pred - lower_values_pred)\r\n avg_length_values_pred.append(avg_length_pred)\r\n\r\n auc_length_alea = np.trapz(y=avg_length_values_alea, x=alphas)\r\n print (\"AUC - Length - Alea: %g\" % auc_length_alea)\r\n auc_length_epi = np.trapz(y=avg_length_values_epi, x=alphas)\r\n print (\"AUC - Length - Epi: %g\" % auc_length_epi)\r\n auc_length_pred = np.trapz(y=avg_length_values_pred, x=alphas)\r\n print (\"AUC - Length - Pred: %g\" % auc_length_pred)\r\n\r\n coverage_error_values_alea = np.array(coverage_values_alea) - (1.0 - np.array(alphas))\r\n coverage_error_values_epi = np.array(coverage_values_epi) - (1.0 - np.array(alphas))\r\n coverage_error_values_pred = np.array(coverage_values_pred) - (1.0 - np.array(alphas))\r\n\r\n abs_coverage_error_values_alea = np.abs(coverage_error_values_alea)\r\n abs_coverage_error_values_epi = np.abs(coverage_error_values_epi)\r\n abs_coverage_error_values_pred = np.abs(coverage_error_values_pred)\r\n\r\n neg_coverage_error_values_alea = (np.abs(coverage_error_values_alea) - coverage_error_values_alea)/2.0\r\n neg_coverage_error_values_epi = (np.abs(coverage_error_values_epi) - coverage_error_values_epi)/2.0\r\n neg_coverage_error_values_pred = (np.abs(coverage_error_values_pred) - coverage_error_values_pred)/2.0\r\n\r\n auc_error_alea = np.trapz(y=abs_coverage_error_values_alea, x=alphas)\r\n print (\"AUC - Empirical coverage absolute error - Alea: %g\" % auc_error_alea)\r\n auc_error_epi = np.trapz(y=abs_coverage_error_values_epi, x=alphas)\r\n print (\"AUC - Empirical coverage absolute error - Epi: %g\" % auc_error_epi)\r\n auc_error_pred = np.trapz(y=abs_coverage_error_values_pred, x=alphas)\r\n print (\"AUC - Empirical 
coverage absolute error - Pred: %g\" % auc_error_pred)\r\n\r\n auc_neg_error_alea = np.trapz(y=neg_coverage_error_values_alea, x=alphas)\r\n print (\"AUC - Empirical coverage negative error - Alea: %g\" % auc_neg_error_alea)\r\n auc_neg_error_epi = np.trapz(y=neg_coverage_error_values_epi, x=alphas)\r\n print (\"AUC - Empirical coverage negative error - Epi: %g\" % auc_neg_error_epi)\r\n auc_neg_error_pred = np.trapz(y=neg_coverage_error_values_pred, x=alphas)\r\n print (\"AUC - Empirical coverage negative error - Pred: %g\" % auc_neg_error_pred)\r\n\r\n coverage_values[model_i][M][run] = np.array(coverage_values_pred)\r\n avg_length_values[model_i][M][run] = np.array(avg_length_values_pred)\r\n coverage_error_values[model_i][M][run] = np.array(coverage_error_values_pred)\r\n abs_coverage_error_values[model_i][M][run] = abs_coverage_error_values_pred\r\n neg_coverage_error_values[model_i][M][run] = neg_coverage_error_values_pred\r\n\r\n auc_abs_error_values[M].append(auc_error_pred)\r\n auc_length_values[M].append(auc_length_pred)\r\n auc_neg_error_values[M].append(auc_neg_error_pred)\r\n\r\n print (\"#######################\")\r\n\r\n print (\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\")\r\n\r\n print (\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\r\n\r\nauc_abs_error_means = {}\r\nauc_abs_error_stds = {}\r\nauc_neg_error_means = {}\r\nauc_neg_error_stds = {}\r\nauc_length_means = {}\r\nauc_length_stds = {}\r\nloss_means = {}\r\nloss_stds = {}\r\nrmse_means = {}\r\nrmse_stds = {}\r\nfor M in M_values:\r\n auc_abs_error_values_mean = 0.0\r\n for value in auc_abs_error_values[M]:\r\n auc_abs_error_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n auc_abs_error_values_var = 0.0\r\n for value in auc_abs_error_values[M]:\r\n auc_abs_error_values_var += ((value - auc_abs_error_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n auc_abs_error_values_std = np.sqrt(auc_abs_error_values_var)\r\n\r\n auc_abs_error_means[M] = auc_abs_error_values_mean\r\n auc_abs_error_stds[M] = auc_abs_error_values_std\r\n\r\n ###\r\n\r\n auc_neg_error_values_mean = 0.0\r\n for value in auc_neg_error_values[M]:\r\n auc_neg_error_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n auc_neg_error_values_var = 0.0\r\n for value in auc_neg_error_values[M]:\r\n auc_neg_error_values_var += ((value - auc_neg_error_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n auc_neg_error_values_std = np.sqrt(auc_neg_error_values_var)\r\n\r\n auc_neg_error_means[M] = auc_neg_error_values_mean\r\n auc_neg_error_stds[M] = auc_neg_error_values_std\r\n\r\n ###\r\n\r\n auc_length_values_mean = 0.0\r\n for value in auc_length_values[M]:\r\n auc_length_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n auc_length_values_var = 0.0\r\n for value in auc_length_values[M]:\r\n auc_length_values_var += ((value - auc_length_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n auc_length_values_std = np.sqrt(auc_length_values_var)\r\n\r\n auc_length_means[M] = auc_length_values_mean\r\n auc_length_stds[M] = auc_length_values_std\r\n\r\n ###\r\n\r\n loss_values_mean = 0.0\r\n for value in loss_values[M]:\r\n loss_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n loss_values_var = 0.0\r\n for value in loss_values[M]:\r\n loss_values_var += ((value - loss_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n loss_values_std = np.sqrt(loss_values_var)\r\n\r\n loss_means[M] = loss_values_mean\r\n loss_stds[M] = loss_values_std\r\n\r\n 
###\r\n\r\n rmse_values_mean = 0.0\r\n for value in rmse_values[M]:\r\n rmse_values_mean += value/float(num_runs_per_M*num_model_is)\r\n\r\n rmse_values_var = 0.0\r\n for value in rmse_values[M]:\r\n rmse_values_var += ((value - rmse_values_mean)**2)/float(num_runs_per_M*num_model_is)\r\n\r\n rmse_values_std = np.sqrt(rmse_values_var)\r\n\r\n rmse_means[M] = rmse_values_mean\r\n rmse_stds[M] = rmse_values_std\r\n\r\n\r\nfor M in M_values:\r\n print (\"M = %d, Empirical coverage absolute error (AUC) - mean: %g, std: %g\" % (M, auc_abs_error_means[M], auc_abs_error_stds[M]))\r\n print (\"M = %d, Empirical coverage negative error (AUC) - mean: %g, std: %g\" % (M, auc_neg_error_means[M], auc_neg_error_stds[M]))\r\n print (\"M = %d, Average length (AUC) - mean: %g, std: %g\" % (M, auc_length_means[M], auc_length_stds[M]))\r\n print (\"M = %d, Loss - mean: %g, std: %g\" % (M, loss_means[M], loss_stds[M]))\r\n print (\"M = %d, RMSE - mean: %g, std: %g\" % (M, rmse_means[M], rmse_stds[M]))\r\n print (\"#####\")\r\n\r\nplt.figure(1)\r\nplt.plot([0.0, 1.0], [0.0, 1.0], \"k:\", label=\"Perfect\")\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(coverage_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(coverage_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Empirical coverage\")\r\nplt.xlabel(\"p\")\r\nplt.title(\"Prediction intervals - Empirical coverage\")\r\nplt.savefig(\"%s/empirical_coverage.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nplt.figure(1)\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(avg_length_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(avg_length_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Average interval length [m]\")\r\nplt.xlabel(\"p\")\r\navg_length_ylim = plt.ylim()\r\nplt.title(\"Prediction intervals - Average interval length\")\r\nplt.savefig(\"%s/length.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nplt.figure(1)\r\nplt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Empirical coverage error\")\r\nplt.xlabel(\"p\")\r\ncoverage_error_ylim = plt.ylim()\r\nplt.title(\"Prediction intervals - Empirical coverage error\")\r\nplt.savefig(\"%s/empirical_coverage_error.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nplt.figure(1)\r\nplt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(abs_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(abs_coverage_error_values[model_i][M][run], 0), 
color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Empirical coverage absolute error\")\r\nplt.xlabel(\"p\")\r\nabs_coverage_error_ylim = plt.ylim()\r\nplt.title(\"Prediction intervals - Empirical coverage absolute error\")\r\nplt.savefig(\"%s/empirical_coverage_absolute_error.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nplt.figure(1)\r\nplt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\nfor M in M_values:\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(neg_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(neg_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\nplt.legend()\r\nplt.ylabel(\"Empirical coverage negative error\")\r\nplt.xlabel(\"p\")\r\nneg_coverage_error_ylim = plt.ylim()\r\nplt.title(\"Prediction intervals - Empirical coverage negative error\")\r\nplt.savefig(\"%s/empirical_coverage_negative_error.png\" % snapshot_dir)\r\nplt.close(1)\r\n\r\nfor M in M_values:\r\n plt.figure(1)\r\n plt.plot([0.0, 1.0], [0.0, 1.0], \"k:\", label=\"Perfect\")\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(coverage_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(coverage_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Empirical coverage\")\r\n plt.xlabel(\"p\")\r\n plt.title(\"Prediction intervals - Empirical coverage\")\r\n plt.savefig(\"%s/empirical_coverage_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\n plt.figure(1)\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(avg_length_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(avg_length_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Average interval length [m]\")\r\n plt.xlabel(\"p\")\r\n plt.ylim(avg_length_ylim)\r\n plt.title(\"Prediction intervals - Average interval length\")\r\n plt.savefig(\"%s/length_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\n plt.figure(1)\r\n plt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Empirical coverage error\")\r\n plt.xlabel(\"p\")\r\n plt.ylim(coverage_error_ylim)\r\n plt.title(\"Prediction intervals - Empirical coverage error\")\r\n plt.savefig(\"%s/empirical_coverage_error_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\n plt.figure(1)\r\n plt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(abs_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, 
np.flip(abs_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Empirical coverage absolute error\")\r\n plt.xlabel(\"p\")\r\n plt.ylim(abs_coverage_error_ylim)\r\n plt.title(\"Prediction intervals - Empirical coverage absolute error\")\r\n plt.savefig(\"%s/empirical_coverage_absolute_error_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\n plt.figure(1)\r\n plt.plot([0.0, 1.0], [0.0, 0.0], \"k:\", label=\"Perfect\")\r\n for model_i_step, model_i in enumerate(model_is):\r\n for run in range(num_runs_per_M):\r\n if (model_i_step == 0) and (run == 0):\r\n plt.plot(alphas, np.flip(neg_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5, label=\"M = %d\" % M)\r\n else:\r\n plt.plot(alphas, np.flip(neg_coverage_error_values[model_i][M][run], 0), color=colors[M], alpha=0.5)\r\n plt.legend()\r\n plt.ylabel(\"Empirical coverage negative error\")\r\n plt.xlabel(\"p\")\r\n plt.ylim(neg_coverage_error_ylim)\r\n plt.title(\"Prediction intervals - Empirical coverage negative error\")\r\n plt.savefig(\"%s/empirical_coverage_negative_error_M%d.png\" % (snapshot_dir, M))\r\n plt.close(1)\r\n\r\nwith open(\"%s/auc_abs_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(auc_abs_error_values, file)\r\n\r\nwith open(\"%s/auc_neg_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(auc_neg_error_values, file)\r\n\r\nwith open(\"%s/auc_length_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(auc_length_values, file)\r\n\r\nwith open(\"%s/loss_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(loss_values, file)\r\n\r\nwith open(\"%s/rmse_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(rmse_values, file)\r\n\r\nwith open(\"%s/coverage_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(coverage_values, file)\r\n\r\nwith open(\"%s/avg_length_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(avg_length_values, file)\r\n\r\nwith open(\"%s/coverage_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(coverage_error_values, file)\r\n\r\nwith open(\"%s/abs_coverage_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(abs_coverage_error_values, file)\r\n\r\nwith open(\"%s/neg_coverage_error_values.pkl\" % snapshot_dir, \"wb\") as file:\r\n pickle.dump(neg_coverage_error_values, file)\r\n",
"# code-checked\n# server-checked\n\nfrom datasets import ToyDatasetEval # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)\nfrom model import ToyNet\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport cv2\n\nbatch_size = 32\n\nM_values = [1, 4, 16, 64, 256]\nfor M in M_values:\n for iter in range(6):\n\n network = ToyNet(\"eval_MC-Dropout-MAP-02-Adam_1_M10\", project_dir=\"/root/evaluating_bdl/toyRegression\").cuda()\n network.load_state_dict(torch.load(\"/root/evaluating_bdl/toyRegression/training_logs/model_MC-Dropout-MAP-02-Adam_1_M10_%d/checkpoints/model_MC-Dropout-MAP-02-Adam_1_M10_epoch_300.pth\" % iter))\n\n M_float = float(M)\n print (M_float)\n\n val_dataset = ToyDatasetEval()\n\n num_val_batches = int(len(val_dataset)/batch_size)\n print (\"num_val_batches:\", num_val_batches)\n\n val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)\n\n network.eval()\n\n x_values = []\n final_mean_values = []\n final_sigma_tot_values = []\n final_sigma_epi_values = []\n final_sigma_alea_values = []\n for step, (x) in enumerate(val_loader):\n x = Variable(x).cuda().unsqueeze(1) # (shape: (batch_size, 1))\n\n means = []\n vars = []\n for i in range(M):\n outputs = network(x)\n mean = outputs[0] # (shape: (batch_size, ))\n var = outputs[1] # (shape: (batch_size, )) (log(sigma^2))\n\n means.append(mean)\n vars.append(var)\n\n for i in range(x.size(0)):\n x_value = x[i].data.cpu().numpy()[0]\n\n mean_values = []\n for mean in means:\n mean_value = mean[i].data.cpu().numpy()[0]\n mean_values.append(mean_value)\n\n sigma_alea_values = []\n for var in vars:\n sigma_alea_value = torch.exp(var[i]).data.cpu().numpy()[0]\n sigma_alea_values.append(sigma_alea_value)\n\n mean_value = 0.0\n for value in mean_values:\n mean_value += value/M_float\n\n sigma_epi_value = 0.0\n for value in mean_values:\n sigma_epi_value += ((mean_value - value)**2)/M_float\n\n sigma_alea_value = 0.0\n for value in sigma_alea_values:\n sigma_alea_value += value/M_float\n\n sigma_tot_value = sigma_epi_value + sigma_alea_value\n\n x_values.append(x_value)\n final_mean_values.append(mean_value)\n final_sigma_epi_values.append(sigma_epi_value)\n final_sigma_alea_values.append(sigma_alea_value)\n final_sigma_tot_values.append(sigma_tot_value)\n\n max_sigma_alea_value = -1000\n for i in range(len(x_values)):\n if (x_values[i] < 3) and (x_values[i] > -3):\n if final_sigma_alea_values[i] > max_sigma_alea_value:\n max_sigma_alea_value = final_sigma_alea_values[i]\n\n print (max_sigma_alea_value)\n\n for i in range(len(x_values)):\n if final_sigma_alea_values[i] > max_sigma_alea_value:\n final_sigma_alea_values[i] = max_sigma_alea_value\n\n final_sigma_tot_values[i] = final_sigma_alea_values[i] + final_sigma_epi_values[i]\n\n plt.figure(1)\n plt.plot(x_values, final_mean_values, \"r\")\n plt.fill_between(x_values, np.array(final_mean_values) - 2*np.sqrt(np.array(final_sigma_tot_values)), np.array(final_mean_values) + 2*np.sqrt(np.array(final_sigma_tot_values)), color=\"C3\", alpha=0.25)\n plt.plot(x_values, np.sin(np.array(x_values)), \"k\")\n plt.axvline(x=-3.0, linestyle=\"--\", color=\"0.5\")\n plt.axvline(x=3.0, linestyle=\"--\", color=\"0.5\")\n plt.fill_between(x_values, np.sin(np.array(x_values)) - 
2*0.15*(1.0/(1 + np.exp(-np.array(x_values)))), np.sin(np.array(x_values)) + 2*0.15*(1.0/(1 + np.exp(-np.array(x_values)))), color=\"0.5\", alpha=0.25)\n plt.xlim([-6, 6])\n plt.ylim([-4.25, 4.25])\n plt.tight_layout(pad=0.1, w_pad=0.1, h_pad=0.1)\n plt.savefig(\"%s/predictive_density_M=%d_%d.png\" % (network.model_dir, M, iter+1))\n plt.close(1)\n",
"# code-checked\n# server-checked\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torchvision.models as models\n\nimport os\n\nclass ToyNet(nn.Module):\n def __init__(self, model_id, project_dir):\n super(ToyNet, self).__init__()\n\n self.model_id = model_id\n self.project_dir = project_dir\n self.create_model_dirs()\n\n input_dim = 2\n hidden_dim = 10\n num_classes = 2\n\n self.fc1 = nn.Linear(input_dim, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, hidden_dim)\n self.fc3 = nn.Linear(hidden_dim, num_classes)\n\n def forward(self, x):\n # (x has shape (batch_size, input_dim))\n\n out = F.relu(self.fc1(x)) # (shape: (batch_size, hidden_dim))\n out = F.dropout(out, p=0.2, training=True)\n out = F.relu(self.fc2(out)) # (shape: (batch_size, hidden_dim))\n out = self.fc3(out) # (shape: batch_size, num_classes))\n\n return out\n\n def create_model_dirs(self):\n self.logs_dir = self.project_dir + \"/training_logs\"\n self.model_dir = self.logs_dir + \"/model_%s\" % self.model_id\n self.checkpoints_dir = self.model_dir + \"/checkpoints\"\n if not os.path.exists(self.logs_dir):\n os.makedirs(self.logs_dir)\n if not os.path.exists(self.model_dir):\n os.makedirs(self.model_dir)\n os.makedirs(self.checkpoints_dir)\n",
"# code-checked\n# server-checked\n\nimport torch\nimport torch.utils.data\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nimport pickle\n\nclass ToyDataset(torch.utils.data.Dataset):\n def __init__(self):\n self.examples = []\n\n with open(\"/root/evaluating_bdl/toyRegression/x.pkl\", \"rb\") as file: # (needed for python3)\n x = pickle.load(file)\n\n with open(\"/root/evaluating_bdl/toyRegression/y.pkl\", \"rb\") as file: # (needed for python3)\n y = pickle.load(file)\n\n plt.figure(1)\n plt.plot(x, y, \"k.\")\n plt.ylabel(\"y\")\n plt.xlabel(\"x\")\n plt.savefig(\"/root/evaluating_bdl/toyRegression/MC-Dropout-MAP-02-SGD/training_data.png\")\n plt.close(1)\n\n for i in range(x.shape[0]):\n example = {}\n example[\"x\"] = x[i]\n example[\"y\"] = y[i]\n self.examples.append(example)\n\n self.num_examples = len(self.examples)\n\n def __getitem__(self, index):\n example = self.examples[index]\n\n x = example[\"x\"]\n y = example[\"y\"]\n\n return (x, y)\n\n def __len__(self):\n return self.num_examples\n\nclass ToyDatasetEval(torch.utils.data.Dataset):\n def __init__(self):\n self.examples = []\n\n x = np.linspace(-7, 7, 1000, dtype=np.float32)\n\n for i in range(x.shape[0]):\n example = {}\n example[\"x\"] = x[i]\n self.examples.append(example)\n\n self.num_examples = len(self.examples)\n\n def __getitem__(self, index):\n example = self.examples[index]\n\n x = example[\"x\"]\n\n return (x)\n\n def __len__(self):\n return self.num_examples\n\n# _ = ToyDataset()\n",
"# code-checked\n# server-checked\n\nfrom model import ToyNet\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cv2\n\nL = 64\nnum_epochs = L*150\n\nnum_epochs_low = int(0.75*num_epochs)\nprint (num_epochs_low)\n\nx_min = -6.0\nx_max = 6.0\nnum_points = 60\n\nepsilon = 1.0e-30\n\nwith open(\"/root/evaluating_bdl/toyClassification/HMC/false_prob_values.pkl\", \"rb\") as file: # (needed for python3)\n false_prob_values_HMC = pickle.load(file) # (shape: (60, 60))\nprint (false_prob_values_HMC.shape)\nprint (np.max(false_prob_values_HMC))\nprint (np.min(false_prob_values_HMC))\n\np_HMC = false_prob_values_HMC/np.sum(false_prob_values_HMC)\n\nx_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)\n\nx_1_train_lower = 0 # (0)\nx_1_train_upper = 0 # (3)\nx_2_train_lower = 0 # (-3)\nx_2_train_upper = 0 # (3)\nfor index, value in enumerate(x_values):\n if value < 0:\n x_1_train_lower = index+1\n\n if value < 3:\n x_1_train_upper = index\n x_2_train_upper = index\n\n if value < -3:\n x_2_train_lower = index+1\n\nprint (x_1_train_lower)\nprint (x_values[x_1_train_lower])\nprint (x_1_train_upper)\nprint (x_values[x_1_train_upper])\nprint (x_2_train_lower)\nprint (x_values[x_2_train_lower])\nprint (x_2_train_upper)\nprint (x_values[x_2_train_upper])\n\np_HMC_train = p_HMC[x_2_train_lower:x_2_train_upper, x_1_train_lower:x_1_train_upper] # (shape: (29, 14))\np_HMC_train = p_HMC_train/np.sum(p_HMC_train)\n\nM_values = [2, 4, 8, 16, 32, 64, 128, 256, 512]\nfor M in M_values:\n print (M)\n\n step_size = float(num_epochs - num_epochs_low)/float(M-1)\n print (step_size)\n\n if (step_size < 1):\n break\n\n KL_p_HMC_q_total_values = []\n KL_p_HMC_q_train_values = []\n for j in range(10):\n networks = []\n for i in range(M):\n #print (int(num_epochs - i*step_size))\n\n network = ToyNet(\"eval_SGHMC-64_1-10\", project_dir=\"/root/evaluating_bdl/toyClassification\").cuda()\n network.load_state_dict(torch.load(\"/root/evaluating_bdl/toyClassification/training_logs/model_SGHMC-64_%d/checkpoints/model_SGHMC-64_%d_epoch_%d.pth\" % (j+1, j+1, int(num_epochs - i*step_size))))\n networks.append(network)\n\n M_float = float(len(networks))\n print (M_float)\n\n for network in networks:\n network.eval()\n\n false_prob_values = np.zeros((num_points, num_points))\n for x_1_i, x_1_value in enumerate(x_values):\n for x_2_i, x_2_value in enumerate(x_values):\n x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda() # (shape: (1, 2))\n\n mean_prob_vector = np.zeros((2, ))\n for network in networks:\n logits = network(x) # (shape: (1, num_classes)) (num_classes==2)\n prob_vector = F.softmax(logits, dim=1) # (shape: (1, num_classes))\n\n prob_vector = prob_vector.data.cpu().numpy()[0] # (shape: (2, ))\n\n mean_prob_vector += prob_vector/M_float\n\n false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]\n\n # print (false_prob_values.shape)\n # print (np.max(false_prob_values))\n # print (np.min(false_prob_values))\n\n q = false_prob_values/np.sum(false_prob_values)\n\n KL_p_HMC_q_total = np.sum(p_HMC*np.log(p_HMC/(q + epsilon) + epsilon))\n KL_p_HMC_q_total_values.append(KL_p_HMC_q_total)\n #print (\"KL_p_HMC_q_total: %g\" % KL_p_HMC_q_total)\n\n q_train = q[x_2_train_lower:x_2_train_upper, x_1_train_lower:x_1_train_upper]\n q_train = 
q_train/np.sum(q_train)\n\n KL_p_HMC_q_train = np.sum(p_HMC_train*np.log(p_HMC_train/(q_train + epsilon) + epsilon))\n KL_p_HMC_q_train_values.append(KL_p_HMC_q_train)\n #print (\"KL_p_HMC_q_train: %g\" % KL_p_HMC_q_train)\n\n print (\"mean_total: %g\" % np.mean(np.array(KL_p_HMC_q_total_values)))\n print (\"std_total: %g\" % np.std(np.array(KL_p_HMC_q_total_values)))\n print (\"max_total: %g\" % np.max(np.array(KL_p_HMC_q_total_values)))\n print (\"min_total: %g\" % np.min(np.array(KL_p_HMC_q_total_values)))\n print (\"###\")\n\n print (\"mean_train: %g\" % np.mean(np.array(KL_p_HMC_q_train_values)))\n print (\"std_train: %g\" % np.std(np.array(KL_p_HMC_q_train_values)))\n print (\"max_train: %g\" % np.max(np.array(KL_p_HMC_q_train_values)))\n print (\"min_train: %g\" % np.min(np.array(KL_p_HMC_q_train_values)))\n\n print (M)\n\n print (\"########################\")\n",
"# code-checked\n# server-checked\n\nfrom datasets import ToyDataset # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)\nfrom model import ToyNet\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport cv2\n\n# NOTE! change this to not overwrite all log data when you train the model:\nmodel_id = \"Ensemble-MAP-SGD_1_M1024\"\n\nnum_epochs = 150\nbatch_size = 32\nlearning_rate = 0.01\n\npower = 0.9\n\ndef lr_poly(base_lr, iter, max_iter, power):\n return base_lr*((1-float(iter)/max_iter)**(power))\n\ndef adjust_learning_rate(optimizer, i_iter):\n lr = lr_poly(learning_rate, i_iter, num_steps, power)\n optimizer.param_groups[0]['lr'] = lr\n return lr\n\ntrain_dataset = ToyDataset()\nN = float(len(train_dataset))\nprint (N)\n\nalpha = 1.0\n\nnum_train_batches = int(len(train_dataset)/batch_size)\nprint (\"num_train_batches:\", num_train_batches)\n\nnum_steps = num_epochs*num_train_batches\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n\nM = 1024\nfor i in range(M):\n network = ToyNet(model_id + \"_%d\" % i, project_dir=\"/root/evaluating_bdl/toyRegression\").cuda()\n\n optimizer = torch.optim.SGD(network.parameters(), lr=learning_rate)\n\n epoch_losses_train = []\n for epoch in range(num_epochs):\n print (\"###########################\")\n print (\"######## NEW EPOCH ########\")\n print (\"###########################\")\n print (\"epoch: %d/%d\" % (epoch+1, num_epochs))\n print (\"network: %d/%d\" % (i+1, M))\n\n network.train() # (set in training mode, this affects BatchNorm and dropout)\n batch_losses = []\n for step, (x, y) in enumerate(train_loader):\n x = Variable(x).cuda().unsqueeze(1) # (shape: (batch_size, 1))\n y = Variable(y).cuda().unsqueeze(1) # (shape: (batch_size, 1))\n\n outputs = network(x)\n mean = outputs[0] # (shape: (batch_size, ))\n log_var = outputs[1] # (shape: (batch_size, )) (log(sigma^2))\n\n ####################################################################\n # compute the loss:\n ####################################################################\n loss_likelihood = torch.mean(torch.exp(-log_var)*torch.pow(y - mean, 2) + log_var)\n\n loss_prior = 0.0\n for param in network.parameters():\n if param.requires_grad:\n loss_prior += (1.0/N)*(1.0/alpha)*torch.sum(torch.pow(param, 2))\n\n loss = loss_likelihood + loss_prior\n\n loss_value = loss.data.cpu().numpy()\n batch_losses.append(loss_value)\n\n ########################################################################\n # optimization step:\n ########################################################################\n lr = adjust_learning_rate(optimizer, epoch*num_train_batches + step)\n\n optimizer.zero_grad() # (reset gradients)\n loss.backward() # (compute gradients)\n optimizer.step() # (perform optimization step)\n\n epoch_loss = np.mean(batch_losses)\n epoch_losses_train.append(epoch_loss)\n with open(\"%s/epoch_losses_train.pkl\" % network.model_dir, \"wb\") as file:\n pickle.dump(epoch_losses_train, file)\n print (\"train loss: %g\" % epoch_loss)\n plt.figure(1)\n plt.plot(epoch_losses_train, \"k^\")\n plt.plot(epoch_losses_train, \"k\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.title(\"train loss per epoch\")\n plt.savefig(\"%s/epoch_losses_train.png\" % 
network.model_dir)\n plt.close(1)\n\n print('learning_rate: {}'.format(lr))\n\n # save the model weights to disk:\n checkpoint_path = network.checkpoints_dir + \"/model_\" + model_id +\"_epoch_\" + str(epoch+1) + \".pth\"\n torch.save(network.state_dict(), checkpoint_path)\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"torch.load",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.plot",
"numpy.mean",
"torch.no_grad",
"numpy.trapz",
"torch.pow",
"torch.sqrt",
"numpy.arange",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"torch.unsqueeze",
"torch.exp",
"torch.log",
"numpy.array",
"numpy.flip",
"numpy.logical_and",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"torch.nn.DataParallel"
],
[
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.tight_layout",
"torch.load",
"matplotlib.use",
"matplotlib.pyplot.ylim",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"torch.autograd.Variable",
"matplotlib.pyplot.xlim",
"torch.exp",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"torch.nn.Linear",
"torch.nn.functional.dropout"
],
[
"numpy.linspace",
"matplotlib.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"numpy.log",
"torch.nn.functional.softmax",
"numpy.linspace",
"numpy.min",
"matplotlib.use",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"matplotlib.pyplot.title",
"matplotlib.use",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"torch.exp",
"matplotlib.pyplot.ylabel",
"torch.autograd.Variable",
"numpy.mean",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"torch.pow",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
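The depthCompletion/mcdropout_eval_auce.py cell in the row above sweeps alpha from 0.01 to 0.99 and, for each value, measures the empirical coverage and average length of Gaussian prediction intervals. The sketch below isolates that per-alpha computation, assuming 1-D numpy arrays of predictive means, predictive standard deviations, and ground-truth targets; the synthetic data in the usage example is invented for illustration.

import numpy as np
import scipy.stats

def interval_stats(mean, sigma, target, alpha):
    # Central (1 - alpha) prediction interval under a Gaussian predictive distribution.
    z = scipy.stats.norm.ppf(1.0 - alpha / 2)
    lower, upper = mean - z * sigma, mean + z * sigma
    coverage = np.mean((target >= lower) & (target <= upper))  # empirical coverage
    avg_length = np.mean(upper - lower)                        # average interval length
    return coverage, avg_length

# Illustrative usage: well-calibrated predictions should give coverage close to 1 - alpha.
rng = np.random.RandomState(0)
mean = rng.randn(1000)
sigma = np.full(1000, 0.5)
target = mean + sigma * rng.randn(1000)
print(interval_stats(mean, sigma, target, alpha=0.05))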
Classic-Daniel/gradient-video-delay | [
"a03e11a6b14e9e89198cf46e2b435cc1e9035d63"
] | [
"main.py"
] | [
"import numpy as np\nimport cv2\n\nBUFFER_SIZE = 30\n\nclass imageGenerator:\n def __init__(self, img):\n self.imgBuffer = [img] * BUFFER_SIZE\n self.currentIndex = 0\n print(f\"Image type: {type(img)}\")\n print(f\"buffer shape: {self.imgBuffer[0].shape}\")\n\n def addNewImage(self, img):\n self.imgBuffer[self.currentIndex] = img\n self.currentIndex = (self.currentIndex + 1) % BUFFER_SIZE\n\n def getProcessedImage(self):\n generatedImg = np.copy(self.imgBuffer[self.currentIndex])\n height = self.imgBuffer[self.currentIndex].shape[1]\n heightStep = round(height / BUFFER_SIZE)\n\n for i in range(1, BUFFER_SIZE):\n generatedImg[:, heightStep * i : heightStep * (i + 1)] = self.imgBuffer[(self.currentIndex + i) % BUFFER_SIZE][:, heightStep * i : heightStep * (i + 1)]\n\n return generatedImg\n\ndef initCameraStream(): \n cap = cv2.VideoCapture(cv2.CAP_V4L2)\n generator = None\n # The device number might be 0 or 1 depending on the device and the webcam\n # cap.open(0, cv2.CAP_DSHOW)\n while(True):\n ret, frame = cap.read()\n if(ret and frame.shape[0] > 0):\n if generator == None:\n generator = imageGenerator(frame)\n\n generator.addNewImage(frame)\n cv2.imshow('frame', generator.getProcessedImage())\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n\ndef main():\n initCameraStream()\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.copy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mhilmiasyrofi/Once-for-All-Adversarial-Training | [
"c92bc88bdcf8bd531ca02017a4d2d1410899519c"
] | [
"models/svhn/wide_resnet.py"
] | [
"\"\"\"PyTorch implementation of Wide-ResNet taken from \nhttps://github.com/jeromerony/fast_adversarial/blob/master/fast_adv/models/cifar10/wide_resnet.py\"\"\"\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, out_planes, stride, dropRate=0.0):\n super(BasicBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.droprate = dropRate\n self.equalInOut = (in_planes == out_planes)\n self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False) or None\n\n def forward(self, x):\n if not self.equalInOut:\n x = self.relu1(self.bn1(x))\n else:\n out = self.relu1(self.bn1(x))\n out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n if self.droprate > 0:\n out = F.dropout(out, p=self.droprate, training=self.training)\n out = self.conv2(out)\n return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\n\nclass NetworkBlock(nn.Module):\n def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):\n super(NetworkBlock, self).__init__()\n self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)\n\n def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):\n layers = []\n for i in range(nb_layers):\n layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layer(x)\n\n\nclass WideResNet(nn.Module):\n def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):\n super(WideResNet, self).__init__()\n nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]\n assert (depth - 4) % 6 == 0, 'depth should be 6n+4'\n n = (depth - 4) // 6\n block = BasicBlock\n # 1st conv before any network block\n self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,\n padding=1, bias=False)\n # 1st block\n self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)\n # 2nd block\n self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)\n # 3rd block\n self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)\n # global average pooling and classifier\n self.bn1 = nn.BatchNorm2d(nChannels[3])\n self.relu = nn.ReLU(inplace=True)\n self.fc = nn.Linear(nChannels[3], num_classes)\n self.nChannels = nChannels[3]\n \n\n def forward(self, x):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n out = out.view(-1, self.nChannels)\n return self.fc(out)\n\ndef WRN16_8():\n return WideResNet(depth=16, num_classes=10, widen_factor=8, dropRate=0.3)"
] | [
[
"torch.nn.Sequential",
"torch.nn.functional.dropout",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ducnx/TPC-LoS-prediction | [
"49713f5bd7e77c2adb1ff950c885d087a398a1ad"
] | [
"models/hyperparameter_scripts/eICU/tpc_stage2.py"
] | [
"from eICU_preprocessing.split_train_test import create_folder\nfrom models.run_tpc import TPC\nimport numpy as np\nimport random\nfrom models.final_experiment_scripts.best_hyperparameters import best_global\nfrom models.initialise_arguments import initialise_tpc_arguments\n\n\ndef get_hyperparam_config(dataset):\n\n c = initialise_tpc_arguments()\n c['mode'] = 'train'\n c['exp_name'] = 'TPC'\n if dataset == 'MIMIC':\n c['no_diag'] = True\n c['dataset'] = dataset\n c['model_type'] = 'tpc'\n c = best_global(c)\n\n # hyper-parameter grid\n param_grid = {\n 'n_layers': [5, 6, 7, 8, 9, 10, 11, 12], # most promising range is 5-12 layers\n 'temp_kernels': list(int(x) for x in np.logspace(np.log2(4), np.log2(16), base=2, num=16)),\n 'point_sizes': list(int(x) for x in np.logspace(np.log2(4), np.log2(16), base=2, num=16)),\n 'learning_rate': list(np.logspace(np.log10(0.001), np.log10(0.01), base=10, num=100)),\n 'batch_size': list(int(x) for x in np.logspace(np.log2(4), np.log2(512), base=2, num=8)), # might have to search between 4 and 32 to avoid memory issues\n 'temp_dropout_rate': [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5],\n 'kernel_size': {#1: list(range(4, 25)),\n #2: [5, 6, 7, 8, 9, 10],\n #3: [3, 4, 5, 6, 7],\n #4: [2, 3, 4, 5, 6],\n 5: [2, 3, 4, 5],\n 6: [2, 3, 4, 5],\n 7: [2, 3, 4, 5],\n 8: [2, 3, 4, 5],\n 9: [3, 4],\n 10: [3, 4],\n 11: [3, 4],\n 12: [3, 4]}\n }\n\n c['n_layers'] = random.choice(param_grid['n_layers'])\n c['kernel_size'] = random.choice(param_grid['kernel_size'][c['n_layers']])\n c['temp_kernels'] = [random.choice(param_grid['temp_kernels'])] * c['n_layers']\n c['point_sizes'] = [random.choice(param_grid['point_sizes'])] * c['n_layers']\n c['learning_rate'] = round(random.choice(param_grid['learning_rate']), 5)\n c['batch_size'] = random.choice(param_grid['batch_size'])\n c['temp_dropout_rate'] = random.choice(param_grid['temp_dropout_rate'])\n\n return c\n\n\nif __name__=='__main__':\n\n\n for i in range(50):\n try:\n c = get_hyperparam_config('eICU')\n log_folder_path = create_folder('models/experiments/hyperparameters/eICU', c.exp_name)\n tpc = TPC(config=c,\n n_epochs=c.n_epochs,\n name=c.exp_name,\n base_dir=log_folder_path,\n explogger_kwargs={'folder_format': '%Y-%m-%d_%H%M%S{run_number}'})\n tpc.run()\n\n except RuntimeError:\n continue"
] | [
[
"numpy.log2",
"numpy.log10"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
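The tpc_stage2.py cell in the row above builds its random hyperparameter search by sampling from log-spaced grids (base 2 for the integer-valued sizes, base 10 for the learning rate). The sketch below isolates that sampling pattern; the ranges are copied from the cell, but the resulting config dictionary is only illustrative.

import random
import numpy as np

# Log-spaced candidate values, mirroring the param_grid in the cell above.
learning_rates = list(np.logspace(np.log10(0.001), np.log10(0.01), base=10, num=100))
batch_sizes = [int(x) for x in np.logspace(np.log2(4), np.log2(512), base=2, num=8)]  # 4, 8, ..., 512

config = {
    'learning_rate': round(random.choice(learning_rates), 5),
    'batch_size': random.choice(batch_sizes),
}
print(config)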
mchatton/pyroomacoustics | [
"913b45a311634283fe28dc5d133b27b8b610627b"
] | [
"pyroomacoustics/tests/test_build_rir.py"
] | [
"from __future__ import division, print_function\nimport pyroomacoustics as pra\nimport numpy as np\n\ntry:\n from pyroomacoustics import build_rir\n build_rir_available = True\nexcept:\n print('build_rir not available')\n build_rir_available = False\n\n# tolerance for test success (1%)\ntol = 0.01\n\nfdl = 81\nfs = 16000\n\nt0 = (2 * fdl + 0.1) / fs\nt1 = (3 * fdl - 0.1) / fs\nt2 = (4 * fdl + 0.45) / fs\nt3 = (5 * fdl + 0.001) / fs\nt4 = (6 * fdl + 0.999) / fs\n\ntimes = np.array(\n [\n [ t0 , t1 + (1 / 40 / 16000), t2, ],\n [ t0, t1 + (10 / fs), 3 * t3, ],\n [ t0, t3, t4, ],\n ],\n )\nalphas = np.array(\n [\n [ 1., 0.5, -0.1 ],\n [ 0.5, 0.3, 0.1 ],\n [ 0.3, 2., 0.1 ],\n ],\n )\nvisibilities = np.array(\n [\n [ 1, 1, 1,],\n [ 1, 1, 1,],\n [ 0, 1, 1,],\n ],\n dtype=np.int32,\n )\n\ntime_short = np.array([0.])\ntime_long = np.array([0.1])\nalpha_dummy = np.array([1.])\nvisibility_dummy = np.array([1], dtype=np.int32)\n\n\ndef build_rir_wrap(time, alpha, visibility, fs, fdl):\n\n # fractional delay length\n fdl = pra.constants.get('frac_delay_length')\n fdl2 = (fdl-1) // 2\n\n # the number of samples needed\n N = int(np.ceil(time.max() * fs) + fdl)\n\n ir_ref = np.zeros(N)\n ir_cython = np.zeros(N)\n\n # Try to use the Cython extension\n build_rir.fast_rir_builder(ir_cython, time, alpha, visibility, fs, fdl)\n\n # fallback to pure Python implemenation\n for i in range(time.shape[0]):\n if visibility[i] == 1:\n time_ip = int(np.round(fs * time[i]))\n time_fp = (fs * time[i]) - time_ip\n ir_ref[time_ip-fdl2:time_ip+fdl2+1] += alpha[i] * pra.fractional_delay(time_fp)\n\n return ir_ref, ir_cython\n\ndef test_build_rir():\n\n if not build_rir_available:\n return\n\n for t, a, v in zip(times, alphas, visibilities):\n ir_ref, ir_cython = build_rir_wrap(times[0], alphas[0], visibilities[0], fs, fdl)\n assert np.max(np.abs(ir_ref - ir_cython)) < tol\n\ndef test_short():\n ''' Tests that an error is raised if a provided time goes below the zero index '''\n\n if not build_rir_available:\n return\n\n N = 100\n fs = 16000\n fdl = 81\n rir = np.zeros(N)\n\n time = np.array([0.])\n alpha = np.array([1.])\n visibility = np.array([1], dtype=np.int32)\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha, visibility, fs, fdl)\n assert False\n except AssertionError:\n print('Ok, short times are caught')\n\n\n\ndef test_long():\n ''' Tests that an error is raised if a time falls outside the rir array '''\n\n if not build_rir_available:\n return\n\n N = 100\n fs = 16000\n fdl = 81\n rir = np.zeros(N)\n\n time = np.array([(N-1) / fs])\n alpha = np.array([1.])\n visibility = np.array([1], dtype=np.int32)\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha, visibility, fs, fdl)\n assert False\n except AssertionError:\n print('Ok, long times are caught')\n\ndef test_errors():\n ''' Tests that errors are raised when array lengths differ '''\n\n if not build_rir_available:\n return\n\n N = 300\n fs = 16000\n fdl = 81\n rir = np.zeros(N)\n\n time = np.array([100 / fs, 200 / fs])\n alpha = np.array([1., 1.])\n visibility = np.array([1, 1], dtype=np.int32)\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha[:1], visibility, fs, fdl)\n assert False\n except:\n print('Ok, alpha error occured')\n pass\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha, visibility[:1], fs, fdl)\n assert False\n except:\n print('Ok, visibility error occured')\n pass\n\n try:\n build_rir.fast_rir_builder(rir, time, alpha, visibility, fs, 80)\n assert False\n except:\n print('Ok, fdl error occured')\n pass\n\n\nif __name__ == 
'__main__':\n\n import matplotlib.pyplot as plt\n\n for t, a, v in zip(times, alphas, visibilities):\n ir_ref, ir_cython = build_rir_wrap(times[0], alphas[0], visibilities[0], fs, fdl)\n\n print('Error:', np.max(np.abs(ir_ref - ir_cython)))\n\n plt.figure()\n plt.plot(ir_ref, label='ref')\n plt.plot(ir_cython, label='cython')\n plt.legend()\n\n test_short()\n test_long()\n test_errors()\n\n plt.show()\n\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.round",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sarikayamehmet/Framework-for-Actor-Critic-deep-reinforcement-learning-algorithms | [
"a2902f903956427074769b71b41ddc81e10276c3"
] | [
"A3C/environment/car_controller_environment.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport matplotlib\nmatplotlib.use('Agg',force=True) # no display\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom matplotlib.patches import Circle\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.lines import Line2D\n\nimport numpy as np\nfrom scipy import optimize\nfrom collections import deque\nfrom environment.environment import Environment\n\nimport options\nflags = options.get() # get command line args\n\t\nclass CarControllerEnvironment(Environment):\n\n\tdef get_state_shape(self):\n\t\t# There are 2 types of objects (obstacles and lanes), each object has 3 numbers (x, y and size)\n\t\tif self.max_obstacle_count > 0:\n\t\t\treturn (2,max(self.control_points_per_step,self.max_obstacle_count),3)\n\t\treturn (1,self.control_points_per_step,2) # no need for size because there are only lanes\n\n\tdef get_action_shape(self):\n\t\treturn (2,) # steering angle, continuous control without softmax\n\t\n\tdef __init__(self, thread_index):\n\t\tEnvironment.__init__(self)\n\t\tself.thread_index = thread_index\n\t\tself.max_step = 100\n\t\tself.control_points_per_step = 5\n\t\tself.mean_seconds_per_step = 0.1 # in average, a step every n seconds\n\t\tself.horizon_distance = 1 # meters\n\t\tself.max_distance_to_path = 0.1 # meters\n\t\t# obstacles related stuff\n\t\tself.max_obstacle_count = 3\n\t\tself.min_obstacle_radius = 0.15 # meters\n\t\tself.max_obstacle_radius = 0.45 # meters\n\t\t# information about speed parameters: http://www.ijtte.com/uploads/2012-10-01/5ebd8343-9b9c-b1d4IJTTE%20vol2%20no3%20%287%29.pdf\n\t\tself.min_speed = 0.1 # m/s\n\t\tself.max_speed = 1.4 # m/s\n\t\tself.speed_lower_limit = 0.7 # m/s # used together with max_speed to get the random speed upper limit\n\t\tself.max_speed_noise = 0.25 # m/s\n\t\t# the fastest car has max_acceleration 9.25 m/s (https://en.wikipedia.org/wiki/List_of_fastest_production_cars_by_acceleration)\n\t\t# the slowest car has max_acceleration 0.7 m/s (http://automdb.com/max_acceleration)\n\t\tself.max_acceleration = 0.7 # m/s\n\t\tself.max_steering_degree = 30\n\t\tself.max_steering_noise_degree = 2\n\t\tself.max_steering_angle = convert_degree_to_radiant(self.max_steering_degree)\n\t\tself.max_steering_noise_angle = convert_degree_to_radiant(self.max_steering_noise_degree)\n\t\t# splines related stuff\n\t\tself.spline_number = 2\n\t\tself.control_points_per_spline = 50\n\t\t# evaluator stuff\n\t\tself.episodes = deque()\n\t\t# shapes\n\t\tself.state_shape = self.get_state_shape()\n\t\tself.action_shape = self.get_action_shape()\n\t\n\tdef reset(self):\n\t\tself.step = 0\n\t\tself.seconds_per_step = self.get_step_seconds()\n\t\tself.path = self.build_random_path()\n\t\t# car position\n\t\tself.car_point = (0,0) # car point and orientation are always expressed with respect to the initial point and orientation of the road fragment\n\t\tself.car_progress, self.car_goal = self.get_position_and_goal(point=self.car_point)\n\t\tself.car_angle = self.get_angle_from_position(self.car_progress)\n\t\t# speed limit\n\t\tself.speed_upper_limit = self.speed_lower_limit + (self.max_speed-self.speed_lower_limit)*np.random.random() # in [speed_lower_limit,max_speed]\n\t\t# steering angle & speed\n\t\tself.speed = self.min_speed + (self.max_speed-self.min_speed)*np.random.random() # in [min_speed,max_speed]\n\t\tself.steering_angle = 
0\n\t\t# get obstacles\n\t\tself.obstacles = self.get_new_obstacles()\n\t\t# init concat variables\n\t\tself.last_reward = 0\n\t\tself.last_state = self.get_state(car_point=self.car_point, car_angle=self.car_angle, car_progress=self.car_progress, car_goal=self.car_goal, obstacles=self.obstacles)\n\t\t# init log variables\n\t\tself.cumulative_reward = 0\n\t\tself.avg_speed_per_steps = 0\n\t\t\t\n\tdef get_new_obstacles(self):\n\t\tif self.max_obstacle_count <= 0:\n\t\t\treturn []\n\t\tobstacles = []\n\t\tpresence_mask = np.random.randint(2, size=self.max_obstacle_count)\n\t\tfor i in range(self.max_obstacle_count):\n\t\t\tif presence_mask[i] == 1: # obstacle is present\n\t\t\t\tpoint = self.get_point_from_position(self.spline_number*np.random.random())\n\t\t\t\tradius = self.min_obstacle_radius + (self.max_obstacle_radius-self.min_obstacle_radius)*np.random.random() # in [min_obstacle_radius,max_obstacle_radius]\n\t\t\t\tobstacles.append((point,radius))\n\t\treturn obstacles\n\t\t\n\tdef get_closest_obstacle(self, point, obstacles):\n\t\tif len(obstacles) == 0:\n\t\t\treturn None\n\t\tobstacle_distances_from_point = map(lambda obstacle: (obstacle, euclidean_distance(obstacle[0], point)-obstacle[1]), obstacles)\n\t\treturn min(obstacle_distances_from_point, key=lambda tup: tup[1])[0]\n\t\t\n\tdef get_point_from_position(self, position):\n\t\tspline = int(np.ceil(position)-1)\n\t\tif spline <= 0: # first spline \n\t\t\treturn (poly(position,self.U[0]), poly(position,self.V[0]))\n\t\t# second spline\n\t\treturn rotate_and_shift(poly(position-spline,self.U[spline]), poly(position-spline,self.V[spline]), self.middle_point[spline-1][0], self.middle_point[spline-1][1], self.theta[spline-1])\n\t\t\n\tdef get_angle_from_position(self, position):\n\t\tspline = int(np.ceil(position)-1)\n\t\tif spline <= 0: # first spline \n\t\t\treturn angle(position, self.U[0], self.V[0])\n\t\t# second spline\n\t\treturn angle(position-spline, self.U[spline], self.V[spline])+self.theta[spline-1]\n\t\t\n\tdef build_random_path(self):\n\t\t# setup environment\n\t\tself.U = []\n\t\tself.V = []\n\t\tself.theta = []\n\t\tself.middle_point = []\n\t\tfor i in range(self.spline_number):\n\t\t\tU, V = generate_random_polynomial()\n\t\t\tself.U.append(U)\n\t\t\tself.V.append(V)\n\t\t\tself.theta.append(angle(1, U, V))\n\t\t\tself.middle_point.append(self.get_point_from_position(i+1))\n\t\t# we generate all points for both polynomials, then we shall draw only a portion of them\n\t\tself.positions = np.linspace(start=0, stop=self.spline_number, num=self.spline_number*self.control_points_per_spline) # first spline is in [0,1] while the second one is in [1,2]\n\t\txy = [self.get_point_from_position(pos) for pos in self.positions]\n\t\treturn list(zip(*xy))\n\n\tdef is_terminal_position(self, position):\n\t\treturn position >= self.spline_number*0.9\n\n\tdef get_position_and_goal(self, point):\n\t\t# Find the closest spline point\n\t\tcar_closest_position = optimize.minimize_scalar(lambda pos: euclidean_distance(point, self.get_point_from_position(pos)), method='bounded', bounds=(0,self.spline_number))\n\t\tcar_position = car_closest_position.x\n\t\t# Find closest control point on horizon\n\t\tclosest_goal = optimize.minimize_scalar(lambda pos: np.absolute(euclidean_distance(point, self.get_point_from_position(pos))-self.horizon_distance), method='bounded', bounds=(car_position,self.spline_number))\n\t\tgoal = closest_goal.x\n\t\treturn car_position, goal\n\n\tdef move(self, point, angle, steering_angle, speed, 
add_noise=False):\n\t\t# add noise\n\t\tif add_noise:\n\t\t\tsteering_angle += (2*np.random.random()-1)*self.max_steering_noise_angle\n\t\t\tsteering_angle = np.clip(steering_angle, -self.max_steering_angle, self.max_steering_angle) # |steering_angle| <= max_steering_angle, ALWAYS\n\t\t\tspeed += (2*np.random.random()-1)*self.max_speed_noise\n\t\t# get new angle\n\t\tnew_angle = angle+steering_angle\n\t\t# move point\n\t\tx, y = point\n\t\tdir_x, dir_y = get_heading_vector(angle=new_angle, space=speed*self.seconds_per_step)\n\t\treturn (x+dir_x, y+dir_y), new_angle\n\n\tdef get_steering_angle_from_action(self, action): # action is in [-1,1]\n\t\treturn action*self.max_steering_angle # in [-max_steering_angle, max_steering_angle]\n\t\t\n\tdef get_acceleration_from_action(self, action): # action is in [-1,1]\n\t\treturn action*self.max_acceleration # in [-max_acceleration, max_acceleration]\n\t\t\n\tdef accelerate(self, speed, acceleration):\n\t\treturn np.clip(speed + acceleration*self.seconds_per_step, self.min_speed, self.max_speed)\n\t\t\n\tdef get_step_seconds(self):\n\t\treturn np.random.exponential(scale=self.mean_seconds_per_step)\n\n\tdef process(self, action_vector):\n\t\t# first of all, get the seconds passed from last step\n\t\tself.seconds_per_step = self.get_step_seconds()\n\t\t# compute new steering angle\n\t\tself.steering_angle = self.get_steering_angle_from_action(action=action_vector[0])\n\t\t# compute new acceleration\n\t\tself.acceleration = self.get_acceleration_from_action(action=action_vector[1])\n\t\t# compute new speed\n\t\tself.speed = self.accelerate(speed=self.speed, acceleration=self.acceleration)\n\t\t# move car\n\t\tself.car_point, self.car_angle = self.move(point=self.car_point, angle=self.car_angle, steering_angle=self.steering_angle, speed=self.speed, add_noise=True)\n\t\t# update position and direction\n\t\tcar_position, car_goal = self.get_position_and_goal(point=self.car_point)\n\t\t# compute perceived reward\n\t\treward, dead = self.get_reward(car_speed=self.speed, car_point=self.car_point, car_progress=self.car_progress, car_position=car_position, obstacles=self.obstacles)\n\t\tif car_position > self.car_progress: # is moving toward next position\n\t\t\tself.car_progress = car_position # progress update\n\t\t\tself.car_goal = car_goal\n\t\t# compute new state (after updating progress)\n\t\tstate = self.get_state(car_point=self.car_point, car_angle=self.car_angle, car_progress=self.car_progress, car_goal=self.car_goal, obstacles=self.obstacles)\n\t\t# update last action/state/reward\n\t\tself.last_state = state\n\t\tself.last_reward = reward\n\t\t# update cumulative reward\n\t\tself.cumulative_reward += reward\n\t\tself.avg_speed_per_steps += self.speed\n\t\t# update step\n\t\tself.step += 1\n\t\tterminal = dead or self.is_terminal_position(self.car_goal) or self.step >= self.max_step\n\t\tif terminal: # populate statistics\n\t\t\tstats = {\n\t\t\t\t\"avg_speed\": self.avg_speed_per_steps/self.step,\n\t\t\t\t\"reward\": self.cumulative_reward,\n\t\t\t\t\"step\": self.step,\n\t\t\t\t\"completed\": 1 if self.is_terminal_position(self.car_goal) else 0\n\t\t\t}\n\t\t\tif self.max_obstacle_count > 0:\n\t\t\t\tstats[\"hit\"] = 1 if dead else 0\n\t\t\tself.episodes.append(stats)\n\t\t\tif len(self.episodes) > flags.match_count_for_evaluation:\n\t\t\t\tself.episodes.popleft()\n\t\treturn state, reward, terminal\n\t\n\tdef get_concatenation_size(self):\n\t\treturn 4\n\t\t\n\tdef get_concatenation(self):\n\t\treturn [self.steering_angle, self.speed, 
self.seconds_per_step, self.speed_upper_limit]\n\t\t\n\tdef get_reward(self, car_speed, car_point, car_progress, car_position, obstacles):\n\t\tmax_distance_to_path = self.max_distance_to_path\n\t\tcar_projection_point = self.get_point_from_position(car_position)\n\t\tclosest_obstacle = self.get_closest_obstacle(point=car_projection_point, obstacles=obstacles)\n\t\tif closest_obstacle is not None:\n\t\t\tobstacle_point, obstacle_radius = closest_obstacle\n\t\t\tif euclidean_distance(obstacle_point, car_point) <= obstacle_radius: # collision\n\t\t\t\treturn (-1, True) # terminate episode\n\t\t\tif euclidean_distance(obstacle_point, car_projection_point) <= obstacle_radius: # could collide obstacle\n\t\t\t\tmax_distance_to_path += obstacle_radius\n\t\tif car_position > car_progress: # is moving toward next position\n\t\t\tdistance = euclidean_distance(car_point, car_projection_point)\n\t\t\tdistance_ratio = np.clip(distance/max_distance_to_path, 0,1) # always in [0,1]\n\t\t\tinverse_distance_ratio = 1 - distance_ratio\n\t\t\t# the more car_speed > self.speed_upper_limit, the bigger the malus\n\t\t\tmalus = self.speed_upper_limit*max(0,car_speed/self.speed_upper_limit-1)*self.seconds_per_step\n\t\t\t# smaller distances to path give higher rewards\n\t\t\tbonus = min(car_speed,self.speed_upper_limit)*self.seconds_per_step*inverse_distance_ratio\n\t\t\treturn (bonus-malus, False) # do not terminate episode\n\t\t# else is NOT moving toward next position\n\t\treturn (-0.1, False) # do not terminate episode\n\t\t\n\tdef get_state(self, car_point, car_angle, car_progress, car_goal, obstacles):\n\t\tstate = np.zeros(self.state_shape)\n\t\tcar_x, car_y = car_point\n\t\tcontrol_distance = (car_goal - car_progress)/self.control_points_per_step\n\t\t# add control points\n\t\tfor i in range(self.control_points_per_step):\n\t\t\tcp_x, cp_y = self.get_point_from_position(car_progress + (i+1)*control_distance)\n\t\t\trcp_x, rcp_y = shift_and_rotate(cp_x, cp_y, -car_x, -car_y, -car_angle) # get control point with coordinates relative to car point\n\t\t\tif self.max_obstacle_count > 0:\n\t\t\t\tstate[0][i] = (rcp_x, rcp_y, 0) # no collision with lanes\n\t\t\telse:\n\t\t\t\tstate[0][i] = (rcp_x, rcp_y)\n\t\t# add obstacles\n\t\tfor (j, obstacle) in enumerate(obstacles):\n\t\t\tobstacle_point, obstacle_radius = obstacle\n\t\t\tif euclidean_distance(obstacle_point,car_point) <= self.horizon_distance+obstacle_radius:\n\t\t\t\tro_x, ro_y = shift_and_rotate(obstacle_point[0], obstacle_point[1], -car_x, -car_y, -car_angle) # get control point with coordinates relative to car point\n\t\t\t\tstate[1][j] = (ro_x, ro_y, obstacle_radius)\n\t\treturn state\n\t\t\n\tdef get_screen(self): # RGB array\n\t\t# First set up the figure and the axis\n\t\t# fig, ax = matplotlib.pyplot.subplots(nrows=1, ncols=1, sharey=False, sharex=False, figsize=(10,10)) # this method causes memory leaks\n\t\tfigure = Figure(figsize=(5,5))\n\t\tcanvas = FigureCanvas(figure)\n\t\tax = figure.add_subplot(111) # nrows=1, ncols=1, index=1\n\t\t# [Obstacles]\n\t\tif len(self.obstacles) > 0:\n\t\t\tcircles = [Circle(point,radius,color='b') for (point,radius) in self.obstacles]\n\t\t\tpatch_collection = PatchCollection(circles, match_original=True)\n\t\t\tax.add_collection(patch_collection)\n\t\t# [Car]\n\t\tcar_x, car_y = self.car_point\n\t\tcar_handle = ax.scatter(car_x, car_y, marker='o', color='g', label='Car')\n\t\t# [Heading Vector]\n\t\tdir_x, dir_y = get_heading_vector(angle=self.car_angle)\n\t\theading_vector_handle, = ax.plot([car_x, 
car_x+dir_x],[car_y, car_y+dir_y], color='g', alpha=0.5, label='Heading Vector')\n\t\t# [Goal]\n\t\twaypoint_x, waypoint_y = self.get_point_from_position(self.car_goal)\n\t\tgoal_handle = ax.scatter(waypoint_x, waypoint_y, marker='o', color='r', label='Horizon')\n\t\t# [Path]\n\t\tpath_handle, = ax.plot(self.path[0], self.path[1], lw=2, alpha=0.5, label='Path')\n\t\t# Adjust ax limits in order to get the same scale factor on both x and y\n\t\ta,b = ax.get_xlim()\n\t\tc,d = ax.get_ylim()\n\t\tmax_length = max(d-c, b-a)\n\t\tax.set_xlim([a,a+max_length])\n\t\tax.set_ylim([c,c+max_length])\n\t\t# Build legend\n\t\thandles = [car_handle,heading_vector_handle,goal_handle,path_handle]\n\t\tif len(self.obstacles) > 0:\n\t\t\t# https://stackoverflow.com/questions/11423369/matplotlib-legend-circle-markers\n\t\t\thandles.append(Line2D(range(1), range(1), color=\"white\", marker='o', markerfacecolor=\"blue\", label='Obstacle'))\n\t\tax.legend(handles=handles)\n\t\t# Draw plot\n\t\tfigure.suptitle('[Speed]{0:.2f} m/s [Angle]{1:.2f} deg \\n [Limit]{3:.2f} m/s [Step]{2}'.format(self.speed,convert_radiant_to_degree(self.steering_angle), self.step, self.speed_upper_limit))\n\t\tcanvas.draw()\n\t\t# Save plot into RGB array\n\t\tdata = np.fromstring(figure.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n\t\tdata = data.reshape(figure.canvas.get_width_height()[::-1] + (3,))\n\t\treturn data # RGB array\n\t\t\n\tdef get_frame_info(self, network, value, action, reward, policy):\n\t\tstate_info = \"reward={}, speed={}, steering_angle={}, agent={}, value={}, policy={}\\n\".format(reward, self.speed, self.steering_angle, network.agent_id, value, policy)\n\t\tstate_info += \"state={}\\n\".format(self.last_state)\n\t\taction_info = \"action={}\\n\".format(action)\n\t\tframe_info = { \"log\": state_info + action_info }\n\t\tif flags.save_episode_screen:\n\t\t\tframe_info[\"screen\"] = { \"value\": self.get_screen(), \"type\": 'RGB' }\n\t\treturn frame_info\n\t\t\n\tdef get_statistics(self):\n\t\tresult = {}\n\t\tresult[\"avg_reward\"] = 0\n\t\tresult[\"avg_step\"] = 0\n\t\tresult[\"avg_speed\"] = 0\n\t\tresult[\"avg_completed\"] = 0\n\t\tif self.max_obstacle_count > 0:\n\t\t\tresult[\"avg_hit\"] = 0\n\t\tcount = len(self.episodes)\n\t\tif count>0:\n\t\t\tresult[\"avg_reward\"] = sum(e[\"reward\"] for e in self.episodes)/count\n\t\t\tresult[\"avg_step\"] = sum(e[\"step\"] for e in self.episodes)/count\n\t\t\tresult[\"avg_speed\"] = sum(e[\"avg_speed\"] for e in self.episodes)/count\n\t\t\tresult[\"avg_completed\"] = sum(e[\"completed\"] for e in self.episodes)/count\n\t\t\tif self.max_obstacle_count > 0:\n\t\t\t\tresult[\"avg_hit\"] = sum(e[\"hit\"] for e in self.episodes)/count\n\t\treturn result\n\t\t\ndef rotate(x,y,theta):\n\treturn (x*np.cos(theta)-y*np.sin(theta), x*np.sin(theta)+y*np.cos(theta))\n\ndef shift_and_rotate(xv,yv,dx,dy,theta):\n\treturn rotate(xv+dx,yv+dy,theta)\n\ndef rotate_and_shift(xv,yv,dx,dy,theta):\n\t(x,y) = rotate(xv,yv,theta)\n\treturn (x+dx,y+dy)\n\ndef generate_random_polynomial():\n\t#both x and y are defined by two polynomials in a third variable p, plus\n\t#an initial angle (that, when connecting splines, will be the same as\n\t#the final angle of the previous polynomial)\n\t#Both polynomials are third order.\n\t#The polynomial for x is aU, bU, cU, dU\n\t#The polynomial for y is aV, bV, cV, dV\n\t#aU and bU are always 0 (start at origin) and bV is always 0 (derivative at\n\t#origin is 0). 
bU must be positive\n\t# constraints initial coordinates must be the same as\n\t# ending coordinates of the previous polynomial\n\taU = 0\n\taV = 0\n\t# initial derivative must the same as the ending\n\t# derivative of the previous polynomial\n\tbU = (10-6)*np.random.random()+6 #around 8\n\tbV = 0\n\t#we randonmly generate values for cU and dU in the range ]-1,1[\n\tcU = 2*np.random.random()-1\n\tdU = 2*np.random.random()-1\n\tfinalV = 10*np.random.random()-5\n\t#final derivative between -pi/6 and pi/6\n\tfinald = np.tan((np.pi/3)*np.random.random() - np.pi/6)\n\t#now we fix parameters to meet the constraints:\n\t#bV + cV + dV = finalV \n\t#angle(1) = finald; see the definition of angle below\n\tUd = bU + 2*cU + 3*dU\n\t#Vd = bU + 2*cU + 3*dU = finald*Ud\n\tdV = finald*Ud - 2*finalV + bV\n\tcV = finalV - dV - bV\n\treturn ((aU,bU,cU,dU), (aV,bV,cV,dV))\n\ndef poly(p, points):\n\treturn points[0] + points[1]*p + points[2]*p**2 + points[3]*p**3\n\ndef derivative(p, points):\n\treturn points[1] + 2*points[2]*p + 3*points[3]*p**2\n\ndef angle(p, U, V):\n\tUd = derivative(p,U)\n\tVd = derivative(p,V)\n\treturn (np.arctan(Vd/Ud)) if abs(Ud) > abs(Vd/1000) else (np.pi/2)\n\t\ndef norm(angle):\n if angle >= np.pi:\n angle -= 2*np.pi\n elif angle < -np.pi:\n angle += 2*np.pi\n return angle\n\ndef convert_degree_to_radiant(degree):\n\treturn (degree/180)*np.pi\n\t\ndef convert_radiant_to_degree(radiant):\n\treturn radiant*(180/np.pi)\n\t\ndef get_heading_vector(angle, space=1):\n\treturn (space*np.cos(angle), space*np.sin(angle))\n\t\ndef euclidean_distance(a,b):\n\treturn np.sqrt(sum((j-k)**2 for (j,k) in zip(a,b)))"
] | [
[
"matplotlib.collections.PatchCollection",
"numpy.random.random",
"numpy.linspace",
"numpy.clip",
"matplotlib.figure.Figure",
"matplotlib.use",
"numpy.random.exponential",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.arctan",
"numpy.cos",
"matplotlib.patches.Circle",
"numpy.sin",
"numpy.ceil",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gimlidc/igre | [
"bf3425e838cca3d1fa8254a2550ecb44774ee0ef",
"bf3425e838cca3d1fa8254a2550ecb44774ee0ef"
] | [
"src/tftools/idx2pixel_layer.py",
"tests/test_ig_pixelwise.py"
] | [
"import tensorflow as tf\n\nglobal_visible = None\n\n\nclass Idx2PixelLayer(tf.keras.layers.Layer):\n\n def __init__(self, visible, trainable=False, shift_multi=1, **kwargs):\n \"\"\"\n :param visible: one dimension of visible image (for this dimension [x,y] will be computed)\n \"\"\"\n super(Idx2PixelLayer, self).__init__(**kwargs)\n self.visible = tf.constant(visible, dtype=tf.float32)\n global global_visible\n global_visible = self.visible\n\n def call(self, coords, **kwargs):\n return linear_interpolation(coords)\n\n\ndef reset_visible(stage_data):\n global global_visible\n global_visible = tf.constant(stage_data.copy(), dtype=tf.float32)\n\n\[email protected]_gradient\ndef linear_interpolation(coords):\n \"\"\"\n Calculate image pixel intensities from input coordinates by means of bilinear\n interpolation. Also calculate corresponding gradients for ANN training.\n\n 'Bottom-left', 'bottom-right', 'top-left', 'top-right' mean the four\n neighboring pixels closest to input coordinates. top/bottom corresponds to the\n first axis coordinate, right/left to the second. Coordinate values increase\n from left to right, top to bottom.\n\n\n top_left top_right\n mid_top\n X-----------------------------------X\n | . |\n | . |\n | . |\n | . x |\n mid_left X.......*...........................| mid_right\n | . |\n | . |\n | . |\n | . |\n | . |\n | . |\n | . |\n | . |\n X-------X---------------------------X\n mid_bottom\n bottom_left bottom_right\n \"\"\"\n # multiply gradient by factor to slow down learning of 'bias'\n grad_multiplier = tf.constant(1, dtype=tf.float32)\n visible = global_visible\n\n # ensure that the coordinates are in range [1, max-2] so we can take 2x2 neighbourhood of the coord in the Jacobian\n # TODO: We might do precheck outside this function\n\n # 0 - 400\n coords = tf.subtract(coords, 1)\n # -1 - 399\n coords = tf.math.mod(coords, tf.subtract(tf.cast(visible.shape.as_list()[:-1], dtype=tf.float32), 4))\n # 0 - (401-4) 397\n coords = tf.add(coords, 1)\n # 1 - 398\n # we can do coords -1 and +2 now\n\n # calculate index of top-left point\n idx_low = tf.floor(coords)\n\n # offset of input coordinates from top-left point\n delta = tf.cast(tf.subtract(coords, idx_low), dtype=tf.float32)\n # coords are the size of (batch, 2), delta as well\n\n top_left = tf.gather_nd(visible, tf.cast(idx_low, tf.int32))\n top_right = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [1, 0]), tf.int32))\n bottom_left = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [0, 1]), tf.int32))\n bottom_right = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [1, 1]), tf.int32))\n # these values are of size of [batch_size, input_dimensions]\n\n mid_bottom = tf.add(bottom_right, tf.einsum(\"i,ij->ij\", delta[:, 0], tf.subtract(bottom_left, bottom_right)))\n mid_top = tf.add(top_right, tf.einsum(\"i,ij->ij\", delta[:, 0], tf.subtract(top_left, top_right)))\n\n mid_left = tf.add(bottom_left, tf.einsum(\"i,ij->ij\", delta[:, 1], tf.subtract(top_left, bottom_left)))\n mid_right = tf.add(bottom_right, tf.einsum(\"i,ij->ij\", delta[:, 1], tf.subtract(top_right, bottom_right)))\n\n interpolation = tf.add(mid_bottom, tf.einsum(\"i,ij->ij\", delta[:, 1],\n tf.subtract(mid_top, mid_bottom)))\n\n def compute_2x2_jacobian():\n # This will produce Jacobian of size [batch_size, 2, input_dims]\n # Take bigger neighbourhood around the coord\n ttl = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [0, -1]), tf.int32))\n ttr = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [1, -1]), tf.int32))\n bbl = tf.gather_nd(visible, 
tf.cast(tf.add(idx_low, [0, 2]), tf.int32))\n bbr = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [1, 2]), tf.int32))\n tll = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [-1, 0]), tf.int32))\n trr = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [2, 0]), tf.int32))\n bll = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [-1, 1]), tf.int32))\n brr = tf.gather_nd(visible, tf.cast(tf.add(idx_low, [2, 1]), tf.int32))\n\n mid_bb = tf.add(bbr, tf.einsum(\"i,ij->ij\", delta[:, 0], tf.subtract(bbl, bbr)))\n mid_tt = tf.add(ttr, tf.einsum(\"i,ij->ij\", delta[:, 0], tf.subtract(ttl, ttr)))\n mid_ll = tf.add(bll, tf.einsum(\"i,ij->ij\", delta[:, 1], tf.subtract(tll, bll)))\n mid_rr = tf.add(brr, tf.einsum(\"i,ij->ij\", delta[:, 1], tf.subtract(trr, brr)))\n\n d_x_r = tf.subtract(mid_rr, mid_right)\n d_x_c = tf.subtract(mid_right, mid_left)\n d_x_l = tf.subtract(mid_left, mid_ll)\n d_y_t = tf.subtract(mid_top, mid_tt)\n d_y_c = tf.subtract(mid_bottom, mid_top)\n d_y_b = tf.subtract(mid_bb, mid_bottom)\n\n # Weighted average of the derivatives\n d_x = tf.multiply(tf.add(d_x_r, d_x_l), 0.5)\n d_x = tf.multiply(tf.add(d_x, d_x_c), 0.5)\n d_y = tf.multiply(tf.add(d_y_t, d_y_b), 0.5)\n d_y = tf.multiply(tf.add(d_y, d_y_c), 0.5)\n return d_x, d_y\n\n d_c_x, d_c_y = compute_2x2_jacobian()\n jacob = tf.stack([d_c_x, d_c_y], axis=1)\n\n def grad(dy):\n \"\"\" This method should return tensor of gradients [batch_size, 6]\"\"\"\n return tf.multiply(tf.einsum(\"ijk,ik->ij\", jacob, dy), grad_multiplier)\n\n coords_off_boundary = tf.greater(tf.cast(coords, dtype=tf.float32), tf.cast(visible.shape[:-1], dtype=tf.float32))\n boundary_condition = tf.logical_or(coords_off_boundary[:, 0], coords_off_boundary[:, 0])\n masked = tf.where(boundary_condition, tf.zeros(tf.shape(interpolation)), interpolation)\n\n return masked, grad\n",
"import numpy as np\nimport pytest\nfrom stable.information_gain.pixelwise import information_gain\nimport tensorflow as tf\nimport os\nimport imageio\n\nLEONARDO_MADONNA_WITH_YARNWINDER = \"tests/assets/leonardo.npy\"\nMAGDALENA_VIS = \"tests/assets/mari_magdalena-detail.png\"\nMAGDALENA_NIR = \"tests/assets/mari_magdalenaIR-detail.png\"\n\n\[email protected](not os.path.isfile(LEONARDO_MADONNA_WITH_YARNWINDER),\n reason=\"Input file is missing\")\ndef test_information_gain_on_leonardo():\n tf.random.set_random_seed(12345)\n data = np.load(LEONARDO_MADONNA_WITH_YARNWINDER)\n [diff, target, _] = information_gain(data[:, :, :16], data[:, :, 20:25])\n\n np.testing.assert_almost_equal(target, data[:, :, 20:25], 0)\n diff.shape = (data.shape[0], data.shape[1], 5)\n\n\[email protected](not os.path.isfile(MAGDALENA_VIS),\n reason=\"Input file is missing\")\[email protected](not os.path.isfile(MAGDALENA_NIR),\n reason=\"Input file is missing\")\ndef test_information_gain_on_steborice():\n tf.random.set_random_seed(12345)\n vis = np.array(imageio.imread(MAGDALENA_VIS), dtype=float)/255\n nir = np.array(imageio.imread(MAGDALENA_NIR), dtype=float)/255\n [diff, target, _] = information_gain(vis, nir)\n\n np.testing.assert_almost_equal(target, nir, 0)\n diff.shape = nir.shape"
] | [
[
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.logical_or",
"tensorflow.stack",
"tensorflow.floor",
"tensorflow.cast",
"tensorflow.subtract",
"tensorflow.einsum",
"tensorflow.add"
],
[
"numpy.load",
"numpy.testing.assert_almost_equal",
"tensorflow.random.set_random_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexlioralexli/diagnosing_qlearning | [
"20a4338a324c1bab79e6ca65937830529d941302",
"20a4338a324c1bab79e6ca65937830529d941302",
"20a4338a324c1bab79e6ca65937830529d941302"
] | [
"debugq/envs/env_suite.py",
"rlutil/envs/tabular_cy/test_random_env.py",
"rlutil/dictarray.py"
] | [
"import numpy as np\nimport itertools\nimport random\nfrom debugq.envs import random_obs_wrapper, time_limit_wrapper, env_wrapper\nfrom rlutil.envs.tabular_cy import tabular_env\nfrom rlutil.envs.gridcraft import grid_env_cy\nfrom rlutil.envs.gridcraft import grid_spec_cy\nfrom rlutil.logging import log_utils\nfrom rlutil import math_utils\nfrom rlutil.envs.gridcraft.grid_spec_cy import TileType\n\n\ndef random_grid_env(size_x, size_y, dim_obs=32, time_limit=50, wall_ratio=0.1, smooth_obs=False, distance_reward=True,\n one_hot_obs=False,\n seed=None, absorb=False, tabular=False):\n total_size = size_x * size_y\n locations = list(itertools.product(range(size_x), range(size_y)))\n start_loc = (int(size_x/2), int(size_y/2))\n locations.remove(start_loc)\n\n with math_utils.np_seed(seed):\n # randomly place walls\n wall_locs = random.sample(locations, int(total_size*wall_ratio))\n [locations.remove(loc) for loc in wall_locs]\n\n cand_reward_locs = random.sample(locations, int(0.25 * total_size))\n # pick furthest one from center\n cand_reward_dists = [np.linalg.norm(np.array(reward_loc) - start_loc) for reward_loc in cand_reward_locs]\n furthest_reward = np.argmax(cand_reward_dists)\n reward_loc = cand_reward_locs[furthest_reward]\n locations.remove(cand_reward_locs[furthest_reward])\n\n gs = grid_spec_cy.spec_from_sparse_locations(size_x, size_y, {TileType.START: [start_loc],\n TileType.WALL: wall_locs,\n TileType.REWARD: [reward_loc]})\n\n if distance_reward:\n env = grid_env_cy.DistanceRewardGridEnv(gs, reward_loc[0], reward_loc[1], start_loc[0], start_loc[1])\n else:\n env = grid_env_cy.GridEnv(gs)\n env = env_wrapper.StochasticActionWrapper(env, eps=0.05)\n\n if absorb:\n env = env_wrapper.AbsorbingStateWrapper(env)\n if tabular:\n env = wrap_time(env, time_limit=time_limit)\n else:\n env = wrap_obs_time(env, time_limit=time_limit, one_hot_obs=one_hot_obs, dim_obs=dim_obs, smooth_obs=smooth_obs)\n return env\n\ndef wrap_obs_time(env, dim_obs=32, time_limit=50, smooth_obs=False, one_hot_obs=False):\n if smooth_obs:\n env = random_obs_wrapper.LocalObsWrapper(env, dim_obs=dim_obs)\n elif one_hot_obs:\n env = random_obs_wrapper.OneHotObsWrapper(env)\n else:\n env = random_obs_wrapper.RandomObsWrapper(env, dim_obs=dim_obs)\n env = time_limit_wrapper.TimeLimitWrapper(env, time_limit=time_limit)\n return env\n\ndef wrap_time(env, time_limit=50):\n return time_limit_wrapper.TimeLimitWrapper(env, time_limit=time_limit)\n\n# suite\nENV_KEYS = ['grid16randomobs', 'grid16onehot', 'grid64randomobs', 'grid64onehot', 'cliffwalk', 'pendulum', 'mountaincar', 'sparsegraph']\ndef get_env(name):\n if name == 'grid16randomobs':\n env = random_grid_env(16, 16, dim_obs=16, time_limit=50, wall_ratio=0.2, smooth_obs=False, seed=0)\n elif name == 'grid16onehot':\n env = random_grid_env(16, 16, time_limit=50, wall_ratio=0.2, one_hot_obs=True, seed=0)\n elif name == 'grid16sparse':\n env = random_grid_env(16, 16, time_limit=50, wall_ratio=0.2, one_hot_obs=True, seed=0, distance_reward=False)\n elif name == 'grid64randomobs':\n env = random_grid_env(64, 64, dim_obs=64, time_limit=100, wall_ratio=0.2, smooth_obs=False, seed=0)\n elif name == 'grid64onehot':\n env = random_grid_env(64, 64, time_limit=100, wall_ratio=0.2, one_hot_obs=True, seed=0)\n elif name == 'cliffwalk':\n with math_utils.np_seed(0):\n env = tabular_env.CliffwalkEnv(25)\n # Cliffwalk is unsolvable by QI with moderate entropy - up the reward to reduce the effects.\n env = env_wrapper.AbsorbingStateWrapper(env, absorb_reward=10.0)\n env = 
wrap_obs_time(env, dim_obs=16, time_limit=50)\n elif name == 'pendulum':\n env = tabular_env.InvertedPendulum(state_discretization=32, action_discretization=5)\n env = wrap_time(env, time_limit=50)\n elif name == 'mountaincar':\n env = tabular_env.MountainCar(posdisc=56, veldisc=32)\n # MountainCar is unsolvable by QI with moderate entropy - up the reward to reduce the effects.\n env = env_wrapper.AbsorbingStateWrapper(env, absorb_reward=10.0) \n env = wrap_time(env, time_limit=100)\n elif name == 'sparsegraph':\n with math_utils.np_seed(0):\n env = tabular_env.RandomTabularEnv(num_states=500, num_actions=3, transitions_per_action=1, self_loop=True)\n env = env_wrapper.AbsorbingStateWrapper(env, absorb_reward=10.0)\n env = wrap_obs_time(env, dim_obs=4, time_limit=10)\n else:\n raise NotImplementedError('Unknown env id: %s' % name)\n return env\n",
"import unittest\nimport parameterized\nimport numpy as np\n\nfrom rlutil.envs.tabular_cy import q_iteration, tabular_env\nfrom rlutil.envs.tabular_cy import q_iteration_py\n\n\nclass QIterationTest(unittest.TestCase):\n def setUp(self):\n self.num_states = 128\n self.env = tabular_env.RandomTabularEnv(num_states=self.num_states, num_actions=3, transitions_per_action=2)\n self.env_selfloop = tabular_env.RandomTabularEnv(num_states=self.num_states, num_actions=3, transitions_per_action=2, self_loop=True)\n\n def test_num_states(self):\n self.assertEqual(self.env.num_states, self.num_states)\n\n def test_selfloop(self):\n transitions = self.env_selfloop.transitions(2, 0)\n self.assertEqual(len(transitions), 1)\n self.assertEqual(transitions[2], 1.0)\n\n transitions = self.env_selfloop.transitions(2, 1)\n self.assertEqual(len(transitions), 2)\n\n def test_num_transitions(self):\n transitions = self.env.transitions(0, 0)\n self.assertEqual(len(transitions), 2)\n for ns in transitions:\n self.assertAlmostEqual(transitions[ns], 0.5)\n\n def test_random_rollout(self):\n self.env.reset()\n for _ in range(30):\n #self.env.render()\n self.env.step(np.random.randint(0, self.env.num_actions))\n\n def test_q_iteration(self):\n params = {\n 'num_itrs': 1000,\n 'ent_wt': 0.0,\n 'discount': 0.95,\n }\n qvals = q_iteration.softq_iteration(self.env, **params)\n self.env.reset()\n rews = 0\n for _ in range(200):\n #self.env_small.render()\n a_qvals = qvals[self.env.get_state()]\n _, rew, _, _ = self.env.step(np.argmax(a_qvals))\n rews += rew\n self.assertGreater(rews, 0.0)\n\nif __name__ == '__main__':\n unittest.main()\n",
"import numpy as np\n\ndef DictArray(shapes_dict, axis=0, dtype=np.float32):\n keys = sorted(list(shapes_dict.keys())) \n shapes = np.array([shapes_dict[k] for k in keys])\n sizes = [np.prod(shape) for shape in shapes]\n total_size = np.sum(sizes)\n\n key_to_slice = {}\n key_to_shape = {}\n key_to_size = {}\n prev_idx = 0\n for i, k in enumerate(keys):\n size = sizes[i]\n key_to_slice[k] = slice(prev_idx, prev_idx+size)\n key_to_shape[k] = shapes[i]\n key_to_size[k] = size\n prev_idx += size\n \n\n class Template(np.ndarray):\n def __getitem__(self, k):\n if isinstance(k, str):\n slice_ = key_to_slice[k]\n shape = key_to_shape[k]\n return np.reshape(self[slice_], shape)\n else:\n return super(Template, self).__getitem__(k)\n\n def template(**kwargs):\n arr = np.zeros(total_size, dtype=dtype)\n for k in kwargs:\n slice_ = key_to_slice[k]\n arr[slice_] = kwargs[k].reshape(key_to_size[k])\n return arr.view(Template)\n\n return template\n\n\nif __name__ == \"__main__\":\n TestArr = DictArray(shapes_dict={'a': (5,3), 'b': (4)}, axis=1)\n\n rand1 = np.random.randn(5,3)\n rand2 = np.random.randn(5,3)\n a1 = TestArr(a=rand1, b=np.ones(4))\n a2 = TestArr(a=rand2, b=np.ones(4))\n\n assert np.allclose(a1['a'], rand1)\n assert np.allclose(a2['a'], rand2)\n assert np.allclose((a1+a2)['a'], rand1+rand2)\n\n print(type(a1))\n a12 = np.c_[a1, a2].T\n print(a12)\n print(type(a12))\n print(a12['a'])\n\n"
] | [
[
"numpy.array",
"numpy.argmax"
],
[
"numpy.argmax",
"numpy.random.randint"
],
[
"numpy.allclose",
"numpy.reshape",
"numpy.ones",
"numpy.random.randn",
"numpy.prod",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |